2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* File-scope state and forward declarations.
 * NOTE(review): this extraction is missing interleaved lines (braces, blank
 * lines, some statements); code is preserved byte-for-byte below. */
/* Feature mask advertised in L2CAP information responses. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Bitmap of fixed channels supported by this host. */
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP channels, protected by chan_list_lock. */
66 static LIST_HEAD(chan_list);
67 static DEFINE_RWLOCK(chan_list_lock);
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
/* NOTE(review): the continuation line of this prototype is not visible here. */
73 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
75 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
76 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
77 struct l2cap_chan *chan, int err);
79 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
81 /* ---- L2CAP channels ---- */
/* Take a reference on channel @c. */
83 static inline void chan_hold(struct l2cap_chan *c)
85 atomic_inc(&c->refcnt);
/* Drop a reference on channel @c; the last put frees it.
 * NOTE(review): the free statement is not visible in this extraction. */
88 static inline void chan_put(struct l2cap_chan *c)
90 if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on @conn by destination CID.
 * Caller must hold conn->chan_lock. Match/return lines not visible here. */
94 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
98 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID.
 * Caller must hold conn->chan_lock. Match/return lines not visible here. */
106 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
108 struct l2cap_chan *c;
110 list_for_each_entry(c, &conn->chan_l, list) {
117 /* Find channel with given SCID.
118 * Returns locked socket */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
121 struct l2cap_chan *c;
/* Serialize against channel add/remove on this connection. */
123 read_lock(&conn->chan_lock);
124 c = __l2cap_get_chan_by_scid(conn, cid);
127 read_unlock(&conn->chan_lock);
/* Look up a channel on @conn by outstanding signalling identifier.
 * Caller must hold conn->chan_lock. */
131 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
133 struct l2cap_chan *c;
135 list_for_each_entry(c, &conn->chan_l, list) {
136 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
142 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
144 struct l2cap_chan *c;
146 read_lock(&conn->chan_lock);
147 c = __l2cap_get_chan_by_ident(conn, ident);
150 read_unlock(&conn->chan_lock);
/* Find a channel bound to source PSM @psm on local address @src.
 * Caller must hold chan_list_lock. */
154 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
156 struct l2cap_chan *c;
158 list_for_each_entry(c, &chan_list, global_l) {
159 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src; with psm == 0, auto-allocate a dynamic PSM.
 * Valid dynamic PSMs are odd values scanned in 0x1001..0x10ff. */
168 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
172 write_lock_bh(&chan_list_lock);
/* A fixed PSM must not already be bound on this source address. */
174 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
187 for (p = 0x1001; p < 0x1100; p += 2)
188 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
189 chan->psm = cpu_to_le16(p);
190 chan->sport = cpu_to_le16(p);
197 write_unlock_bh(&chan_list_lock);
/* Bind @chan to a fixed source CID; assignment line not visible here. */
201 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
203 write_lock_bh(&chan_list_lock);
207 write_unlock_bh(&chan_list_lock);
/* Return the first unused dynamic source CID on @conn.
 * Caller must hold conn->chan_lock. */
212 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
214 u16 cid = L2CAP_CID_DYN_START;
216 for (; cid < L2CAP_CID_DYN_END; cid++) {
217 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm @timer to fire @timeout jiffies from now.
 * NOTE(review): the BT_DBG format says "chan %p" but passes chan->sk. */
224 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
226 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
/* mod_timer() returns 0 when the timer was not already pending. */
228 if (!mod_timer(timer, jiffies + timeout))
/* Cancel @timer if pending; body of the success branch not visible here. */
232 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
234 BT_DBG("chan %p state %d", chan, chan->state);
236 if (timer_pending(timer) && del_timer(timer))
/* Move @chan to @state via the channel ops state_change callback. */
240 static void l2cap_state_change(struct l2cap_chan *chan, int state)
243 chan->ops->state_change(chan->data, state);
/* Channel timer callback: close the channel with a state-dependent reason. */
246 static void l2cap_chan_timeout(unsigned long arg)
248 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
249 struct sock *sk = chan->sk;
252 BT_DBG("chan %p state %d", chan, chan->state);
256 if (sock_owned_by_user(sk)) {
257 /* sk is owned by user. Try again later */
258 __set_chan_timer(chan, HZ / 5);
/* Connected/configuring channels time out as connection refused;
 * so does a connect attempt past the SDP security level. */
264 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
265 reason = ECONNREFUSED;
266 else if (chan->state == BT_CONNECT &&
267 chan->sec_level != BT_SECURITY_SDP)
268 reason = ECONNREFUSED;
272 l2cap_chan_close(chan, reason);
276 chan->ops->close(chan->data);
/* Allocate a new channel for @sk, link it on the global list, arm its
 * timer and start it in BT_OPEN with one reference held. */
280 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
282 struct l2cap_chan *chan;
284 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
290 write_lock_bh(&chan_list_lock);
291 list_add(&chan->global_l, &chan_list);
292 write_unlock_bh(&chan_list_lock);
294 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
296 chan->state = BT_OPEN;
298 atomic_set(&chan->refcnt, 1);
/* Unlink @chan from the global list; the final chan_put is not visible
 * in this extraction. */
303 void l2cap_chan_destroy(struct l2cap_chan *chan)
305 write_lock_bh(&chan_list_lock);
306 list_del(&chan->global_l);
307 write_unlock_bh(&chan_list_lock);
/* Attach @chan to @conn, assigning CIDs/MTU by channel type.
 * Caller must hold conn->chan_lock. */
312 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
314 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
315 chan->psm, chan->dcid);
/* 0x13: default disconnect reason (remote user terminated) — per HCI
 * error code usage; confirm against hci spec table. */
317 conn->disc_reason = 0x13;
321 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
/* LE links use the fixed LE data channel, no dynamic CID. */
322 if (conn->hcon->type == LE_LINK) {
324 chan->omtu = L2CAP_LE_DEFAULT_MTU;
325 chan->scid = L2CAP_CID_LE_DATA;
326 chan->dcid = L2CAP_CID_LE_DATA;
328 /* Alloc CID for connection-oriented socket */
329 chan->scid = l2cap_alloc_cid(conn);
330 chan->omtu = L2CAP_DEFAULT_MTU;
332 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
333 /* Connectionless socket */
334 chan->scid = L2CAP_CID_CONN_LESS;
335 chan->dcid = L2CAP_CID_CONN_LESS;
336 chan->omtu = L2CAP_DEFAULT_MTU;
338 /* Raw socket can send/recv signalling messages only */
339 chan->scid = L2CAP_CID_SIGNALING;
340 chan->dcid = L2CAP_CID_SIGNALING;
341 chan->omtu = L2CAP_DEFAULT_MTU;
346 list_add(&chan->list, &conn->chan_l);
350 * Must be called on the locked socket. */
/* Detach @chan from its connection, mark the socket closed/zapped and
 * purge all ERTM state (timers, srej/busy queues). */
351 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
353 struct sock *sk = chan->sk;
354 struct l2cap_conn *conn = chan->conn;
355 struct sock *parent = bt_sk(sk)->parent;
357 __clear_chan_timer(chan);
359 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
362 /* Delete from channel list */
363 write_lock_bh(&conn->chan_lock);
364 list_del(&chan->list);
365 write_unlock_bh(&conn->chan_lock);
/* Release the HCI connection reference taken when the chan was added. */
369 hci_conn_put(conn->hcon);
372 l2cap_state_change(chan, BT_CLOSED);
373 sock_set_flag(sk, SOCK_ZAPPED);
/* Pending accept: unlink from the parent's accept queue and wake it. */
379 bt_accept_unlink(sk);
380 parent->sk_data_ready(parent, 0);
382 sk->sk_state_change(sk);
/* Skip ERTM teardown unless both config directions completed. */
384 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
385 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
388 skb_queue_purge(&chan->tx_q);
390 if (chan->mode == L2CAP_MODE_ERTM) {
391 struct srej_list *l, *tmp;
393 __clear_retrans_timer(chan);
394 __clear_monitor_timer(chan);
395 __clear_ack_timer(chan);
397 skb_queue_purge(&chan->srej_q);
398 skb_queue_purge(&chan->busy_q);
/* Free the SREJ bookkeeping list; body not visible in this extraction. */
400 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of listening socket @parent. */
407 static void l2cap_chan_cleanup_listen(struct sock *parent)
411 BT_DBG("parent %p", parent);
413 /* Close not yet accepted channels */
414 while ((sk = bt_accept_dequeue(parent, NULL))) {
415 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
416 __clear_chan_timer(chan);
418 l2cap_chan_close(chan, ECONNRESET);
420 chan->ops->close(chan->data);
/* Close @chan with error @reason, acting on its current state.
 * NOTE(review): the switch case labels are not visible in this extraction;
 * branches below correspond to listen / connected-config / connect2 /
 * default states in order. */
424 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
426 struct l2cap_conn *conn = chan->conn;
427 struct sock *sk = chan->sk;
429 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
431 switch (chan->state) {
433 l2cap_chan_cleanup_listen(sk);
435 l2cap_state_change(chan, BT_CLOSED);
436 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected ACL channel: send a disconnect request and wait for rsp. */
441 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
442 conn->hcon->type == ACL_LINK) {
443 __clear_chan_timer(chan);
444 __set_chan_timer(chan, sk->sk_sndtimeo);
445 l2cap_send_disconn_req(conn, chan, reason);
447 l2cap_chan_del(chan, reason);
/* Incoming connection still pending: refuse it explicitly. */
451 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 conn->hcon->type == ACL_LINK) {
453 struct l2cap_conn_rsp rsp;
456 if (bt_sk(sk)->defer_setup)
457 result = L2CAP_CR_SEC_BLOCK;
459 result = L2CAP_CR_BAD_PSM;
460 l2cap_state_change(chan, BT_DISCONN);
462 rsp.scid = cpu_to_le16(chan->dcid);
463 rsp.dcid = cpu_to_le16(chan->scid);
464 rsp.result = cpu_to_le16(result);
465 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
466 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
470 l2cap_chan_del(chan, reason);
475 l2cap_chan_del(chan, reason);
479 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type / security level to an HCI authentication type.
 * PSM 0x0001 is SDP, which is downgraded to BT_SECURITY_SDP. */
484 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
486 if (chan->chan_type == L2CAP_CHAN_RAW) {
487 switch (chan->sec_level) {
488 case BT_SECURITY_HIGH:
489 return HCI_AT_DEDICATED_BONDING_MITM;
490 case BT_SECURITY_MEDIUM:
491 return HCI_AT_DEDICATED_BONDING;
493 return HCI_AT_NO_BONDING;
495 } else if (chan->psm == cpu_to_le16(0x0001)) {
496 if (chan->sec_level == BT_SECURITY_LOW)
497 chan->sec_level = BT_SECURITY_SDP;
499 if (chan->sec_level == BT_SECURITY_HIGH)
500 return HCI_AT_NO_BONDING_MITM;
502 return HCI_AT_NO_BONDING;
504 switch (chan->sec_level) {
505 case BT_SECURITY_HIGH:
506 return HCI_AT_GENERAL_BONDING_MITM;
507 case BT_SECURITY_MEDIUM:
508 return HCI_AT_GENERAL_BONDING;
510 return HCI_AT_NO_BONDING;
515 /* Service level security */
/* Ask HCI to enforce the channel's security level on its connection. */
516 static inline int l2cap_check_security(struct l2cap_chan *chan)
518 struct l2cap_conn *conn = chan->conn;
521 auth_type = l2cap_get_auth_type(chan);
523 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for @conn, wrapping
 * within the kernel range. */
526 static u8 l2cap_get_ident(struct l2cap_conn *conn)
530 /* Get next available identificator.
531 * 1 - 128 are used by kernel.
532 * 129 - 199 are reserved.
533 * 200 - 254 are used by utilities like l2ping, etc.
536 spin_lock_bh(&conn->lock);
538 if (++conn->tx_ident > 128)
543 spin_unlock_bh(&conn->lock);
/* Build a signalling command and push it out on the ACL link.
 * Non-flushable ACL start is used when the controller supports it. */
548 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
550 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
553 BT_DBG("code 0x%2.2x", code);
558 if (lmp_no_flush_capable(conn->hcon->hdev))
559 flags = ACL_START_NO_FLUSH;
563 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
565 hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM S-frame carrying @control, adding the
 * F/P bits from pending connection state and an optional CRC16 FCS. */
568 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
571 struct l2cap_hdr *lh;
572 struct l2cap_conn *conn = chan->conn;
573 int count, hlen = L2CAP_HDR_SIZE + 2;
576 if (chan->state != BT_CONNECTED)
/* FCS adds two bytes; the increment itself is not visible here. */
579 if (chan->fcs == L2CAP_FCS_CRC16)
582 BT_DBG("chan %p, control 0x%2.2x", chan, control);
584 count = min_t(unsigned int, conn->mtu, hlen);
585 control |= L2CAP_CTRL_FRAME_TYPE;
587 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
588 control |= L2CAP_CTRL_FINAL;
590 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
591 control |= L2CAP_CTRL_POLL;
593 skb = bt_skb_alloc(count, GFP_ATOMIC);
597 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
598 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
599 lh->cid = cpu_to_le16(chan->dcid);
600 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control (everything before the FCS field). */
602 if (chan->fcs == L2CAP_FCS_CRC16) {
603 u16 fcs = crc16(0, (u8 *)lh, count - 2);
604 put_unaligned_le16(fcs, skb_put(skb, 2));
607 if (lmp_no_flush_capable(conn->hcon->hdev))
608 flags = ACL_START_NO_FLUSH;
612 bt_cb(skb)->force_active = chan->force_active;
614 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send Receiver-Ready, or Receiver-Not-Ready if we are locally busy,
 * acknowledging up to buffer_seq. */
617 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
619 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
620 control |= L2CAP_SUPER_RCV_NOT_READY;
621 set_bit(CONN_RNR_SENT, &chan->conn_state);
623 control |= L2CAP_SUPER_RCV_READY;
625 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
627 l2cap_send_sframe(chan, control);
/* True when no connect request is outstanding for @chan. */
630 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
632 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment: send a connect request if the feature
 * exchange is done, otherwise start it with an information request. */
635 static void l2cap_do_start(struct l2cap_chan *chan)
637 struct l2cap_conn *conn = chan->conn;
639 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature mask request sent but not answered yet: wait. */
640 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
643 if (l2cap_check_security(chan) &&
644 __l2cap_no_conn_pending(chan)) {
645 struct l2cap_conn_req req;
646 req.scid = cpu_to_le16(chan->scid);
649 chan->ident = l2cap_get_ident(conn);
650 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
652 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* First channel on this connection: query the peer's feature mask. */
656 struct l2cap_info_req req;
657 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
659 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
660 conn->info_ident = l2cap_get_ident(conn);
662 mod_timer(&conn->info_timer, jiffies +
663 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
665 l2cap_send_cmd(conn, conn->info_ident,
666 L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero when both the local and remote feature masks allow @mode. */
670 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
672 u32 local_feat_mask = l2cap_feat_mask;
673 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
677 case L2CAP_MODE_ERTM:
678 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
679 case L2CAP_MODE_STREAMING:
680 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP disconnect request for @chan (stopping all ERTM timers
 * first) and move the channel into BT_DISCONN. */
686 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
689 struct l2cap_disconn_req req;
696 if (chan->mode == L2CAP_MODE_ERTM) {
697 __clear_retrans_timer(chan);
698 __clear_monitor_timer(chan);
699 __clear_ack_timer(chan);
702 req.dcid = cpu_to_le16(chan->dcid);
703 req.scid = cpu_to_le16(chan->scid);
704 l2cap_send_cmd(conn, l2cap_get_ident(conn),
705 L2CAP_DISCONN_REQ, sizeof(req), &req);
707 l2cap_state_change(chan, BT_DISCONN);
711 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its state machine: issue
 * connect requests for BT_CONNECT channels and connect responses
 * (plus the first config request) for BT_CONNECT2 channels. */
712 static void l2cap_conn_start(struct l2cap_conn *conn)
714 struct l2cap_chan *chan, *tmp;
716 BT_DBG("conn %p", conn);
718 read_lock(&conn->chan_lock);
720 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
721 struct sock *sk = chan->sk;
725 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
730 if (chan->state == BT_CONNECT) {
731 struct l2cap_conn_req req;
733 if (!l2cap_check_security(chan) ||
734 !__l2cap_no_conn_pending(chan)) {
/* Peer does not support the requested mode and we cannot
 * fall back: abort the channel. */
739 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
740 && test_bit(CONF_STATE2_DEVICE,
741 &chan->conf_state)) {
742 /* l2cap_chan_close() calls list_del(chan)
743 * so release the lock */
744 read_unlock_bh(&conn->chan_lock);
745 l2cap_chan_close(chan, ECONNRESET);
746 read_lock_bh(&conn->chan_lock);
751 req.scid = cpu_to_le16(chan->scid);
754 chan->ident = l2cap_get_ident(conn);
755 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
757 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
760 } else if (chan->state == BT_CONNECT2) {
761 struct l2cap_conn_rsp rsp;
763 rsp.scid = cpu_to_le16(chan->dcid);
764 rsp.dcid = cpu_to_le16(chan->scid);
766 if (l2cap_check_security(chan)) {
/* Deferred setup: report pending and wake the listener. */
767 if (bt_sk(sk)->defer_setup) {
768 struct sock *parent = bt_sk(sk)->parent;
769 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
770 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
771 parent->sk_data_ready(parent, 0);
774 l2cap_state_change(chan, BT_CONFIG);
775 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
776 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
779 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
780 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
783 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only start configuration after a successful response and if no
 * config request was already sent. */
786 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
787 rsp.result != L2CAP_CR_SUCCESS) {
792 set_bit(CONF_REQ_SENT, &chan->conf_state);
793 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
794 l2cap_build_conf_req(chan, buf), buf);
795 chan->num_conf_req++;
801 read_unlock(&conn->chan_lock);
804 /* Find socket with cid and source bdaddr.
805 * Returns closest match, locked.
/* Exact source-address match wins; a BDADDR_ANY binding is kept as the
 * closest-match fallback (assignment line not visible here). */
807 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
809 struct l2cap_chan *c, *c1 = NULL;
811 read_lock(&chan_list_lock);
813 list_for_each_entry(c, &chan_list, global_l) {
814 struct sock *sk = c->sk;
816 if (state && c->state != state)
819 if (c->scid == cid) {
821 if (!bacmp(&bt_sk(sk)->src, src)) {
822 read_unlock(&chan_list_lock);
827 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
832 read_unlock(&chan_list_lock);
/* Handle an incoming LE connection: find the LE data listener, spawn a
 * child channel, attach it to @conn and wake the listening socket. */
837 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
839 struct sock *parent, *sk;
840 struct l2cap_chan *chan, *pchan;
844 /* Check if we have socket listening on cid */
845 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
852 bh_lock_sock(parent);
854 /* Check for backlog size */
855 if (sk_acceptq_is_full(parent)) {
856 BT_DBG("backlog full %d", parent->sk_ack_backlog);
860 chan = pchan->ops->new_connection(pchan->data);
866 write_lock_bh(&conn->chan_lock);
/* Hold the HCI link for the lifetime of the new channel. */
868 hci_conn_hold(conn->hcon);
870 bacpy(&bt_sk(sk)->src, conn->src);
871 bacpy(&bt_sk(sk)->dst, conn->dst);
873 bt_accept_enqueue(parent, sk);
875 __l2cap_chan_add(conn, chan);
877 __set_chan_timer(chan, sk->sk_sndtimeo);
879 l2cap_state_change(chan, BT_CONNECTED);
880 parent->sk_data_ready(parent, 0);
882 write_unlock_bh(&conn->chan_lock);
885 bh_unlock_sock(parent);
/* Mark @sk's channel fully connected: clear config state and timers,
 * then notify the socket (and its listening parent, if any). */
888 static void l2cap_chan_ready(struct sock *sk)
890 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
891 struct sock *parent = bt_sk(sk)->parent;
893 BT_DBG("sk %p, parent %p", sk, parent);
895 chan->conf_state = 0;
896 __clear_chan_timer(chan);
898 l2cap_state_change(chan, BT_CONNECTED);
899 sk->sk_state_change(sk);
902 parent->sk_data_ready(parent, 0);
/* The underlying HCI link is up: bring every channel forward — LE
 * channels through SMP security, non-connection-oriented channels
 * straight to connected, and BT_CONNECT channels into l2cap_do_start. */
905 static void l2cap_conn_ready(struct l2cap_conn *conn)
907 struct l2cap_chan *chan;
909 BT_DBG("conn %p", conn);
/* Incoming LE link: dispatch to the LE accept path first. */
911 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
912 l2cap_le_conn_ready(conn);
914 read_lock(&conn->chan_lock);
916 list_for_each_entry(chan, &conn->chan_l, list) {
917 struct sock *sk = chan->sk;
921 if (conn->hcon->type == LE_LINK) {
922 if (smp_conn_security(conn, chan->sec_level))
923 l2cap_chan_ready(sk);
925 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
926 __clear_chan_timer(chan);
927 l2cap_state_change(chan, BT_CONNECTED);
928 sk->sk_state_change(sk);
930 } else if (chan->state == BT_CONNECT)
931 l2cap_do_start(chan);
936 read_unlock(&conn->chan_lock);
939 /* Notify sockets that we cannot guarantee reliability anymore */
940 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
942 struct l2cap_chan *chan;
944 BT_DBG("conn %p", conn);
946 read_lock(&conn->chan_lock);
948 list_for_each_entry(chan, &conn->chan_l, list) {
949 struct sock *sk = chan->sk;
/* Only channels that demanded reliability are told; the error
 * propagation line is not visible in this extraction. */
951 if (chan->force_reliable)
955 read_unlock(&conn->chan_lock);
/* Information request timed out: mark the feature exchange done with
 * whatever we have and let pending channels proceed. */
958 static void l2cap_info_timeout(unsigned long arg)
960 struct l2cap_conn *conn = (void *) arg;
962 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
963 conn->info_ident = 0;
965 l2cap_conn_start(conn);
/* Tear down the L2CAP layer of @hcon: delete every channel with @err,
 * stop the info/security timers and free the connection state. */
968 static void l2cap_conn_del(struct hci_conn *hcon, int err)
970 struct l2cap_conn *conn = hcon->l2cap_data;
971 struct l2cap_chan *chan, *l;
977 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled inbound frame. */
979 kfree_skb(conn->rx_skb);
982 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
985 l2cap_chan_del(chan, err);
987 chan->ops->close(chan->data);
990 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
991 del_timer_sync(&conn->info_timer);
993 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
994 del_timer(&conn->security_timer);
996 hcon->l2cap_data = NULL;
/* LE security procedure timed out: drop the whole connection. */
1000 static void security_timeout(unsigned long arg)
1002 struct l2cap_conn *conn = (void *) arg;
1004 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Get-or-create the l2cap_conn for @hcon, initialising MTU, addresses,
 * locks, the channel list and the per-link timer (security timer for
 * LE links, info timer otherwise). */
1007 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1009 struct l2cap_conn *conn = hcon->l2cap_data;
1014 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1018 hcon->l2cap_data = conn;
1021 BT_DBG("hcon %p conn %p", hcon, conn);
/* LE links may advertise a dedicated LE MTU; fall back to the ACL MTU. */
1023 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1024 conn->mtu = hcon->hdev->le_mtu;
1026 conn->mtu = hcon->hdev->acl_mtu;
1028 conn->src = &hcon->hdev->bdaddr;
1029 conn->dst = &hcon->dst;
1031 conn->feat_mask = 0;
1033 spin_lock_init(&conn->lock);
1034 rwlock_init(&conn->chan_lock);
1036 INIT_LIST_HEAD(&conn->chan_l);
1038 if (hcon->type == LE_LINK)
1039 setup_timer(&conn->security_timer, security_timeout,
1040 (unsigned long) conn);
1042 setup_timer(&conn->info_timer, l2cap_info_timeout,
1043 (unsigned long) conn);
/* Default disconnect reason; see __l2cap_chan_add(). */
1045 conn->disc_reason = 0x13;
/* Locked wrapper around __l2cap_chan_add(). */
1050 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1052 write_lock_bh(&conn->chan_lock);
1053 __l2cap_chan_add(conn, chan);
1054 write_unlock_bh(&conn->chan_lock);
1057 /* ---- Socket interface ---- */
1059 /* Find socket with psm and source bdaddr.
1060 * Returns closest match.
/* Exact source-address match wins; a BDADDR_ANY binding is kept as the
 * closest-match fallback (assignment line not visible here). */
1062 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1064 struct l2cap_chan *c, *c1 = NULL;
1066 read_lock(&chan_list_lock);
1068 list_for_each_entry(c, &chan_list, global_l) {
1069 struct sock *sk = c->sk;
1071 if (state && c->state != state)
1074 if (c->psm == psm) {
1076 if (!bacmp(&bt_sk(sk)->src, src)) {
1077 read_unlock(&chan_list_lock);
1082 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1087 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan: route to a local adapter,
 * create (or reuse) the HCI link — LE when targeting the LE data CID,
 * ACL otherwise — attach the channel and start establishment. */
1092 int l2cap_chan_connect(struct l2cap_chan *chan)
1094 struct sock *sk = chan->sk;
1095 bdaddr_t *src = &bt_sk(sk)->src;
1096 bdaddr_t *dst = &bt_sk(sk)->dst;
1097 struct l2cap_conn *conn;
1098 struct hci_conn *hcon;
1099 struct hci_dev *hdev;
1103 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1106 hdev = hci_get_route(dst, src);
1108 return -EHOSTUNREACH;
1110 hci_dev_lock_bh(hdev);
1112 auth_type = l2cap_get_auth_type(chan);
1114 if (chan->dcid == L2CAP_CID_LE_DATA)
1115 hcon = hci_connect(hdev, LE_LINK, dst,
1116 chan->sec_level, auth_type);
1118 hcon = hci_connect(hdev, ACL_LINK, dst,
1119 chan->sec_level, auth_type);
1122 err = PTR_ERR(hcon);
1126 conn = l2cap_conn_add(hcon, 0);
1133 /* Update source addr of the socket */
1134 bacpy(src, conn->src);
1136 l2cap_chan_add(conn, chan);
1138 l2cap_state_change(chan, BT_CONNECT);
1139 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link already up: skip straight to channel establishment. */
1141 if (hcon->state == BT_CONNECTED) {
1142 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1143 __clear_chan_timer(chan);
1144 if (l2cap_check_security(chan))
1145 l2cap_state_change(chan, BT_CONNECTED);
1147 l2cap_do_start(chan);
1153 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every outstanding ERTM frame on @sk's
 * channel has been acknowledged, the timeout expires, a signal arrives
 * or a socket error is raised. */
1158 int __l2cap_wait_ack(struct sock *sk)
1160 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1161 DECLARE_WAITQUEUE(wait, current);
1165 add_wait_queue(sk_sleep(sk), &wait);
1166 while ((chan->unacked_frames > 0 && chan->conn)) {
1167 set_current_state(TASK_INTERRUPTIBLE);
1172 if (signal_pending(current)) {
1173 err = sock_intr_errno(timeo);
1178 timeo = schedule_timeout(timeo);
1181 err = sock_error(sk);
1185 set_current_state(TASK_RUNNING);
1186 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: poll the peer again, or give up and disconnect
 * once remote_max_tx retries are exhausted. */
1190 static void l2cap_monitor_timeout(unsigned long arg)
1192 struct l2cap_chan *chan = (void *) arg;
1193 struct sock *sk = chan->sk;
1195 BT_DBG("chan %p", chan);
1198 if (chan->retry_count >= chan->remote_max_tx) {
1199 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1204 chan->retry_count++;
1205 __set_monitor_timer(chan);
1207 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: enter the WAIT_F state and poll the peer,
 * handing further supervision to the monitor timer. */
1211 static void l2cap_retrans_timeout(unsigned long arg)
1213 struct l2cap_chan *chan = (void *) arg;
1214 struct sock *sk = chan->sk;
1216 BT_DBG("chan %p", chan);
1219 chan->retry_count = 1;
1220 __set_monitor_timer(chan);
1222 set_bit(CONN_WAIT_F, &chan->conn_state);
1224 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Pop acknowledged frames off the transmit queue, stopping at the
 * expected-ack boundary, and stop the retransmit timer when all
 * outstanding frames are acked. */
1228 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1230 struct sk_buff *skb;
1232 while ((skb = skb_peek(&chan->tx_q)) &&
1233 chan->unacked_frames) {
1234 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1237 skb = skb_dequeue(&chan->tx_q);
1240 chan->unacked_frames--;
1243 if (!chan->unacked_frames)
1244 __clear_retrans_timer(chan);
/* Hand one skb to HCI for transmission, choosing non-flushable ACL
 * start when the channel is not flushable and the controller allows. */
1247 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1249 struct hci_conn *hcon = chan->conn->hcon;
1252 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1254 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1255 flags = ACL_START_NO_FLUSH;
1259 bt_cb(skb)->force_active = chan->force_active;
1260 hci_send_acl(hcon, skb, flags);
/* Streaming mode: drain the tx queue, stamping each frame with the next
 * TxSeq (modulo-64) and an optional trailing CRC16 FCS, and send it. */
1263 void l2cap_streaming_send(struct l2cap_chan *chan)
1265 struct sk_buff *skb;
1268 while ((skb = skb_dequeue(&chan->tx_q))) {
1269 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1270 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1271 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything up to the two-byte FCS field itself. */
1273 if (chan->fcs == L2CAP_FCS_CRC16) {
1274 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1275 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1278 l2cap_do_send(chan, skb);
1280 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the queued I-frame carrying @tx_seq: clone it, rebuild its
 * control field (keeping SAR bits, adding F bit and current ReqSeq),
 * refresh the FCS and send. Disconnects when max retries are reached. */
1284 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1286 struct sk_buff *skb, *tx_skb;
1289 skb = skb_peek(&chan->tx_q);
/* Linear scan of the tx queue for the matching sequence number. */
1294 if (bt_cb(skb)->tx_seq == tx_seq)
1297 if (skb_queue_is_last(&chan->tx_q, skb))
1300 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1302 if (chan->remote_max_tx &&
1303 bt_cb(skb)->retries == chan->remote_max_tx) {
1304 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1308 tx_skb = skb_clone(skb, GFP_ATOMIC);
1309 bt_cb(skb)->retries++;
1310 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1311 control &= L2CAP_CTRL_SAR;
1313 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1314 control |= L2CAP_CTRL_FINAL;
1316 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1317 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1319 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1321 if (chan->fcs == L2CAP_FCS_CRC16) {
1322 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1323 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1326 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send frames from tx_send_head while the transmit
 * window has room, stamping control fields, FCS and retry counters, and
 * arming the retransmission timer for each frame sent.
 * NOTE(review): the FCS is computed/written via skb->data rather than
 * tx_skb->data — valid only because skb_clone() shares the data buffer. */
1329 int l2cap_ertm_send(struct l2cap_chan *chan)
1331 struct sk_buff *skb, *tx_skb;
1335 if (chan->state != BT_CONNECTED)
1338 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1340 if (chan->remote_max_tx &&
1341 bt_cb(skb)->retries == chan->remote_max_tx) {
1342 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1346 tx_skb = skb_clone(skb, GFP_ATOMIC);
1348 bt_cb(skb)->retries++;
1350 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1351 control &= L2CAP_CTRL_SAR;
1353 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1354 control |= L2CAP_CTRL_FINAL;
1356 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1357 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1358 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1361 if (chan->fcs == L2CAP_FCS_CRC16) {
1362 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1363 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1366 l2cap_do_send(chan, tx_skb);
1368 __set_retrans_timer(chan);
1370 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1371 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* First transmission of this frame: it is now outstanding. */
1373 if (bt_cb(skb)->retries == 1)
1374 chan->unacked_frames++;
1376 chan->frames_sent++;
1378 if (skb_queue_is_last(&chan->tx_q, skb))
1379 chan->tx_send_head = NULL;
1381 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the head of the tx queue and resend
 * everything from the last acknowledged sequence number. */
1389 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1393 if (!skb_queue_empty(&chan->tx_q))
1394 chan->tx_send_head = chan->tx_q.next;
1396 chan->next_tx_seq = chan->expected_ack_seq;
1397 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy, otherwise
 * piggyback the ack on pending I-frames if possible, else send RR. */
1401 static void l2cap_send_ack(struct l2cap_chan *chan)
1405 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1407 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1408 control |= L2CAP_SUPER_RCV_NOT_READY;
1409 set_bit(CONN_RNR_SENT, &chan->conn_state);
1410 l2cap_send_sframe(chan, control);
/* I-frames were sent and carry the ack implicitly; no S-frame needed. */
1414 if (l2cap_ertm_send(chan) > 0)
1417 control |= L2CAP_SUPER_RCV_READY;
1418 l2cap_send_sframe(chan, control);
/* Send a final (F=1) SREJ S-frame for the last entry on the SREJ list. */
1421 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1423 struct srej_list *tail;
1426 control = L2CAP_SUPER_SELECT_REJECT;
1427 control |= L2CAP_CTRL_FINAL;
1429 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1430 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1432 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: @count bytes into the
 * linear part, the remainder into MTU-sized fragments chained on
 * frag_list. */
1435 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1437 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1438 struct sk_buff **frag;
1441 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1447 /* Continuation fragments (no L2CAP header) */
1448 frag = &skb_shinfo(skb)->frag_list;
1450 count = min_t(unsigned int, conn->mtu, len);
1452 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1455 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1461 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus a
 * two-byte PSM field, then the user payload from @msg.
 * Returns the skb or an ERR_PTR on allocation/copy failure. */
1467 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1469 struct sock *sk = chan->sk;
1470 struct l2cap_conn *conn = chan->conn;
1471 struct sk_buff *skb;
1472 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1473 struct l2cap_hdr *lh;
1475 BT_DBG("sk %p len %d", sk, (int)len);
/* Linear part is capped at the connection MTU; rest goes to frag_list. */
1477 count = min_t(unsigned int, (conn->mtu - hlen), len);
1478 skb = bt_skb_send_alloc(sk, count + hlen,
1479 msg->msg_flags & MSG_DONTWAIT, &err);
1481 return ERR_PTR(err);
1483 /* Create L2CAP header */
1484 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1485 lh->cid = cpu_to_le16(chan->dcid);
1486 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1487 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1489 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1490 if (unlikely(err < 0)) {
1492 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: bare L2CAP header followed by the
 * user payload from @msg. Returns the skb or an ERR_PTR. */
1497 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1499 struct sock *sk = chan->sk;
1500 struct l2cap_conn *conn = chan->conn;
1501 struct sk_buff *skb;
1502 int err, count, hlen = L2CAP_HDR_SIZE;
1503 struct l2cap_hdr *lh;
1505 BT_DBG("sk %p len %d", sk, (int)len);
1507 count = min_t(unsigned int, (conn->mtu - hlen), len);
1508 skb = bt_skb_send_alloc(sk, count + hlen,
1509 msg->msg_flags & MSG_DONTWAIT, &err);
1511 return ERR_PTR(err);
1513 /* Create L2CAP header */
1514 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1515 lh->cid = cpu_to_le16(chan->dcid);
1516 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1518 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1519 if (unlikely(err < 0)) {
1521 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional SDU-length field (@sdulen != 0 on SDU start frames),
 * user payload, and a placeholder FCS that the send path fills in.
 * Returns the skb or an ERR_PTR. */
1526 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1528 struct sock *sk = chan->sk;
1529 struct l2cap_conn *conn = chan->conn;
1530 struct sk_buff *skb;
1531 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1532 struct l2cap_hdr *lh;
1534 BT_DBG("sk %p len %d", sk, (int)len);
1537 return ERR_PTR(-ENOTCONN);
/* hlen adjustments for sdulen/FCS are partly not visible here. */
1542 if (chan->fcs == L2CAP_FCS_CRC16)
1545 count = min_t(unsigned int, (conn->mtu - hlen), len);
1546 skb = bt_skb_send_alloc(sk, count + hlen,
1547 msg->msg_flags & MSG_DONTWAIT, &err);
1549 return ERR_PTR(err);
1551 /* Create L2CAP header */
1552 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1553 lh->cid = cpu_to_le16(chan->dcid);
1554 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1555 put_unaligned_le16(control, skb_put(skb, 2));
1557 put_unaligned_le16(sdulen, skb_put(skb, 2));
1559 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1560 if (unlikely(err < 0)) {
1562 return ERR_PTR(err);
/* Reserve the FCS field; the real value is computed at send time. */
1565 if (chan->fcs == L2CAP_FCS_CRC16)
1566 put_unaligned_le16(0, skb_put(skb, 2));
1568 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a sequence of SAR
 * I-frames (START, CONTINUE..., END), build them on a private queue,
 * then splice the whole sequence atomically onto chan->tx_q.
 * Returns the total size queued (via 'size') or a negative errno.
 */
1572 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1574 struct sk_buff *skb;
1575 struct sk_buff_head sar_queue;
1579 skb_queue_head_init(&sar_queue);
/* First fragment: SDU_START carries the total SDU length (len) */
1580 control = L2CAP_SDU_START;
1581 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1583 return PTR_ERR(skb);
1585 __skb_queue_tail(&sar_queue, skb);
1586 len -= chan->remote_mps;
1587 size += chan->remote_mps;
/* Middle fragments are full-MPS CONTINUE frames; the last is END */
1592 if (len > chan->remote_mps) {
1593 control = L2CAP_SDU_CONTINUE;
1594 buflen = chan->remote_mps;
1596 control = L2CAP_SDU_END;
1600 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop everything built so far — all-or-nothing */
1602 skb_queue_purge(&sar_queue);
1603 return PTR_ERR(skb);
1606 __skb_queue_tail(&sar_queue, skb);
1610 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
/* If nothing was pending, transmission resumes from our first frame */
1611 if (chan->tx_send_head == NULL)
1612 chan->tx_send_head = sar_queue.next;
/* Top-level channel send: dispatch on channel type and mode.
 * Connectionless channels send a single G-frame immediately; basic
 * mode sends one B-frame (after MTU check); ERTM/streaming either
 * queue a single unsegmented I-frame or segment the SDU via
 * l2cap_sar_segment_sdu(), then kick the mode-specific transmitter.
 */
1617 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1619 struct sk_buff *skb;
1623 /* Connectionless channel */
1624 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1625 skb = l2cap_create_connless_pdu(chan, msg, len);
1627 return PTR_ERR(skb);
1629 l2cap_do_send(chan, skb);
1633 switch (chan->mode) {
1634 case L2CAP_MODE_BASIC:
1635 /* Check outgoing MTU */
1636 if (len > chan->omtu)
1639 /* Create a basic PDU */
1640 skb = l2cap_create_basic_pdu(chan, msg, len);
1642 return PTR_ERR(skb);
1644 l2cap_do_send(chan, skb);
1648 case L2CAP_MODE_ERTM:
1649 case L2CAP_MODE_STREAMING:
1650 /* Entire SDU fits into one PDU */
1651 if (len <= chan->remote_mps) {
1652 control = L2CAP_SDU_UNSEGMENTED;
1653 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1656 return PTR_ERR(skb);
1658 __skb_queue_tail(&chan->tx_q, skb);
1660 if (chan->tx_send_head == NULL)
1661 chan->tx_send_head = skb;
1664 /* Segment SDU into multiples PDUs */
1665 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode transmits immediately, no acknowledgements */
1670 if (chan->mode == L2CAP_MODE_STREAMING) {
1671 l2cap_streaming_send(chan);
/* Hold off transmission while remote is busy or we await an F-bit */
1676 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1677 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1682 err = l2cap_ertm_send(chan);
1689 BT_DBG("bad state %1.1x", chan->mode);
1696 /* Copy frame to all raw sockets on that connection */
1697 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1699 struct sk_buff *nskb;
1700 struct l2cap_chan *chan;
1702 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock and hand a
 * clone of the frame to every RAW channel's recv callback. */
1704 read_lock(&conn->chan_lock);
1705 list_for_each_entry(chan, &conn->chan_l, list) {
1706 struct sock *sk = chan->sk;
1707 if (chan->chan_type != L2CAP_CHAN_RAW)
1710 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: we are under a read lock, cannot sleep */
1713 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv() returning non-zero means the clone was not consumed */
1717 if (chan->ops->recv(chan->data, nskb))
1720 read_unlock(&conn->chan_lock);
1723 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb carrying one signalling command:
 * L2CAP header (CID = signalling channel, LE or BR/EDR) + command
 * header + dlen bytes of payload, fragmented over frag_list when the
 * payload exceeds the connection MTU.  Returns NULL on allocation
 * failure.
 */
1724 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1725 u8 code, u8 ident, u16 dlen, void *data)
1727 struct sk_buff *skb, **frag;
1728 struct l2cap_cmd_hdr *cmd;
1729 struct l2cap_hdr *lh;
1732 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1733 conn, code, ident, dlen);
1735 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1736 count = min_t(unsigned int, conn->mtu, len);
1738 skb = bt_skb_alloc(count, GFP_ATOMIC);
1742 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1743 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the dedicated LE signalling CID */
1745 if (conn->hcon->type == LE_LINK)
1746 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1748 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1750 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1753 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers */
1756 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1757 memcpy(skb_put(skb, count), data, count);
1763 /* Continuation fragments (no L2CAP header) */
1764 frag = &skb_shinfo(skb)->frag_list;
1766 count = min_t(unsigned int, conn->mtu, len);
1768 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1772 memcpy(skb_put(*frag, count), data, count);
1777 frag = &(*frag)->next;
/* Decode one configuration option at *ptr, returning its type, length
 * and value (by-value for 1/2/4-byte options, pointer for larger ones)
 * and advancing past it.  Returns the number of bytes consumed.
 */
1787 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1789 struct l2cap_conf_opt *opt = *ptr;
1792 len = L2CAP_CONF_OPT_SIZE + opt->len;
1800 *val = *((u8 *) opt->val);
1804 *val = get_unaligned_le16(opt->val);
1808 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes instead */
1812 *val = (unsigned long) opt->val;
1816 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * the cursor.  Values wider than 4 bytes are copied from the pointer
 * passed in 'val'; 1/2/4-byte values are stored little-endian inline.
 */
1820 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1822 struct l2cap_conf_opt *opt = *ptr;
1824 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1831 *((u8 *) opt->val) = val;
1835 put_unaligned_le16(val, opt->val);
1839 put_unaligned_le32(val, opt->val);
/* len > 4: 'val' is actually a pointer to the option payload */
1843 memcpy(opt->val, (void *) val, len);
1847 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: send a pending acknowledgement with the
 * socket bottom-half lock held.  'arg' is the l2cap_chan pointer set
 * up by setup_timer() in l2cap_ertm_init().
 */
1850 static void l2cap_ack_timeout(unsigned long arg)
1852 struct l2cap_chan *chan = (void *) arg;
1854 bh_lock_sock(chan->sk);
1855 l2cap_send_ack(chan);
1856 bh_unlock_sock(chan->sk);
/* Initialise ERTM per-channel state: reset sequence counters, arm the
 * retransmit/monitor/ack timers, set up the SREJ and busy queues, and
 * route backlogged packets through the ERTM receive path.
 */
1859 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1861 struct sock *sk = chan->sk;
1863 chan->expected_ack_seq = 0;
1864 chan->unacked_frames = 0;
1865 chan->buffer_seq = 0;
1866 chan->num_acked = 0;
1867 chan->frames_sent = 0;
1869 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1870 (unsigned long) chan);
1871 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1872 (unsigned long) chan);
1873 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
/* srej_q holds out-of-order frames awaiting selective-reject recovery */
1875 skb_queue_head_init(&chan->srej_q);
1876 skb_queue_head_init(&chan->busy_q);
1878 INIT_LIST_HEAD(&chan->srej_l);
1880 INIT_WORK(&chan->busy_work, l2cap_busy_work);
/* Packets queued on the socket backlog go through the ERTM rx path */
1882 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to request: keep ERTM/streaming only if the
 * remote's feature mask advertises support, otherwise fall back to
 * basic mode.
 */
1885 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1888 case L2CAP_MODE_STREAMING:
1889 case L2CAP_MODE_ERTM:
1890 if (l2cap_mode_supported(mode, remote_feat_mask))
1894 return L2CAP_MODE_BASIC;
/* Build an outgoing Configure Request into 'data': optional MTU option
 * (when imtu differs from the default), then a mode-specific RFC
 * option, then an FCS option when the remote supports FCS and we want
 * it disabled.  Returns the length of the request written.
 */
1898 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1900 struct l2cap_conf_req *req = data;
1901 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1902 void *ptr = req->data;
1904 BT_DBG("chan %p", chan);
/* Only renegotiate the mode on the very first exchange */
1906 if (chan->num_conf_req || chan->num_conf_rsp)
1909 switch (chan->mode) {
1910 case L2CAP_MODE_STREAMING:
1911 case L2CAP_MODE_ERTM:
/* CONF_STATE2_DEVICE means the mode is mandated; don't downgrade */
1912 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1917 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1922 if (chan->imtu != L2CAP_DEFAULT_MTU)
1923 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1925 switch (chan->mode) {
1926 case L2CAP_MODE_BASIC:
/* If the remote knows neither ERTM nor streaming there is no point
 * sending an RFC option at all */
1927 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1928 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1931 rfc.mode = L2CAP_MODE_BASIC;
1933 rfc.max_transmit = 0;
1934 rfc.retrans_timeout = 0;
1935 rfc.monitor_timeout = 0;
1936 rfc.max_pdu_size = 0;
1938 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1939 (unsigned long) &rfc);
1942 case L2CAP_MODE_ERTM:
1943 rfc.mode = L2CAP_MODE_ERTM;
1944 rfc.txwin_size = chan->tx_win;
1945 rfc.max_transmit = chan->max_tx;
1946 rfc.retrans_timeout = 0;
1947 rfc.monitor_timeout = 0;
1948 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp the PDU size so header+control+FCS still fit the ACL MTU */
1949 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1950 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1952 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1953 (unsigned long) &rfc);
1955 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Advertise "no FCS" when we or the remote asked to drop it */
1958 if (chan->fcs == L2CAP_FCS_NONE ||
1959 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1960 chan->fcs = L2CAP_FCS_NONE;
1961 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1965 case L2CAP_MODE_STREAMING:
1966 rfc.mode = L2CAP_MODE_STREAMING;
1968 rfc.max_transmit = 0;
1969 rfc.retrans_timeout = 0;
1970 rfc.monitor_timeout = 0;
1971 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1972 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1973 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1975 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1976 (unsigned long) &rfc);
1978 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1981 if (chan->fcs == L2CAP_FCS_NONE ||
1982 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1983 chan->fcs = L2CAP_FCS_NONE;
1984 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1989 req->dcid = cpu_to_le16(chan->dcid);
1990 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configure Request (chan->conf_req) and
 * build our Configure Response into 'data'.  Walks every option,
 * records MTU/flush-to/RFC/FCS values, rejects unknown non-hint
 * options, negotiates the mode, and echoes back the options we
 * accept (or our counter-proposal on UNACCEPT).  Returns the response
 * length, or -ECONNREFUSED when the mode cannot be agreed.
 */
1997 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1998 struct l2cap_conf_rsp *rsp = data;
1999 void *ptr = rsp->data;
2000 void *req = chan->conf_req;
2001 int len = chan->conf_len;
2002 int type, hint, olen;
2003 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2004 u16 mtu = L2CAP_DEFAULT_MTU;
2005 u16 result = L2CAP_CONF_SUCCESS;
2007 BT_DBG("chan %p", chan);
2009 while (len >= L2CAP_CONF_OPT_SIZE) {
2010 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; non-hints must be understood */
2012 hint = type & L2CAP_CONF_HINT;
2013 type &= L2CAP_CONF_MASK;
2016 case L2CAP_CONF_MTU:
2020 case L2CAP_CONF_FLUSH_TO:
2021 chan->flush_to = val;
2024 case L2CAP_CONF_QOS:
2027 case L2CAP_CONF_RFC:
2028 if (olen == sizeof(rfc))
2029 memcpy(&rfc, (void *) val, olen);
2032 case L2CAP_CONF_FCS:
2033 if (val == L2CAP_FCS_NONE)
2034 set_bit(CONF_NO_FCS_RECV, &chan->conf_state)
/* Unknown non-hint option: reply UNKNOWN and list the offender */
2042 result = L2CAP_CONF_UNKNOWN;
2043 *((u8 *) ptr++) = type;
2048 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2051 switch (chan->mode) {
2052 case L2CAP_MODE_STREAMING:
2053 case L2CAP_MODE_ERTM:
2054 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2055 chan->mode = l2cap_select_mode(rfc.mode,
2056 chan->conn->feat_mask);
/* Mandated mode the peer won't do: refuse the connection */
2060 if (chan->mode != rfc.mode)
2061 return -ECONNREFUSED;
2067 if (chan->mode != rfc.mode) {
2068 result = L2CAP_CONF_UNACCEPT;
2069 rfc.mode = chan->mode;
/* Second refusal on the same mode -> give up */
2071 if (chan->num_conf_rsp == 1)
2072 return -ECONNREFUSED;
2074 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2075 sizeof(rfc), (unsigned long) &rfc);
2079 if (result == L2CAP_CONF_SUCCESS) {
2080 /* Configure output options and let the other side know
2081 * which ones we don't like. */
2083 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2084 result = L2CAP_CONF_UNACCEPT;
2087 set_bit(CONF_MTU_DONE, &chan->conf_state);
2089 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2092 case L2CAP_MODE_BASIC:
2093 chan->fcs = L2CAP_FCS_NONE;
2094 set_bit(CONF_MODE_DONE, &chan->conf_state);
2097 case L2CAP_MODE_ERTM:
2098 chan->remote_tx_win = rfc.txwin_size;
2099 chan->remote_max_tx = rfc.max_transmit;
2101 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2102 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2104 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): these constants are host-order, so the conversion
 * direction should read cpu_to_le16(); le16_to_cpu() happens to
 * produce the same byte swap but is semantically wrong — confirm
 * against the upstream sparse fix. */
2106 rfc.retrans_timeout =
2107 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2108 rfc.monitor_timeout =
2109 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2111 set_bit(CONF_MODE_DONE, &chan->conf_state);
2113 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2114 sizeof(rfc), (unsigned long) &rfc);
2118 case L2CAP_MODE_STREAMING:
2119 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2120 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2122 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2124 set_bit(CONF_MODE_DONE, &chan->conf_state);
2126 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2127 sizeof(rfc), (unsigned long) &rfc);
2132 result = L2CAP_CONF_UNACCEPT;
2134 memset(&rfc, 0, sizeof(rfc));
2135 rfc.mode = chan->mode;
2138 if (result == L2CAP_CONF_SUCCESS)
2139 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2141 rsp->scid = cpu_to_le16(chan->dcid);
2142 rsp->result = cpu_to_le16(result);
2143 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response and build a follow-up Configure
 * Request into 'data' reflecting the adjusted options (MTU, flush
 * timeout, RFC).  Updates *result on unacceptable MTU and adopts the
 * negotiated mode/timeouts on success.  Returns the new request
 * length or -ECONNREFUSED when the mode mismatch is fatal.
 */
2148 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2150 struct l2cap_conf_req *req = data;
2151 void *ptr = req->data;
2154 struct l2cap_conf_rfc rfc;
2156 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2158 while (len >= L2CAP_CONF_OPT_SIZE) {
2159 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2162 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: counter with the floor */
2163 if (val < L2CAP_DEFAULT_MIN_MTU) {
2164 *result = L2CAP_CONF_UNACCEPT;
2165 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2168 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2171 case L2CAP_CONF_FLUSH_TO:
2172 chan->flush_to = val;
2173 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2177 case L2CAP_CONF_RFC:
2178 if (olen == sizeof(rfc))
2179 memcpy(&rfc, (void *)val, olen);
/* A mandated mode cannot be changed by the peer's response */
2181 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2182 rfc.mode != chan->mode)
2183 return -ECONNREFUSED;
2187 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2188 sizeof(rfc), (unsigned long) &rfc);
2193 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2194 return -ECONNREFUSED;
2196 chan->mode = rfc.mode;
2198 if (*result == L2CAP_CONF_SUCCESS) {
2200 case L2CAP_MODE_ERTM:
2201 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2202 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2203 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2205 case L2CAP_MODE_STREAMING:
2206 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2210 req->dcid = cpu_to_le16(chan->dcid);
2211 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configure Response (scid/result/flags, no options)
 * and return its length.
 */
2216 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2218 struct l2cap_conf_rsp *rsp = data;
2219 void *ptr = rsp->data;
2221 BT_DBG("chan %p", chan);
2223 rsp->scid = cpu_to_le16(chan->dcid);
2224 rsp->result = cpu_to_le16(result);
2225 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (success) for a channel whose
 * acceptance was postponed, then kick off configuration by sending
 * our first Configure Request unless one was already sent.
 */
2230 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2232 struct l2cap_conn_rsp rsp;
2233 struct l2cap_conn *conn = chan->conn;
2236 rsp.scid = cpu_to_le16(chan->dcid);
2237 rsp.dcid = cpu_to_le16(chan->scid);
2238 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2239 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply with the ident saved from the peer's Connect Request */
2240 l2cap_send_cmd(conn, chan->ident,
2241 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2243 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2246 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2247 l2cap_build_conf_req(chan, buf), buf);
2248 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and
 * adopt its timeouts/MPS.  No-op for channels not in ERTM or
 * streaming mode.
 */
2251 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2255 struct l2cap_conf_rfc rfc;
2257 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2259 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2262 while (len >= L2CAP_CONF_OPT_SIZE) {
2263 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2266 case L2CAP_CONF_RFC:
2267 if (olen == sizeof(rfc))
2268 memcpy(&rfc, (void *)val, olen);
2275 case L2CAP_MODE_ERTM:
2276 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2277 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2278 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2280 case L2CAP_MODE_STREAMING:
2281 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * Information Request (matching ident), treat the feature-mask
 * exchange as done and resume connection setup.
 */
2285 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2287 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (0x0000) is acted upon */
2289 if (rej->reason != 0x0000)
2292 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2293 cmd->ident == conn->info_ident) {
2294 del_timer(&conn->info_timer);
2296 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2297 conn->info_ident = 0;
2299 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening channel for the
 * PSM, enforce link security (except for SDP), check backlog and
 * duplicate DCIDs, create and register the new child channel, and send
 * a Connect Response (success / pending / refused).  When the feature
 * mask is still unknown, also fire off an Information Request; when
 * the connect succeeded, start configuration immediately.
 */
2305 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2307 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2308 struct l2cap_conn_rsp rsp;
2309 struct l2cap_chan *chan = NULL, *pchan;
2310 struct sock *parent, *sk = NULL;
2311 int result, status = L2CAP_CS_NO_INFO;
2313 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2314 __le16 psm = req->psm;
2316 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2318 /* Check if we have socket listening on psm */
2319 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2321 result = L2CAP_CR_BAD_PSM;
2327 bh_lock_sock(parent);
2329 /* Check if the ACL is secure enough (if not SDP) */
2330 if (psm != cpu_to_le16(0x0001) &&
2331 !hci_conn_check_link_mode(conn->hcon)) {
2332 conn->disc_reason = 0x05;
2333 result = L2CAP_CR_SEC_BLOCK;
2337 result = L2CAP_CR_NO_MEM;
2339 /* Check for backlog size */
2340 if (sk_acceptq_is_full(parent)) {
2341 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2345 chan = pchan->ops->new_connection(pchan->data);
2351 write_lock_bh(&conn->chan_lock);
2353 /* Check if we already have channel with that dcid */
2354 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2355 write_unlock_bh(&conn->chan_lock);
2356 sock_set_flag(sk, SOCK_ZAPPED);
2357 chan->ops->close(chan->data);
2361 hci_conn_hold(conn->hcon);
2363 bacpy(&bt_sk(sk)->src, conn->src);
2364 bacpy(&bt_sk(sk)->dst, conn->dst);
2368 bt_accept_enqueue(parent, sk);
2370 __l2cap_chan_add(conn, chan);
2374 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the peer's ident for the deferred Connect Response */
2376 chan->ident = cmd->ident;
2378 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2379 if (l2cap_check_security(chan)) {
/* defer_setup: userspace must accept() before we confirm */
2380 if (bt_sk(sk)->defer_setup) {
2381 l2cap_state_change(chan, BT_CONNECT2);
2382 result = L2CAP_CR_PEND;
2383 status = L2CAP_CS_AUTHOR_PEND;
2384 parent->sk_data_ready(parent, 0);
2386 l2cap_state_change(chan, BT_CONFIG);
2387 result = L2CAP_CR_SUCCESS;
2388 status = L2CAP_CS_NO_INFO;
2391 l2cap_state_change(chan, BT_CONNECT2);
2392 result = L2CAP_CR_PEND;
2393 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange still outstanding: answer "pending" for now */
2396 l2cap_state_change(chan, BT_CONNECT2);
2397 result = L2CAP_CR_PEND;
2398 status = L2CAP_CS_NO_INFO;
2401 write_unlock_bh(&conn->chan_lock);
2404 bh_unlock_sock(parent);
2407 rsp.scid = cpu_to_le16(scid);
2408 rsp.dcid = cpu_to_le16(dcid);
2409 rsp.result = cpu_to_le16(result);
2410 rsp.status = cpu_to_le16(status);
2411 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2413 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2414 struct l2cap_info_req info;
2415 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2417 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2418 conn->info_ident = l2cap_get_ident(conn);
2420 mod_timer(&conn->info_timer, jiffies +
2421 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2423 l2cap_send_cmd(conn, conn->info_ident,
2424 L2CAP_INFO_REQ, sizeof(info), &info);
/* Successful connect: start configuration right away */
2427 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2428 result == L2CAP_CR_SUCCESS) {
2430 set_bit(CONF_REQ_SENT, &chan->conf_state);
2431 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2432 l2cap_build_conf_req(chan, buf), buf);
2433 chan->num_conf_req++;
2439 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2441 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2442 u16 scid, dcid, result, status;
2443 struct l2cap_chan *chan;
2447 scid = __le16_to_cpu(rsp->scid);
2448 dcid = __le16_to_cpu(rsp->dcid);
2449 result = __le16_to_cpu(rsp->result);
2450 status = __le16_to_cpu(rsp->status);
2452 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2455 chan = l2cap_get_chan_by_scid(conn, scid);
2459 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2467 case L2CAP_CR_SUCCESS:
2468 l2cap_state_change(chan, BT_CONFIG);
2471 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2473 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2476 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2477 l2cap_build_conf_req(chan, req), req);
2478 chan->num_conf_req++;
2482 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2486 /* don't delete l2cap channel if sk is owned by user */
2487 if (sock_owned_by_user(sk)) {
2488 l2cap_state_change(chan, BT_DISCONN);
2489 __clear_chan_timer(chan);
2490 __set_chan_timer(chan, HZ / 5);
2494 l2cap_chan_del(chan, ECONNREFUSED);
/* Apply the negotiated FCS default: FCS is meaningless outside
 * ERTM/streaming, and defaults to CRC16 unless the peer explicitly
 * asked for no FCS (CONF_NO_FCS_RECV).
 */
2502 static inline void set_default_fcs(struct l2cap_chan *chan)
2504 /* FCS is enabled only in ERTM or streaming mode, if one or both
2507 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2508 chan->fcs = L2CAP_FCS_NONE;
2509 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2510 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configure Request: validate channel state, accumulate the
 * (possibly multi-fragment) option data in chan->conf_req, and once
 * the final fragment arrives parse it and send our response.  When
 * both directions are configured, finish ERTM init and mark the
 * channel connected; otherwise send our own Configure Request if not
 * yet done.
 */
2513 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2515 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2518 struct l2cap_chan *chan;
2522 dcid = __le16_to_cpu(req->dcid);
2523 flags = __le16_to_cpu(req->flags);
2525 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2527 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only valid in BT_CONFIG (or BT_CONNECT2 with defer_setup) */
2533 if ((bt_sk(sk)->defer_setup && chan->state != BT_CONNECT2) ||
2534 (!bt_sk(sk)->defer_setup && chan->state != BT_CONFIG)) {
2535 struct l2cap_cmd_rej rej;
2537 rej.reason = cpu_to_le16(0x0002);
2538 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2543 /* Reject if config buffer is too small. */
2544 len = cmd_len - sizeof(*req);
2545 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2546 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2547 l2cap_build_conf_rsp(chan, rsp,
2548 L2CAP_CONF_REJECT, flags), rsp);
/* Buffer the fragment; parsing happens only on the final one */
2553 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2554 chan->conf_len += len;
2556 if (flags & 0x0001) {
2557 /* Incomplete config. Send empty response. */
2558 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2559 l2cap_build_conf_rsp(chan, rsp,
2560 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2564 /* Complete config. */
2565 len = l2cap_parse_conf_req(chan, rsp);
2567 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2571 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2572 chan->num_conf_rsp++;
2574 /* Reset config buffer. */
2577 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured -> channel is fully established */
2580 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2581 set_default_fcs(chan);
2583 l2cap_state_change(chan, BT_CONNECTED);
2585 chan->next_tx_seq = 0;
2586 chan->expected_tx_seq = 0;
2587 skb_queue_head_init(&chan->tx_q);
2588 if (chan->mode == L2CAP_MODE_ERTM)
2589 l2cap_ertm_init(chan);
2591 l2cap_chan_ready(sk);
2595 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2597 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2598 l2cap_build_conf_req(chan, buf), buf);
2599 chan->num_conf_req++;
/* Handle a Configure Response: on SUCCESS adopt the returned RFC
 * option; on UNACCEPT re-negotiate (bounded by MAX_CONF_RSP) by
 * parsing the counter-options and sending a new Configure Request;
 * anything else forces a disconnect.  When both directions are done,
 * finish ERTM init and mark the channel connected.
 */
2607 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2609 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2610 u16 scid, flags, result;
2611 struct l2cap_chan *chan;
/* NOTE(review): cmd->len is a wire-order __le16 used without
 * le16_to_cpu() here — harmless on little-endian hosts only; the
 * sibling handlers convert it.  Confirm against upstream. */
2613 int len = cmd->len - sizeof(*rsp);
2615 scid = __le16_to_cpu(rsp->scid);
2616 flags = __le16_to_cpu(rsp->flags);
2617 result = __le16_to_cpu(rsp->result);
2619 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2620 scid, flags, result);
2622 chan = l2cap_get_chan_by_scid(conn, scid);
2629 case L2CAP_CONF_SUCCESS:
2630 l2cap_conf_rfc_get(chan, rsp->data, len);
2633 case L2CAP_CONF_UNACCEPT:
2634 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Counter-options must fit our request buffer */
2637 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2638 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2642 /* throw out any old stored conf requests */
2643 result = L2CAP_CONF_SUCCESS;
2644 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2647 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2651 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2652 L2CAP_CONF_REQ, len, req);
2653 chan->num_conf_req++;
2654 if (result != L2CAP_CONF_SUCCESS)
/* Rejected outright (or retries exhausted): reset the connection */
2660 sk->sk_err = ECONNRESET;
2661 __set_chan_timer(chan, HZ * 5);
2662 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2669 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2671 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2672 set_default_fcs(chan);
2674 l2cap_state_change(chan, BT_CONNECTED);
2675 chan->next_tx_seq = 0;
2676 chan->expected_tx_seq = 0;
2677 skb_queue_head_init(&chan->tx_q);
2678 if (chan->mode == L2CAP_MODE_ERTM)
2679 l2cap_ertm_init(chan);
2681 l2cap_chan_ready(sk);
/* Handle a Disconnect Request: acknowledge with a Disconnect Response,
 * shut the socket down, and delete the channel — deferred via a short
 * timer when userspace currently owns the socket lock.
 */
2689 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2691 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2692 struct l2cap_disconn_rsp rsp;
2694 struct l2cap_chan *chan;
2697 scid = __le16_to_cpu(req->scid);
2698 dcid = __le16_to_cpu(req->dcid);
2700 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid — look up by dcid */
2702 chan = l2cap_get_chan_by_scid(conn, dcid);
2708 rsp.dcid = cpu_to_le16(chan->scid);
2709 rsp.scid = cpu_to_le16(chan->dcid);
2710 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2712 sk->sk_shutdown = SHUTDOWN_MASK;
2714 /* don't delete l2cap channel if sk is owned by user */
2715 if (sock_owned_by_user(sk)) {
2716 l2cap_state_change(chan, BT_DISCONN);
2717 __clear_chan_timer(chan);
2718 __set_chan_timer(chan, HZ / 5);
2723 l2cap_chan_del(chan, ECONNRESET);
2726 chan->ops->close(chan->data);
/* Handle a Disconnect Response (the peer confirmed our disconnect):
 * delete the channel, deferring via a short timer when userspace owns
 * the socket lock.
 */
2730 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2732 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2734 struct l2cap_chan *chan;
2737 scid = __le16_to_cpu(rsp->scid);
2738 dcid = __le16_to_cpu(rsp->dcid);
2740 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2742 chan = l2cap_get_chan_by_scid(conn, scid);
2748 /* don't delete l2cap channel if sk is owned by user */
2749 if (sock_owned_by_user(sk)) {
2750 l2cap_state_change(chan,BT_DISCONN);
2751 __clear_chan_timer(chan);
2752 __set_chan_timer(chan, HZ / 5);
/* Clean shutdown: err = 0, unlike the reset path in disconnect_req */
2757 l2cap_chan_del(chan, 0);
2760 chan->ops->close(chan->data);
/* Handle an Information Request: answer with our feature mask, with
 * the fixed-channel map, or with NOTSUPP for unknown types.
 */
2764 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2766 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2769 type = __le16_to_cpu(req->type);
2771 BT_DBG("type 0x%4.4x", type);
2773 if (type == L2CAP_IT_FEAT_MASK) {
2775 u32 feat_mask = l2cap_feat_mask;
2776 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2777 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2778 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming on top of the static feature mask */
2780 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2782 put_unaligned_le32(feat_mask, rsp->data);
2783 l2cap_send_cmd(conn, cmd->ident,
2784 L2CAP_INFO_RSP, sizeof(buf), buf);
2785 } else if (type == L2CAP_IT_FIXED_CHAN) {
2787 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2788 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2789 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte rsp header */
2790 memcpy(buf + 4, l2cap_fixed_chan, 8);
2791 l2cap_send_cmd(conn, cmd->ident,
2792 L2CAP_INFO_RSP, sizeof(buf), buf);
2794 struct l2cap_info_rsp rsp;
2795 rsp.type = cpu_to_le16(type);
2796 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2797 l2cap_send_cmd(conn, cmd->ident,
2798 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response to our request: stash the peer's
 * feature mask, chain a fixed-channel query when advertised, and once
 * the exchange completes resume connection setup via
 * l2cap_conn_start().
 */
2804 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2806 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2809 type = __le16_to_cpu(rsp->type);
2810 result = __le16_to_cpu(rsp->result);
2812 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2814 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2815 if (cmd->ident != conn->info_ident ||
2816 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2819 del_timer(&conn->info_timer);
/* Peer refused: finish the exchange with whatever we have */
2821 if (result != L2CAP_IR_SUCCESS) {
2822 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2823 conn->info_ident = 0;
2825 l2cap_conn_start(conn);
2830 if (type == L2CAP_IT_FEAT_MASK) {
2831 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Fixed channels supported -> one more round-trip to query them */
2833 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2834 struct l2cap_info_req req;
2835 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2837 conn->info_ident = l2cap_get_ident(conn);
2839 l2cap_send_cmd(conn, conn->info_ident,
2840 L2CAP_INFO_REQ, sizeof(req), &req);
2842 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2843 conn->info_ident = 0;
2845 l2cap_conn_start(conn);
2847 } else if (type == L2CAP_IT_FIXED_CHAN) {
2848 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2849 conn->info_ident = 0;
2851 l2cap_conn_start(conn);
/* Validate LE connection parameters against the spec-mandated ranges:
 * interval 6..3200 (min <= max), supervision timeout 10..3200, timeout
 * strictly greater than 8*max interval, latency <= 499 and below the
 * derived maximum.  Returns 0 when acceptable, negative otherwise.
 */
2857 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2862 if (min > max || min < 6 || max > 3200)
2865 if (to_multiplier < 10 || to_multiplier > 3200)
2868 if (max >= to_multiplier * 8)
2871 max_latency = (to_multiplier * 8 / max) - 1;
2872 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master only):
 * validate the requested parameters, send accept/reject, and when
 * accepted push the update to the controller via hci_le_conn_update().
 */
2878 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2879 struct l2cap_cmd_hdr *cmd, u8 *data)
2881 struct hci_conn *hcon = conn->hcon;
2882 struct l2cap_conn_param_update_req *req;
2883 struct l2cap_conn_param_update_rsp rsp;
2884 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply parameter updates */
2887 if (!(hcon->link_mode & HCI_LM_MASTER))
2890 cmd_len = __le16_to_cpu(cmd->len);
2891 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2894 req = (struct l2cap_conn_param_update_req *) data;
2895 min = __le16_to_cpu(req->min);
2896 max = __le16_to_cpu(req->max);
2897 latency = __le16_to_cpu(req->latency);
2898 to_multiplier = __le16_to_cpu(req->to_multiplier);
2900 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2901 min, max, latency, to_multiplier);
2903 memset(&rsp, 0, sizeof(rsp));
2905 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2907 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2909 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2911 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2915 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler.  Echo
 * Requests are answered inline; unknown opcodes are logged and
 * rejected by the caller via the returned error.
 */
2920 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2921 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2925 switch (cmd->code) {
2926 case L2CAP_COMMAND_REJ:
2927 l2cap_command_rej(conn, cmd, data);
2930 case L2CAP_CONN_REQ:
2931 err = l2cap_connect_req(conn, cmd, data);
2934 case L2CAP_CONN_RSP:
2935 err = l2cap_connect_rsp(conn, cmd, data);
2938 case L2CAP_CONF_REQ:
2939 err = l2cap_config_req(conn, cmd, cmd_len, data);
2942 case L2CAP_CONF_RSP:
2943 err = l2cap_config_rsp(conn, cmd, data);
2946 case L2CAP_DISCONN_REQ:
2947 err = l2cap_disconnect_req(conn, cmd, data);
2950 case L2CAP_DISCONN_RSP:
2951 err = l2cap_disconnect_rsp(conn, cmd, data);
2954 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back with the same ident */
2955 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2958 case L2CAP_ECHO_RSP:
2961 case L2CAP_INFO_REQ:
2962 err = l2cap_information_req(conn, cmd, data);
2965 case L2CAP_INFO_RSP:
2966 err = l2cap_information_rsp(conn, cmd, data);
2970 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command.  Only the connection-parameter
 * update request needs handling here; rejects and update responses
 * are ignored, anything else is logged as unknown.
 */
2978 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2979 struct l2cap_cmd_hdr *cmd, u8 *data)
2981 switch (cmd->code) {
2982 case L2CAP_COMMAND_REJ:
2985 case L2CAP_CONN_PARAM_UPDATE_REQ:
2986 return l2cap_conn_param_update_req(conn, cmd, data);
2988 case L2CAP_CONN_PARAM_UPDATE_RSP:
2992 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the signalling channel: mirror it to raw
 * sockets, then iterate over the packed command headers, dispatching
 * each to the LE or BR/EDR handler and sending a Command Reject when
 * a handler fails.
 */
2997 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2998 struct sk_buff *skb)
3000 u8 *data = skb->data;
3002 struct l2cap_cmd_hdr cmd;
3005 l2cap_raw_recv(conn, skb);
3007 while (len >= L2CAP_CMD_HDR_SIZE) {
3009 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3010 data += L2CAP_CMD_HDR_SIZE;
3011 len -= L2CAP_CMD_HDR_SIZE;
3013 cmd_len = le16_to_cpu(cmd.len);
3015 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated payloads and the reserved ident 0 */
3017 if (cmd_len > len || !cmd.ident) {
3018 BT_DBG("corrupted command");
3022 if (conn->hcon->type == LE_LINK)
3023 err = l2cap_le_sig_cmd(conn, &cmd, data);
3025 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3028 struct l2cap_cmd_rej rej;
3030 BT_ERR("Wrong link type (%d)", err);
3032 /* FIXME: Map err to a valid reason */
3033 rej.reason = cpu_to_le16(0);
3034 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailing a received frame when the channel has
 * FCS enabled.  skb_trim() removes the 2 FCS bytes first; the received
 * FCS is then read from just past the trimmed payload (skb->data +
 * skb->len points at the old tail), and the CRC is recomputed over the
 * L2CAP header (+2 control bytes) plus payload.  Non-zero on mismatch. */
3044 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3046 u16 our_fcs, rcv_fcs;
3047 int hdr_size = L2CAP_HDR_SIZE + 2;
3049 if (chan->fcs == L2CAP_FCS_CRC16) {
3050 skb_trim(skb, skb->len - 2);
3051 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3052 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3054 if (our_fcs != rcv_fcs)
/* After a poll, answer with whatever is appropriate: an RNR S-frame if
 * we are locally busy, retransmissions/I-frames if we have pending
 * data, or a plain RR S-frame if nothing else was sent at all
 * (frames_sent stayed 0). */
3060 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3064 chan->frames_sent = 0;
3066 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3068 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3069 control |= L2CAP_SUPER_RCV_NOT_READY;
3070 l2cap_send_sframe(chan, control);
3071 set_bit(CONN_RNR_SENT, &chan->conn_state);
3074 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3075 l2cap_retransmit_frames(chan);
3077 l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: acknowledge with an RR. */
3079 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3080 chan->frames_sent == 0) {
3081 control |= L2CAP_SUPER_RCV_READY;
3082 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq distance from buffer_seq (all arithmetic is
 * modulo-64, the ERTM sequence space).  Duplicate tx_seq values are
 * detected against queued frames; otherwise the skb is inserted before
 * the first queued frame with a larger offset, or appended at the tail. */
3086 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3088 struct sk_buff *next_skb;
3089 int tx_seq_offset, next_tx_seq_offset;
3091 bt_cb(skb)->tx_seq = tx_seq;
3092 bt_cb(skb)->sar = sar;
3094 next_skb = skb_peek(&chan->srej_q);
3096 __skb_queue_tail(&chan->srej_q, skb);
/* Offset of the new frame from the receive window base, mod 64. */
3100 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3101 if (tx_seq_offset < 0)
3102 tx_seq_offset += 64;
3105 if (bt_cb(next_skb)->tx_seq == tx_seq)
3108 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3109 chan->buffer_seq) % 64;
3110 if (next_tx_seq_offset < 0)
3111 next_tx_seq_offset += 64;
3113 if (next_tx_seq_offset > tx_seq_offset) {
3114 __skb_queue_before(&chan->srej_q, next_skb, skb);
3118 if (skb_queue_is_last(&chan->srej_q, next_skb))
3121 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3123 __skb_queue_tail(&chan->srej_q, skb);
/* Reassemble an SDU from ERTM I-frames according to the SAR bits in
 * the control field: UNSEGMENTED frames are delivered directly, START
 * allocates chan->sdu (sized from the 16-bit SDU length prefix and
 * bounded by the incoming MTU), CONTINUE appends, and END appends,
 * clones and delivers.  CONN_SAR_RETRY marks a delivery that failed
 * (e.g. receiver busy) so END processing can be re-entered without
 * appending the payload twice.  On protocol violations the channel is
 * disconnected with ECONNRESET. */
3128 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3130 struct sk_buff *_skb;
3133 switch (control & L2CAP_CTRL_SAR) {
3134 case L2CAP_SDU_UNSEGMENTED:
3135 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3138 return chan->ops->recv(chan->data, skb);
3140 case L2CAP_SDU_START:
3141 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3144 chan->sdu_len = get_unaligned_le16(skb->data);
3146 if (chan->sdu_len > chan->imtu)
3149 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3153 /* pull sdu_len bytes only after alloc, because of Local Busy
3154 * condition we have to be sure that this will be executed
3155 * only once, i.e., when alloc does not fail */
3158 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3160 set_bit(CONN_SAR_SDU, &chan->conn_state);
3161 chan->partial_sdu_len = skb->len;
3164 case L2CAP_SDU_CONTINUE:
3165 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3171 chan->partial_sdu_len += skb->len;
3172 if (chan->partial_sdu_len > chan->sdu_len)
3175 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* SDU end (fallthrough case label elided in this excerpt). */
3180 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3186 if (!test_bit(CONN_SAR_RETRY, &chan->conn_state)) {
3187 chan->partial_sdu_len += skb->len;
3189 if (chan->partial_sdu_len > chan->imtu)
3192 if (chan->partial_sdu_len != chan->sdu_len)
3195 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3198 _skb = skb_clone(chan->sdu, GFP_ATOMIC)
3200 set_bit(CONN_SAR_RETRY, &chan->conn_state);
3204 err = chan->ops->recv(chan->data, _skb);
3207 set_bit(CONN_SAR_RETRY, &chan->conn_state);
3211 clear_bit(CONN_SAR_RETRY, &chan->conn_state);
3212 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3214 kfree_skb(chan->sdu);
3222 kfree_skb(chan->sdu);
3226 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Drain frames parked on busy_q while locally busy, pushing each into
 * SDU reassembly; if one fails it is requeued at the head and we stop.
 * Once the queue empties, exit local-busy: advance buffer_seq per
 * delivered frame, and if we previously sent RNR, poll the peer with
 * an RR+P S-frame, start the monitor timer and wait for the F-bit. */
3231 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
3233 struct sk_buff *skb;
3237 while ((skb = skb_dequeue(&chan->busy_q))) {
3238 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3239 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3241 skb_queue_head(&chan->busy_q, skb);
3245 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3248 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3251 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3252 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3253 l2cap_send_sframe(chan, control);
3254 chan->retry_count = 1;
3256 __clear_retrans_timer(chan);
3257 __set_monitor_timer(chan);
3259 set_bit(CONN_WAIT_F, &chan->conn_state);
3262 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3263 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3265 BT_DBG("chan %p, Exit local busy", chan);
/* Workqueue handler for the local-busy condition: sleep on the
 * socket's wait queue and periodically (timeo = HZ/5) retry pushing
 * the parked busy_q frames.  Gives up and disconnects with EBUSY after
 * L2CAP_LOCAL_BUSY_TRIES attempts; also exits on pending signals or a
 * socket error. */
3270 static void l2cap_busy_work(struct work_struct *work)
3272 DECLARE_WAITQUEUE(wait, current);
3273 struct l2cap_chan *chan =
3274 container_of(work, struct l2cap_chan, busy_work);
3275 struct sock *sk = chan->sk;
3276 int n_tries = 0, timeo = HZ/5, err;
3277 struct sk_buff *skb;
3281 add_wait_queue(sk_sleep(sk), &wait);
3282 while ((skb = skb_peek(&chan->busy_q))) {
3283 set_current_state(TASK_INTERRUPTIBLE);
3285 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3287 l2cap_send_disconn_req(chan->conn, chan, EBUSY);
3294 if (signal_pending(current)) {
3295 err = sock_intr_errno(timeo);
3300 timeo = schedule_timeout(timeo);
3303 err = sock_error(sk);
3307 if (l2cap_try_push_rx_skb(chan) == 0)
3311 set_current_state(TASK_RUNNING);
3312 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver one in-sequence I-frame.  If already locally busy, just park
 * it on busy_q and retry the queue.  Otherwise try reassembly; on
 * success advance buffer_seq.  On failure (receiver cannot accept),
 * enter local busy: park the frame, send an RNR S-frame carrying the
 * current buffer_seq, stop the ack timer and kick the busy worker. */
3317 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3321 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3322 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3323 __skb_queue_tail(&chan->busy_q, skb);
3324 return l2cap_try_push_rx_skb(chan);
3329 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3331 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3335 /* Busy Condition */
3336 BT_DBG("chan %p, Enter local busy", chan);
3338 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3339 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3340 __skb_queue_tail(&chan->busy_q, skb);
3342 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3343 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3344 l2cap_send_sframe(chan, sctrl);
3346 set_bit(CONN_RNR_SENT, &chan->conn_state);
3348 __clear_ack_timer(chan);
3350 queue_work(_busy_wq, &chan->busy_work);
/* SDU reassembly for streaming mode.  Unlike ERTM, errors here do not
 * tear down the channel: a SAR-state violation or oversized partial
 * SDU simply drops the in-progress SDU (kfree_skb(chan->sdu)) and the
 * stream continues with the next frame. */
3355 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3357 struct sk_buff *_skb;
3361 * TODO: We have to notify the userland if some data is lost with the
3365 switch (control & L2CAP_CTRL_SAR) {
3366 case L2CAP_SDU_UNSEGMENTED:
/* Abandon any half-built SDU before delivering the standalone frame. */
3367 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3368 kfree_skb(chan->sdu);
3372 err = chan->ops->recv(chan->data, skb);
3378 case L2CAP_SDU_START:
3379 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3380 kfree_skb(chan->sdu);
3384 chan->sdu_len = get_unaligned_le16(skb->data);
3387 if (chan->sdu_len > chan->imtu) {
3392 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3398 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3400 set_bit(CONN_SAR_SDU, &chan->conn_state);
3401 chan->partial_sdu_len = skb->len;
3405 case L2CAP_SDU_CONTINUE:
3406 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3409 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3411 chan->partial_sdu_len += skb->len;
3412 if (chan->partial_sdu_len > chan->sdu_len)
3413 kfree_skb(chan->sdu);
/* SDU end (case label elided in this excerpt): append, deliver clone. */
3420 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3423 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3425 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3426 chan->partial_sdu_len += skb->len;
3428 if (chan->partial_sdu_len > chan->imtu)
3431 if (chan->partial_sdu_len == chan->sdu_len) {
3432 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3433 err = chan->ops->recv(chan->data, _skb);
3440 kfree_skb(chan->sdu);
/* After a SREJ'd frame arrives, flush the run of consecutive frames
 * now available at the head of srej_q into reassembly, advancing both
 * buffer_seq_srej and the expected tx_seq (mod 64) until the next gap. */
3448 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3450 struct sk_buff *skb;
3453 while ((skb = skb_peek(&chan->srej_q))) {
3454 if (bt_cb(skb)->tx_seq != tx_seq)
3457 skb = skb_dequeue(&chan->srej_q);
3458 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3459 l2cap_ertm_reassembly_sdu(chan, skb, control);
3460 chan->buffer_seq_srej =
3461 (chan->buffer_seq_srej + 1) % 64;
3462 tx_seq = (tx_seq + 1) % 64;
/* Resend SREJ S-frames for every entry in the pending srej list up to
 * and including tx_seq; each resent entry is moved to the tail of the
 * list so ordering by request time is preserved. */
3466 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3468 struct srej_list *l, *tmp;
3471 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3472 if (l->tx_seq == tx_seq) {
3477 control = L2CAP_SUPER_SELECT_REJECT;
3478 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3479 l2cap_send_sframe(chan, control);
3481 list_add_tail(&l->list, &chan->srej_l);
/* Send a SREJ S-frame for every sequence number missing between
 * expected_tx_seq and the received tx_seq, recording each request in
 * srej_l; finally step expected_tx_seq past the received frame. */
3485 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3487 struct srej_list *new;
3490 while (tx_seq != chan->expected_tx_seq) {
3491 control = L2CAP_SUPER_SELECT_REJECT;
3492 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3493 l2cap_send_sframe(chan, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) result is dereferenced without a
 * NULL check — an atomic allocation failure here would oops. */
3495 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3496 new->tx_seq = chan->expected_tx_seq;
3497 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3498 list_add_tail(&new->list, &chan->srej_l);
3500 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM receive path for I-frames.  Handles, in order: F-bit
 * processing when waiting for a poll response; acking transmitted
 * frames via req_seq; the in-sequence fast path; window validation
 * (disconnect on out-of-window tx_seq); and the SREJ recovery state
 * machine for out-of-sequence frames (queueing, gap checks, duplicate
 * detection, entering/exiting SREJ_SENT).  In-sequence frames are
 * pushed to reassembly and acked every num_to_ack (= tx_win/6 + 1)
 * frames. */
3503 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3505 u8 tx_seq = __get_txseq(rx_control);
3506 u8 req_seq = __get_reqseq(rx_control);
3507 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3508 int tx_seq_offset, expected_tx_seq_offset;
3509 int num_to_ack = (chan->tx_win/6) + 1;
3512 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3513 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer, resume the
 * retransmission timer if frames are still unacked. */
3515 if (L2CAP_CTRL_FINAL & rx_control &&
3516 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3517 __clear_monitor_timer(chan);
3518 if (chan->unacked_frames > 0)
3519 __set_retrans_timer(chan);
3520 clear_bit(CONN_WAIT_F, &chan->conn_state);
3523 chan->expected_ack_seq = req_seq;
3524 l2cap_drop_acked_frames(chan);
3526 if (tx_seq == chan->expected_tx_seq)
3529 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3530 if (tx_seq_offset < 0)
3531 tx_seq_offset += 64;
3533 /* invalid tx_seq */
3534 if (tx_seq_offset >= chan->tx_win) {
3535 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3539 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3542 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3543 struct srej_list *first;
3545 first = list_first_entry(&chan->srej_l,
3546 struct srej_list, list);
3547 if (tx_seq == first->tx_seq) {
3548 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3549 l2cap_check_srej_gap(chan, tx_seq);
3551 list_del(&first->list);
/* All SREJ requests satisfied: leave recovery, sync buffer_seq. */
3554 if (list_empty(&chan->srej_l)) {
3555 chan->buffer_seq = chan->buffer_seq_srej;
3556 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3557 l2cap_send_ack(chan);
3558 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3561 struct srej_list *l;
3563 /* duplicated tx_seq */
3564 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3567 list_for_each_entry(l, &chan->srej_l, list) {
3568 if (l->tx_seq == tx_seq) {
3569 l2cap_resend_srejframe(chan, tx_seq);
3573 l2cap_send_srejframe(chan, tx_seq);
3576 expected_tx_seq_offset =
3577 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3578 if (expected_tx_seq_offset < 0)
3579 expected_tx_seq_offset += 64;
3581 /* duplicated tx_seq */
3582 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
3585 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3587 BT_DBG("chan %p, Enter SREJ", chan);
3589 INIT_LIST_HEAD(&chan->srej_l);
3590 chan->buffer_seq_srej = chan->buffer_seq;
3592 __skb_queue_head_init(&chan->srej_q);
3593 __skb_queue_head_init(&chan->busy_q);
3594 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3596 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3598 l2cap_send_srejframe(chan, tx_seq);
3600 __clear_ack_timer(chan);
/* In-sequence path (label elided in this excerpt). */
3605 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3607 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3608 bt_cb(skb)->tx_seq = tx_seq;
3609 bt_cb(skb)->sar = sar;
3610 __skb_queue_tail(&chan->srej_q, skb);
3614 err = l2cap_push_rx_skb(chan, skb, rx_control);
3618 if (rx_control & L2CAP_CTRL_FINAL) {
3619 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3620 l2cap_retransmit_frames(chan);
3623 __set_ack_timer(chan);
3625 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3626 if (chan->num_acked == num_to_ack - 1)
3627 l2cap_send_ack(chan);
/* Handle a received RR (Receive Ready) S-frame: ack frames up to
 * req_seq, then react to the P/F bits — a poll gets an F-bit response
 * (SREJ tail or I/RR/RNR), a final bit triggers retransmission unless
 * a REJ exchange already covered it, and a plain RR resumes sending. */
3636 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3638 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3641 chan->expected_ack_seq = __get_reqseq(rx_control);
3642 l2cap_drop_acked_frames(chan);
3644 if (rx_control & L2CAP_CTRL_POLL) {
3645 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3646 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3647 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3648 (chan->unacked_frames > 0))
3649 __set_retrans_timer(chan);
3651 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3652 l2cap_send_srejtail(chan);
3654 l2cap_send_i_or_rr_or_rnr(chan);
3657 } else if (rx_control & L2CAP_CTRL_FINAL) {
3658 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3660 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3661 l2cap_retransmit_frames(chan);
3664 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3665 (chan->unacked_frames > 0))
3666 __set_retrans_timer(chan);
3668 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3669 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3670 l2cap_send_ack(chan);
3672 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: ack up to the rejected sequence, then
 * retransmit from there.  With the F bit set, retransmission is
 * skipped if a prior REJ was already acted on (CONN_REJ_ACT);
 * otherwise CONN_REJ_ACT is recorded while we wait for our poll's
 * F-bit reply. */
3676 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3678 u8 tx_seq = __get_reqseq(rx_control);
3680 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3682 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3684 chan->expected_ack_seq = tx_seq;
3685 l2cap_drop_acked_frames(chan);
3687 if (rx_control & L2CAP_CTRL_FINAL) {
3688 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3689 l2cap_retransmit_frames(chan);
3691 l2cap_retransmit_frames(chan);
3693 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3694 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame: retransmit the single requested
 * frame.  P bit: also ack up to req_seq, answer with F bit and resume
 * sending.  F bit: clear a matching pending SREJ_ACT, else retransmit.
 * Neither: retransmit and, if waiting for an F-bit, remember the
 * req_seq in srej_save_reqseq with CONN_SREJ_ACT. */
3697 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3699 u8 tx_seq = __get_reqseq(rx_control);
3701 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3703 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3705 if (rx_control & L2CAP_CTRL_POLL) {
3706 chan->expected_ack_seq = tx_seq;
3707 l2cap_drop_acked_frames(chan);
3709 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3710 l2cap_retransmit_one_frame(chan, tx_seq);
3712 l2cap_ertm_send(chan);
3714 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3715 chan->srej_save_reqseq = tx_seq;
3716 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3718 } else if (rx_control & L2CAP_CTRL_FINAL) {
3719 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3720 chan->srej_save_reqseq == tx_seq)
3721 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3723 l2cap_retransmit_one_frame(chan, tx_seq);
3725 l2cap_retransmit_one_frame(chan, tx_seq);
3726 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3727 chan->srej_save_reqseq = tx_seq;
3728 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receive Not Ready) S-frame: mark the remote
 * busy, ack up to req_seq, and — unless in SREJ recovery — stop the
 * retransmission timer.  A poll is answered with RR/RNR (F bit) or the
 * pending SREJ tail as appropriate. */
3733 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3735 u8 tx_seq = __get_reqseq(rx_control);
3737 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3739 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3740 chan->expected_ack_seq = tx_seq;
3741 l2cap_drop_acked_frames(chan);
3743 if (rx_control & L2CAP_CTRL_POLL)
3744 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3746 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3747 __clear_retrans_timer(chan);
3748 if (rx_control & L2CAP_CTRL_POLL)
3749 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3753 if (rx_control & L2CAP_CTRL_POLL)
3754 l2cap_send_srejtail(chan);
3756 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler after
 * common F-bit processing (stop the monitor timer, restart the
 * retransmission timer if frames remain unacked). */
3759 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3761 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3763 if (L2CAP_CTRL_FINAL & rx_control &&
3764 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3765 __clear_monitor_timer(chan);
3766 if (chan->unacked_frames > 0)
3767 __set_retrans_timer(chan);
3768 clear_bit(CONN_WAIT_F, &chan->conn_state);
3771 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3772 case L2CAP_SUPER_RCV_READY:
3773 l2cap_data_channel_rrframe(chan, rx_control);
3776 case L2CAP_SUPER_REJECT:
3777 l2cap_data_channel_rejframe(chan, rx_control);
3780 case L2CAP_SUPER_SELECT_REJECT:
3781 l2cap_data_channel_srejframe(chan, rx_control);
3784 case L2CAP_SUPER_RCV_NOT_READY:
3785 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for ERTM frames on a socket: read the control field,
 * verify FCS (corrupted I-frames are simply dropped — the peer's
 * recovery will request retransmission), check payload length against
 * MPS, validate req_seq against the window of unacked frames, then
 * route to the I-frame or S-frame handler.  Length violations and
 * invalid req_seq disconnect the channel with ECONNRESET. */
3793 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3795 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3798 int len, next_tx_seq_offset, req_seq_offset;
3800 control = get_unaligned_le16(skb->data);
3805 * We can just drop the corrupted I-frame here.
3806 * Receiver will miss it and start proper recovery
3807 * procedures and ask retransmission.
3809 if (l2cap_check_fcs(chan, skb))
3812 if (__is_sar_start(control) && __is_iframe(control))
3815 if (chan->fcs == L2CAP_FCS_CRC16)
3818 if (len > chan->mps) {
3819 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3823 req_seq = __get_reqseq(control);
3824 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3825 if (req_seq_offset < 0)
3826 req_seq_offset += 64;
3828 next_tx_seq_offset =
3829 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3830 if (next_tx_seq_offset < 0)
3831 next_tx_seq_offset += 64;
3833 /* check for invalid req-seq */
3834 if (req_seq_offset > next_tx_seq_offset) {
3835 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3839 if (__is_iframe(control)) {
3841 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3845 l2cap_data_channel_iframe(chan, control, skb);
3849 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3853 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver an incoming data frame to the channel identified by its
 * source CID.  Basic mode checks the MTU and delivers directly; ERTM
 * runs the full receive state machine (or backlogs the skb if the
 * socket is owned by a user context); streaming mode verifies FCS,
 * tolerates sequence gaps by resyncing expected_tx_seq, and
 * reassembles. */
3863 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3865 struct l2cap_chan *chan;
3866 struct sock *sk = NULL;
3871 chan = l2cap_get_chan_by_scid(conn, cid);
3873 BT_DBG("unknown cid 0x%4.4x", cid);
3879 BT_DBG("chan %p, len %d", chan, skb->len);
3881 if (chan->state != BT_CONNECTED)
3884 switch (chan->mode) {
3885 case L2CAP_MODE_BASIC:
3886 /* If socket recv buffers overflows we drop data here
3887 * which is *bad* because L2CAP has to be reliable.
3888 * But we don't have any other choice. L2CAP doesn't
3889 * provide flow control mechanism. */
3891 if (chan->imtu < skb->len)
3894 if (!chan->ops->recv(chan->data, skb))
3898 case L2CAP_MODE_ERTM:
3899 if (!sock_owned_by_user(sk)) {
3900 l2cap_ertm_data_rcv(sk, skb);
3902 if (sk_add_backlog(sk, skb))
3908 case L2CAP_MODE_STREAMING:
3909 control = get_unaligned_le16(skb->data);
3913 if (l2cap_check_fcs(chan, skb))
3916 if (__is_sar_start(control))
3919 if (chan->fcs == L2CAP_FCS_CRC16)
3922 if (len > chan->mps || len < 0 || __is_sframe(control))
3925 tx_seq = __get_txseq(control);
/* Streaming mode never retransmits: on a gap just resync past it. */
3927 if (chan->expected_tx_seq == tx_seq)
3928 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3930 chan->expected_tx_seq = (tx_seq + 1) % 64;
3932 l2cap_streaming_reassembly_sdu(chan, skb, control);
3937 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (PSM-addressed) frame to a matching global
 * channel; the channel must be bound or connected and the payload must
 * fit its incoming MTU. */
3951 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3953 struct sock *sk = NULL;
3954 struct l2cap_chan *chan;
3956 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3964 BT_DBG("sk %p, len %d", sk, skb->len);
3966 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3969 if (chan->imtu < skb->len)
3972 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE ATT fixed-channel frame to a matching global channel
 * looked up by source CID; same bound/connected and MTU checks as the
 * connectionless path. */
3984 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3986 struct sock *sk = NULL;
3987 struct l2cap_chan *chan;
3989 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3997 BT_DBG("sk %p, len %d", sk, skb->len);
3999 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4002 if (chan->imtu < skb->len)
4005 if (!chan->ops->recv(chan->data, skb))
/* Demultiplex a complete L2CAP frame by destination CID: signaling
 * channels, connectionless channel (PSM prefix), LE ATT data, SMP
 * (default path via smp_sig_channel — a failure drops the connection
 * with EACCES), or a regular data channel.  The basic header is
 * stripped and its length is validated against the skb first. */
4017 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4019 struct l2cap_hdr *lh = (void *) skb->data;
4023 skb_pull(skb, L2CAP_HDR_SIZE);
4024 cid = __le16_to_cpu(lh->cid);
4025 len = __le16_to_cpu(lh->len);
4027 if (len != skb->len) {
4032 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4035 case L2CAP_CID_LE_SIGNALING:
4036 case L2CAP_CID_SIGNALING:
4037 l2cap_sig_channel(conn, skb);
4040 case L2CAP_CID_CONN_LESS:
4041 psm = get_unaligned_le16(skb->data);
4043 l2cap_conless_channel(conn, psm, skb);
4046 case L2CAP_CID_LE_DATA:
4047 l2cap_att_channel(conn, cid, skb);
4051 if (smp_sig_channel(conn, skb))
4052 l2cap_conn_del(conn->hcon, EACCES);
4056 l2cap_data_channel(conn, cid, skb);
4061 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI connect-indication hook: scan listening channels for one bound
 * to this adapter's address (exact match, lm1) or to BDADDR_ANY (lm2),
 * accumulating HCI link-mode flags; return the exact match's flags if
 * any, else the wildcard's.  Non-ACL links are refused. */
4063 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4065 int exact = 0, lm1 = 0, lm2 = 0;
4066 struct l2cap_chan *c;
4068 if (type != ACL_LINK)
4071 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4073 /* Find listening sockets and check their link_mode */
4074 read_lock(&chan_list_lock);
4075 list_for_each_entry(c, &chan_list, global_l) {
4076 struct sock *sk = c->sk;
4078 if (c->state != BT_LISTEN)
4081 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4082 lm1 |= HCI_LM_ACCEPT;
4084 lm1 |= HCI_LM_MASTER;
4086 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4087 lm2 |= HCI_LM_ACCEPT;
4089 lm2 |= HCI_LM_MASTER;
4092 read_unlock(&chan_list_lock);
4094 return exact ? lm1 : lm2;
/* HCI connect-confirmation hook: on success create/attach the L2CAP
 * connection and mark it ready; on failure tear it down with the
 * mapped Bluetooth error.  Only ACL and LE links are handled. */
4097 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4099 struct l2cap_conn *conn;
4101 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4103 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4107 conn = l2cap_conn_add(hcon, status);
4109 l2cap_conn_ready(conn);
4111 l2cap_conn_del(hcon, bt_err(status));
/* HCI disconnect-indication hook: report the stored disconnect reason
 * for this connection (only meaningful for ACL/LE links with L2CAP
 * state attached). */
4116 static int l2cap_disconn_ind(struct hci_conn *hcon)
4118 struct l2cap_conn *conn = hcon->l2cap_data;
4120 BT_DBG("hcon %p", hcon);
4122 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4125 return conn->disc_reason;
/* HCI disconnect-confirmation hook: tear down the L2CAP connection
 * with the mapped Bluetooth error code. */
4128 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4130 BT_DBG("hcon %p reason %d", hcon, reason);
4132 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4135 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connection-oriented channel.
 * Encryption dropped: MEDIUM security gets a 5 s grace timer before
 * the channel times out, HIGH security closes it immediately with
 * ECONNREFUSED.  Encryption (re)enabled: cancel the grace timer for
 * MEDIUM security channels. */
4140 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4142 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4145 if (encrypt == 0x00) {
4146 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4147 __clear_chan_timer(chan);
4148 __set_chan_timer(chan, HZ * 5);
4149 } else if (chan->sec_level == BT_SECURITY_HIGH)
4150 l2cap_chan_close(chan, ECONNREFUSED);
4152 if (chan->sec_level == BT_SECURITY_MEDIUM)
4153 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) confirmation hook: walk
 * every channel on the connection and advance its state machine.  LE
 * data channels become ready once encrypted; connected/configured
 * channels get the encryption-change handling; channels in BT_CONNECT
 * send their pending connect request on success or time out quickly on
 * failure; channels in BT_CONNECT2 answer the pending connect response
 * (possibly deferring to userspace for authorization). */
4157 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4159 struct l2cap_conn *conn = hcon->l2cap_data;
4160 struct l2cap_chan *chan;
4165 BT_DBG("conn %p", conn);
4167 read_lock(&conn->chan_lock);
4169 list_for_each_entry(chan, &conn->chan_l, list) {
4170 struct sock *sk = chan->sk;
4174 BT_DBG("chan->scid %d", chan->scid);
4176 if (chan->scid == L2CAP_CID_LE_DATA) {
4177 if (!status && encrypt) {
4178 chan->sec_level = hcon->sec_level;
4179 del_timer(&conn->security_timer);
4180 l2cap_chan_ready(sk);
4187 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4192 if (!status && (chan->state == BT_CONNECTED ||
4193 chan->state == BT_CONFIG)) {
4194 l2cap_check_encryption(chan, encrypt);
4199 if (chan->state == BT_CONNECT) {
4201 struct l2cap_conn_req req;
4202 req.scid = cpu_to_le16(chan->scid);
4203 req.psm = chan->psm;
4205 chan->ident = l2cap_get_ident(conn);
4206 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4208 l2cap_send_cmd(conn, chan->ident,
4209 L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed while connecting: give the channel a short timeout. */
4211 __clear_chan_timer(chan);
4212 __set_chan_timer(chan, HZ / 10);
4214 } else if (chan->state == BT_CONNECT2) {
4215 struct l2cap_conn_rsp rsp;
4219 if (bt_sk(sk)->defer_setup) {
4220 struct sock *parent = bt_sk(sk)->parent;
4221 res = L2CAP_CR_PEND;
4222 stat = L2CAP_CS_AUTHOR_PEND;
4223 parent->sk_data_ready(parent, 0);
4225 l2cap_state_change(chan, BT_CONFIG);
4226 res = L2CAP_CR_SUCCESS;
4227 stat = L2CAP_CS_NO_INFO;
4230 l2cap_state_change(chan, BT_DISCONN);
4231 __set_chan_timer(chan, HZ / 10);
4232 res = L2CAP_CR_SEC_BLOCK;
4233 stat = L2CAP_CS_NO_INFO;
4236 rsp.scid = cpu_to_le16(chan->dcid);
4237 rsp.dcid = cpu_to_le16(chan->scid);
4238 rsp.result = cpu_to_le16(res);
4239 rsp.status = cpu_to_le16(stat);
4240 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4247 read_unlock(&conn->chan_lock);
/* HCI ACL data hook: reassemble L2CAP frames from ACL fragments.  A
 * start fragment (no ACL_CONT flag) must carry at least the basic
 * L2CAP header; if it holds the whole frame it is processed
 * immediately, otherwise a reassembly skb sized for the full frame is
 * allocated (after checking the target channel's MTU) and rx_len
 * tracks the remainder.  Continuation fragments are appended until
 * rx_len hits zero, then the complete frame is dispatched.  Any
 * framing violation marks the connection unreliable (ECOMM) and drops
 * the partial frame. */
4252 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4254 struct l2cap_conn *conn = hcon->l2cap_data;
4257 conn = l2cap_conn_add(hcon, 0);
4262 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4264 if (!(flags & ACL_CONT)) {
4265 struct l2cap_hdr *hdr;
4266 struct l2cap_chan *chan;
/* A new start fragment while reassembly is pending means the previous
 * frame was truncated: discard it. */
4271 BT_ERR("Unexpected start frame (len %d)", skb->len);
4272 kfree_skb(conn->rx_skb);
4273 conn->rx_skb = NULL;
4275 l2cap_conn_unreliable(conn, ECOMM);
4278 /* Start fragment always begin with Basic L2CAP header */
4279 if (skb->len < L2CAP_HDR_SIZE) {
4280 BT_ERR("Frame is too short (len %d)", skb->len);
4281 l2cap_conn_unreliable(conn, ECOMM);
4285 hdr = (struct l2cap_hdr *) skb->data;
4286 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4287 cid = __le16_to_cpu(hdr->cid);
4289 if (len == skb->len) {
4290 /* Complete frame received */
4291 l2cap_recv_frame(conn, skb);
4295 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4297 if (skb->len > len) {
4298 BT_ERR("Frame is too long (len %d, expected len %d)",
4300 l2cap_conn_unreliable(conn, ECOMM);
4304 chan = l2cap_get_chan_by_scid(conn, cid);
4306 if (chan && chan->sk) {
4307 struct sock *sk = chan->sk;
4309 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4310 BT_ERR("Frame exceeding recv MTU (len %d, "
4314 l2cap_conn_unreliable(conn, ECOMM);
4320 /* Allocate skb for the complete frame (with header) */
4321 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4325 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4327 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4329 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4331 if (!conn->rx_len) {
4332 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4333 l2cap_conn_unreliable(conn, ECOMM);
4337 if (skb->len > conn->rx_len) {
4338 BT_ERR("Fragment is too long (len %d, expected %d)",
4339 skb->len, conn->rx_len);
4340 kfree_skb(conn->rx_skb);
4341 conn->rx_skb = NULL;
4343 l2cap_conn_unreliable(conn, ECOMM);
4347 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4349 conn->rx_len -= skb->len;
4351 if (!conn->rx_len) {
4352 /* Complete frame received */
4353 l2cap_recv_frame(conn, conn->rx_skb);
4354 conn->rx_skb = NULL;
/* seq_file show callback for the l2cap debugfs entry: print one line
 * per registered channel (addresses, state, PSM, CIDs, MTUs, security
 * level and mode) under the global channel-list read lock. */
4363 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4365 struct l2cap_chan *c;
4367 read_lock_bh(&chan_list_lock);
4369 list_for_each_entry(c, &chan_list, global_l) {
4370 struct sock *sk = c->sk;
4372 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4373 batostr(&bt_sk(sk)->src),
4374 batostr(&bt_sk(sk)->dst),
4375 c->state, __le16_to_cpu(c->psm),
4376 c->scid, c->dcid, c->imtu, c->omtu,
4377 c->sec_level, c->mode);
4380 read_unlock_bh(&chan_list_lock);
/* debugfs open callback: wire the file to the single-record show fn. */
4385 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4387 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file backed). */
4390 static const struct file_operations l2cap_debugfs_fops = {
4391 .open = l2cap_debugfs_open,
4393 .llseek = seq_lseek,
4394 .release = single_release,
/* Dentry of the debugfs entry, created in l2cap_init(). */
4397 static struct dentry *l2cap_debugfs;
/* HCI protocol descriptor registering L2CAP's callbacks with the HCI
 * core (connection lifecycle, security changes, ACL data). */
4399 static struct hci_proto l2cap_hci_proto = {
4401 .id = HCI_PROTO_L2CAP,
4402 .connect_ind = l2cap_connect_ind,
4403 .connect_cfm = l2cap_connect_cfm,
4404 .disconn_ind = l2cap_disconn_ind,
4405 .disconn_cfm = l2cap_disconn_cfm,
4406 .security_cfm = l2cap_security_cfm,
4407 .recv_acldata = l2cap_recv_acldata
/* Module init: set up the L2CAP socket layer, create the single-thread
 * busy workqueue, register the protocol with HCI (unrolling the socket
 * registration on failure), and create the debugfs entry (failure to
 * do so is only logged). */
4410 int __init l2cap_init(void)
4414 err = l2cap_init_sockets();
4418 _busy_wq = create_singlethread_workqueue("l2cap");
4424 err = hci_register_proto(&l2cap_hci_proto);
4426 BT_ERR("L2CAP protocol registration failed");
4427 bt_sock_unregister(BTPROTO_L2CAP);
4432 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4433 bt_debugfs, NULL, &l2cap_debugfs_fops);
4435 BT_ERR("Failed to create L2CAP debug file");
/* Error path: tear down the workqueue and socket layer. */
4441 destroy_workqueue(_busy_wq);
4442 l2cap_cleanup_sockets();
/* Module exit: remove debugfs, drain and destroy the busy workqueue,
 * unregister from HCI and tear down the socket layer. */
4446 void l2cap_exit(void)
4448 debugfs_remove(l2cap_debugfs);
4450 flush_workqueue(_busy_wq);
4451 destroy_workqueue(_busy_wq);
4453 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4454 BT_ERR("L2CAP protocol unregistration failed");
4456 l2cap_cleanup_sockets();
/* Module parameter: allows disabling ERTM support at load time. */
4459 module_param(disable_ertm, bool, 0644);
4460 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");