2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* Feature mask advertised in the L2CAP information response; fixed-channel
 * support is always claimed here (ERTM/streaming bits appear to be added
 * elsewhere at runtime — lines elided in this listing). */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Bitmap of supported fixed channels; only the signalling channel is set. */
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of every L2CAP channel in the system, guarded by the rwlock. */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers used before their definition. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* Take a reference on a channel (paired with chan_put()). */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt);
/* Drop a reference; the channel is freed on the final put
 * (the free call itself is elided in this listing). */
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on @conn by destination CID (the peer's CID),
 * walking the per-connection list under RCU.  Returns NULL if absent. */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
93 struct l2cap_chan *c, *r = NULL;
97 list_for_each_entry_rcu(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID (our CID). */
108 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
110 struct l2cap_chan *c, *r = NULL;
114 list_for_each_entry_rcu(c, &conn->chan_l, list) {
115 if (c->scid == cid) {
125 /* Find channel with given SCID.
126 * Returns locked socket */
127 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
129 struct l2cap_chan *c;
131 c = __l2cap_get_chan_by_scid(conn, cid);
/* Look up a channel on @conn by its pending signalling identifier. */
137 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
139 struct l2cap_chan *c, *r = NULL;
143 list_for_each_entry_rcu(c, &conn->chan_l, list) {
144 if (c->ident == ident) {
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
154 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
156 struct l2cap_chan *c;
158 c = __l2cap_get_chan_by_ident(conn, ident);
/* Global lookup: channel bound to source port @psm on address @src.
 * Caller must hold chan_list_lock. */
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
166 struct l2cap_chan *c;
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on source address @src, or auto-allocate a free
 * dynamic PSM when @psm is zero.  Serialised by chan_list_lock. */
175 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
179 write_lock_bh(&chan_list_lock);
/* Reject an explicit PSM that is already bound on this source address. */
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd values only, hence the step of 2. */
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
204 write_unlock_bh(&chan_list_lock);
/* Fix a channel's source CID under the global lock (used for raw /
 * fixed-channel sockets). */
208 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
210 write_lock_bh(&chan_list_lock);
214 write_unlock_bh(&chan_list_lock);
/* Allocate the first unused dynamic CID on @conn; the exhaustion return
 * path is elided in this listing (presumably returns 0 — TODO confirm). */
219 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
221 u16 cid = L2CAP_CID_DYN_START;
223 for (; cid < L2CAP_CID_DYN_END; cid++) {
224 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Map a BT_* channel state to a printable name for debug output. */
231 static char *state_to_string(int state)
235 return "BT_CONNECTED";
245 return "BT_CONNECT2";
254 return "invalid state";
/* Change channel state and notify the owner via its state_change op. */
257 static void l2cap_state_change(struct l2cap_chan *chan, int state)
259 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
260 state_to_string(state));
263 chan->ops->state_change(chan->data, state);
/* Per-channel timer expired: choose an errno reflecting the state we
 * timed out in, close the channel and notify the owning socket. */
266 static void l2cap_chan_timeout(struct work_struct *work)
268 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
270 struct sock *sk = chan->sk;
273 BT_DBG("chan %p state %d", chan, chan->state);
/* Timeouts while connected/configuring, or while connecting with more
 * than SDP-level security, are reported as refused connections. */
277 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
278 reason = ECONNREFUSED;
279 else if (chan->state == BT_CONNECT &&
280 chan->sec_level != BT_SECURITY_SDP)
281 reason = ECONNREFUSED;
285 l2cap_chan_close(chan, reason);
289 chan->ops->close(chan->data);
/* Allocate a channel tied to @sk, link it on the global list and set up
 * its timeout work.  Allocation-failure path is elided in this listing. */
293 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
295 struct l2cap_chan *chan;
297 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
303 write_lock_bh(&chan_list_lock);
304 list_add(&chan->global_l, &chan_list);
305 write_unlock_bh(&chan_list_lock);
307 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
309 chan->state = BT_OPEN;
/* Creator owns the initial reference (dropped via chan_put()). */
311 atomic_set(&chan->refcnt, 1);
313 BT_DBG("sk %p chan %p", sk, chan);
/* Unlink from the global list; the final free happens via chan_put(). */
318 void l2cap_chan_destroy(struct l2cap_chan *chan)
320 write_lock_bh(&chan_list_lock);
321 list_del(&chan->global_l);
322 write_unlock_bh(&chan_list_lock);
/* Attach @chan to @conn: pick SCID/DCID and outgoing MTU according to the
 * channel type and link type, seed the default best-effort flow-spec
 * parameters, then publish the channel on the connection's RCU list. */
327 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
329 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
330 chan->psm, chan->dcid);
332 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
336 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
337 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data channel; no dynamic CID. */
339 chan->omtu = L2CAP_LE_DEFAULT_MTU;
340 chan->scid = L2CAP_CID_LE_DATA;
341 chan->dcid = L2CAP_CID_LE_DATA;
343 /* Alloc CID for connection-oriented socket */
344 chan->scid = l2cap_alloc_cid(conn);
345 chan->omtu = L2CAP_DEFAULT_MTU;
347 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
348 /* Connectionless socket */
349 chan->scid = L2CAP_CID_CONN_LESS;
350 chan->dcid = L2CAP_CID_CONN_LESS;
351 chan->omtu = L2CAP_DEFAULT_MTU;
353 /* Raw socket can send/recv signalling messages only */
354 chan->scid = L2CAP_CID_SIGNALING;
355 chan->dcid = L2CAP_CID_SIGNALING;
356 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended flow spec: best-effort service. */
359 chan->local_id = L2CAP_BESTEFFORT_ID;
360 chan->local_stype = L2CAP_SERV_BESTEFFORT;
361 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
362 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
363 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
364 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
368 list_add_rcu(&chan->list, &conn->chan_l);
372 * Must be called on the locked socket. */
/* Detach @chan from its connection: stop the timer, unlink it from the
 * connection list, drop the ACL reference, mark the socket zapped and
 * wake any waiter.  ERTM state (queues, SREJ list, timers) is torn down
 * at the end when configuration had completed in both directions. */
373 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
375 struct sock *sk = chan->sk;
376 struct l2cap_conn *conn = chan->conn;
377 struct sock *parent = bt_sk(sk)->parent;
379 __clear_chan_timer(chan);
381 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
384 /* Delete from channel list */
385 list_del_rcu(&chan->list);
391 hci_conn_put(conn->hcon);
394 l2cap_state_change(chan, BT_CLOSED);
395 sock_set_flag(sk, SOCK_ZAPPED);
/* A not-yet-accepted child is unlinked from the listening parent's
 * accept queue, and the parent is woken. */
401 bt_accept_unlink(sk);
402 parent->sk_data_ready(parent, 0);
404 sk->sk_state_change(sk);
/* Nothing more to free unless configuration finished both ways. */
406 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
407 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
410 skb_queue_purge(&chan->tx_q);
412 if (chan->mode == L2CAP_MODE_ERTM) {
413 struct srej_list *l, *tmp;
415 __clear_retrans_timer(chan);
416 __clear_monitor_timer(chan);
417 __clear_ack_timer(chan);
419 skb_queue_purge(&chan->srej_q);
/* Free all outstanding SREJ bookkeeping entries. */
421 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every child channel still parked on a listening socket's
 * accept queue. */
428 static void l2cap_chan_cleanup_listen(struct sock *parent)
432 BT_DBG("parent %p", parent);
434 /* Close not yet accepted channels */
435 while ((sk = bt_accept_dequeue(parent, NULL))) {
436 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
437 __clear_chan_timer(chan);
439 l2cap_chan_close(chan, ECONNRESET);
441 chan->ops->close(chan->data);
/* Close @chan for @reason, per state:
 *  - listening sockets first close their pending children;
 *  - established/configuring ACL channels send a Disconnect request and
 *    re-arm the channel timer; otherwise the channel is deleted now;
 *  - a BT_CONNECT2 ACL channel answers the pending Connect request with
 *    a reject (security block when setup was deferred) before deletion. */
445 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
447 struct l2cap_conn *conn = chan->conn;
448 struct sock *sk = chan->sk;
450 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
452 switch (chan->state) {
454 l2cap_chan_cleanup_listen(sk);
456 l2cap_state_change(chan, BT_CLOSED);
457 sock_set_flag(sk, SOCK_ZAPPED);
462 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
463 conn->hcon->type == ACL_LINK) {
464 __clear_chan_timer(chan);
465 __set_chan_timer(chan, sk->sk_sndtimeo);
466 l2cap_send_disconn_req(conn, chan, reason);
468 l2cap_chan_del(chan, reason);
472 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
473 conn->hcon->type == ACL_LINK) {
474 struct l2cap_conn_rsp rsp;
/* Deferred setup means authorisation was still pending -> security
 * block; otherwise report the PSM as unacceptable. */
477 if (bt_sk(sk)->defer_setup)
478 result = L2CAP_CR_SEC_BLOCK;
480 result = L2CAP_CR_BAD_PSM;
481 l2cap_state_change(chan, BT_DISCONN);
/* scid/dcid are swapped in the response: the peer's source is our dcid. */
483 rsp.scid = cpu_to_le16(chan->dcid);
484 rsp.dcid = cpu_to_le16(chan->scid);
485 rsp.result = cpu_to_le16(result);
486 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
487 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
491 l2cap_chan_del(chan, reason);
496 l2cap_chan_del(chan, reason);
500 sock_set_flag(sk, SOCK_ZAPPED);
/* Translate channel type / PSM / security level into the HCI
 * authentication requirement used when securing the link. */
505 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
507 if (chan->chan_type == L2CAP_CHAN_RAW) {
/* Raw sockets (e.g. l2ping, dedicated-bonding tools). */
508 switch (chan->sec_level) {
509 case BT_SECURITY_HIGH:
510 return HCI_AT_DEDICATED_BONDING_MITM;
511 case BT_SECURITY_MEDIUM:
512 return HCI_AT_DEDICATED_BONDING;
514 return HCI_AT_NO_BONDING;
516 } else if (chan->psm == cpu_to_le16(0x0001)) {
/* PSM 0x0001 is SDP: low security is promoted to the dedicated
 * SDP level, and bonding is never required. */
517 if (chan->sec_level == BT_SECURITY_LOW)
518 chan->sec_level = BT_SECURITY_SDP;
520 if (chan->sec_level == BT_SECURITY_HIGH)
521 return HCI_AT_NO_BONDING_MITM;
523 return HCI_AT_NO_BONDING;
/* All other channels use general bonding. */
525 switch (chan->sec_level) {
526 case BT_SECURITY_HIGH:
527 return HCI_AT_GENERAL_BONDING_MITM;
528 case BT_SECURITY_MEDIUM:
529 return HCI_AT_GENERAL_BONDING;
531 return HCI_AT_NO_BONDING;
536 /* Service level security */
/* Ask HCI to enforce the channel's security level on the link. */
537 int l2cap_chan_check_security(struct l2cap_chan *chan)
539 struct l2cap_conn *conn = chan->conn;
542 auth_type = l2cap_get_auth_type(chan);
544 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next free signalling identifier for @conn. */
547 static u8 l2cap_get_ident(struct l2cap_conn *conn)
551 /* Get next available identificator.
552 * 1 - 128 are used by kernel.
553 * 129 - 199 are reserved.
554 * 200 - 254 are used by utilities like l2ping, etc.
557 spin_lock_bh(&conn->lock);
/* Wrap back into the kernel's 1-128 range. */
559 if (++conn->tx_ident > 128)
564 spin_unlock_bh(&conn->lock);
/* Build and transmit one signalling command on the connection's HCI
 * channel at maximum priority; marked non-flushable when the controller
 * supports it. */
569 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
571 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
574 BT_DBG("code 0x%2.2x", code);
579 if (lmp_no_flush_capable(conn->hcon->hdev))
580 flags = ACL_START_NO_FLUSH;
584 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
585 skb->priority = HCI_PRIO_MAX;
587 hci_send_acl(conn->hchan, skb, flags);
/* Hand one data skb to the ACL layer, honouring the channel's
 * flushable and force-active flags. */
590 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
592 struct hci_conn *hcon = chan->conn->hcon;
595 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
598 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
599 lmp_no_flush_capable(hcon->hdev))
600 flags = ACL_START_NO_FLUSH;
604 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
605 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM S-frame carrying @control.  The header size
 * depends on whether extended control fields are in use; an FCS is
 * appended when CRC16 was negotiated.  Pending F/P bits are folded into
 * the control field here. */
608 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
611 struct l2cap_hdr *lh;
612 struct l2cap_conn *conn = chan->conn;
615 if (chan->state != BT_CONNECTED)
618 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
619 hlen = L2CAP_EXT_HDR_SIZE;
621 hlen = L2CAP_ENH_HDR_SIZE;
623 if (chan->fcs == L2CAP_FCS_CRC16)
624 hlen += L2CAP_FCS_SIZE;
626 BT_DBG("chan %p, control 0x%8.8x", chan, control);
628 count = min_t(unsigned int, conn->mtu, hlen);
630 control |= __set_sframe(chan);
632 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
633 control |= __set_ctrl_final(chan);
635 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
636 control |= __set_ctrl_poll(chan);
638 skb = bt_skb_alloc(count, GFP_ATOMIC);
642 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
643 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
644 lh->cid = cpu_to_le16(chan->dcid);
646 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* The FCS covers everything from the L2CAP header up to the FCS field. */
648 if (chan->fcs == L2CAP_FCS_CRC16) {
649 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
650 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
653 skb->priority = HCI_PRIO_MAX;
654 l2cap_do_send(chan, skb);
/* Send RR — or RNR while locally busy — acknowledging buffer_seq. */
657 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
659 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
660 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
661 set_bit(CONN_RNR_SENT, &chan->conn_state);
663 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
665 control |= __set_reqseq(chan, chan->buffer_seq);
667 l2cap_send_sframe(chan, control);
/* True when no Connect request is outstanding for this channel. */
670 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
672 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Start connection establishment for @chan: once the remote feature mask
 * is known, send a Connect request (if security allows); otherwise first
 * issue an Information request for the feature mask and arm the info
 * timer. */
675 static void l2cap_do_start(struct l2cap_chan *chan)
677 struct l2cap_conn *conn = chan->conn;
679 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange in flight: wait for it to finish. */
680 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
683 if (l2cap_chan_check_security(chan) &&
684 __l2cap_no_conn_pending(chan)) {
685 struct l2cap_conn_req req;
686 req.scid = cpu_to_le16(chan->scid);
689 chan->ident = l2cap_get_ident(conn);
690 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
692 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
696 struct l2cap_info_req req;
697 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
699 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
700 conn->info_ident = l2cap_get_ident(conn);
702 schedule_delayed_work(&conn->info_timer,
703 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
705 l2cap_send_cmd(conn, conn->info_ident,
706 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check @mode against both the local and the remote feature masks. */
710 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
712 u32 local_feat_mask = l2cap_feat_mask;
714 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
717 case L2CAP_MODE_ERTM:
718 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
719 case L2CAP_MODE_STREAMING:
720 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect request for @chan (stopping the ERTM timers first)
 * and move it to BT_DISCONN; how @err reaches the socket is elided. */
726 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
729 struct l2cap_disconn_req req;
736 if (chan->mode == L2CAP_MODE_ERTM) {
737 __clear_retrans_timer(chan);
738 __clear_monitor_timer(chan);
739 __clear_ack_timer(chan);
742 req.dcid = cpu_to_le16(chan->dcid);
743 req.scid = cpu_to_le16(chan->scid);
744 l2cap_send_cmd(conn, l2cap_get_ident(conn),
745 L2CAP_DISCONN_REQ, sizeof(req), &req);
747 l2cap_state_change(chan, BT_DISCONN);
751 /* ---- L2CAP connections ---- */
/* Advance every channel's connect state machine on @conn:
 *  - BT_CONNECT channels send a Connect request (or are closed when
 *    their mode is unsupported by the peer);
 *  - BT_CONNECT2 channels send the appropriate Connect response and, on
 *    success, the first Configure request. */
752 static void l2cap_conn_start(struct l2cap_conn *conn)
754 struct l2cap_chan *chan;
756 BT_DBG("conn %p", conn);
760 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
761 struct sock *sk = chan->sk;
/* Only connection-oriented channels take part in signalling. */
765 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
770 if (chan->state == BT_CONNECT) {
771 struct l2cap_conn_req req;
773 if (!l2cap_chan_check_security(chan) ||
774 !__l2cap_no_conn_pending(chan)) {
779 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
780 && test_bit(CONF_STATE2_DEVICE,
781 &chan->conf_state)) {
782 /* l2cap_chan_close() calls list_del(chan)
783 * so release the lock */
784 l2cap_chan_close(chan, ECONNRESET);
789 req.scid = cpu_to_le16(chan->scid);
792 chan->ident = l2cap_get_ident(conn);
793 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
795 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
798 } else if (chan->state == BT_CONNECT2) {
799 struct l2cap_conn_rsp rsp;
801 rsp.scid = cpu_to_le16(chan->dcid);
802 rsp.dcid = cpu_to_le16(chan->scid);
804 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: keep the peer pending until userspace
 * authorises the incoming connection. */
805 if (bt_sk(sk)->defer_setup) {
806 struct sock *parent = bt_sk(sk)->parent;
807 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
808 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
810 parent->sk_data_ready(parent, 0);
813 l2cap_state_change(chan, BT_CONFIG);
814 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
815 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
818 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
819 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
822 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first Configure request once, and only after a
 * successful Connect response. */
825 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
826 rsp.result != L2CAP_CR_SUCCESS) {
831 set_bit(CONF_REQ_SENT, &chan->conf_state);
832 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
833 l2cap_build_conf_req(chan, buf), buf);
834 chan->num_conf_req++;
843 /* Find socket with cid and source bdaddr.
844 * Returns closest match, locked.
/* Exact source-address match wins immediately; a BDADDR_ANY listener is
 * remembered as the closest fallback. */
846 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
848 struct l2cap_chan *c, *c1 = NULL;
850 read_lock(&chan_list_lock);
852 list_for_each_entry(c, &chan_list, global_l) {
853 struct sock *sk = c->sk;
855 if (state && c->state != state)
858 if (c->scid == cid) {
860 if (!bacmp(&bt_sk(sk)->src, src)) {
861 read_unlock(&chan_list_lock);
866 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
871 read_unlock(&chan_list_lock);
/* Incoming LE connection: find the listener on the LE data CID, spawn a
 * child channel, put it on the parent's accept queue and mark it
 * connected right away (LE has no L2CAP configuration exchange). */
876 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
878 struct sock *parent, *sk;
879 struct l2cap_chan *chan, *pchan;
883 /* Check if we have socket listening on cid */
884 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
893 /* Check for backlog size */
894 if (sk_acceptq_is_full(parent)) {
895 BT_DBG("backlog full %d", parent->sk_ack_backlog);
899 chan = pchan->ops->new_connection(pchan->data);
/* The child holds a reference on the underlying HCI connection. */
905 hci_conn_hold(conn->hcon);
907 bacpy(&bt_sk(sk)->src, conn->src);
908 bacpy(&bt_sk(sk)->dst, conn->dst);
910 bt_accept_enqueue(parent, sk);
912 l2cap_chan_add(conn, chan);
914 __set_chan_timer(chan, sk->sk_sndtimeo);
916 l2cap_state_change(chan, BT_CONNECTED);
917 parent->sk_data_ready(parent, 0);
920 release_sock(parent);
/* Finish setup of a configured channel: clear config state and timer,
 * mark it connected and wake the socket (and listening parent, if any). */
923 static void l2cap_chan_ready(struct sock *sk)
925 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
926 struct sock *parent = bt_sk(sk)->parent;
928 BT_DBG("sk %p, parent %p", sk, parent);
930 chan->conf_state = 0;
931 __clear_chan_timer(chan);
933 l2cap_state_change(chan, BT_CONNECTED);
934 sk->sk_state_change(sk);
937 parent->sk_data_ready(parent, 0);
/* The underlying HCI link came up: handle LE specifics first, then
 * advance every channel (security, connect, or immediate readiness). */
940 static void l2cap_conn_ready(struct l2cap_conn *conn)
942 struct l2cap_chan *chan;
944 BT_DBG("conn %p", conn);
/* Incoming LE link: dispatch to the LE accept path. */
946 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
947 l2cap_le_conn_ready(conn);
/* Outgoing LE link: kick off SMP at the pending security level. */
949 if (conn->hcon->out && conn->hcon->type == LE_LINK)
950 smp_conn_security(conn, conn->hcon->pending_sec_level);
954 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
955 struct sock *sk = chan->sk;
959 if (conn->hcon->type == LE_LINK) {
/* LE channels become ready as soon as SMP is satisfied. */
960 if (smp_conn_security(conn, chan->sec_level))
961 l2cap_chan_ready(sk);
963 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Connectionless/raw channels need no signalling handshake. */
964 __clear_chan_timer(chan);
965 l2cap_state_change(chan, BT_CONNECTED);
966 sk->sk_state_change(sk);
968 } else if (chan->state == BT_CONNECT)
969 l2cap_do_start(chan);
977 /* Notify sockets that we cannot guaranty reliability anymore */
978 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
980 struct l2cap_chan *chan;
982 BT_DBG("conn %p", conn);
986 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
987 struct sock *sk = chan->sk;
/* Only channels that asked for forced reliability get the error. */
989 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
/* Feature-mask information request timed out: mark the exchange done
 * anyway so channels waiting in l2cap_do_start() are not stuck. */
996 static void l2cap_info_timeout(struct work_struct *work)
998 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1001 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1002 conn->info_ident = 0;
1004 l2cap_conn_start(conn);
/* Tear down the whole L2CAP connection: delete every channel, drop the
 * HCI channel, cancel outstanding timers (info or SMP security,
 * depending on link type) and detach from the hci_conn. */
1007 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1009 struct l2cap_conn *conn = hcon->l2cap_data;
1010 struct l2cap_chan *chan, *l;
1016 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled incoming frame. */
1018 kfree_skb(conn->rx_skb);
1021 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1024 l2cap_chan_del(chan, err);
1026 chan->ops->close(chan->data);
1029 hci_chan_del(conn->hchan);
1031 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1032 cancel_delayed_work_sync(&conn->info_timer);
1034 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1035 cancel_delayed_work_sync(&conn->security_timer);
1036 smp_chan_destroy(conn);
1039 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: kill the whole connection. */
1043 static void security_timeout(struct work_struct *work)
1045 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1046 security_timer.work);
1048 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate an HCI
 * channel, initialise MTU, addresses, lists and the per-link-type timer.
 * Allocation-failure return paths are elided in this listing. */
1051 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1053 struct l2cap_conn *conn = hcon->l2cap_data;
1054 struct hci_chan *hchan;
1059 hchan = hci_chan_create(hcon);
1063 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1065 hci_chan_del(hchan);
1069 hcon->l2cap_data = conn;
1071 conn->hchan = hchan;
1073 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* LE links may use a dedicated (smaller) MTU when the controller sets one. */
1075 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1076 conn->mtu = hcon->hdev->le_mtu;
1078 conn->mtu = hcon->hdev->acl_mtu;
1080 conn->src = &hcon->hdev->bdaddr;
1081 conn->dst = &hcon->dst;
1083 conn->feat_mask = 0;
1085 spin_lock_init(&conn->lock);
1087 INIT_LIST_HEAD(&conn->chan_l);
/* LE links get the SMP security timer; BR/EDR the info-request timer. */
1089 if (hcon->type == LE_LINK)
1090 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1092 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1094 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1099 /* ---- Socket interface ---- */
1101 /* Find socket with psm and source bdaddr.
1102 * Returns closest match.
/* Same closest-match policy as l2cap_global_chan_by_scid(), keyed on
 * PSM: an exact source-address match wins, BDADDR_ANY is the fallback. */
1104 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1106 struct l2cap_chan *c, *c1 = NULL;
1108 read_lock(&chan_list_lock);
1110 list_for_each_entry(c, &chan_list, global_l) {
1111 struct sock *sk = c->sk;
1113 if (state && c->state != state)
1116 if (c->psm == psm) {
1118 if (!bacmp(&bt_sk(sk)->src, src)) {
1119 read_unlock(&chan_list_lock);
1124 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1129 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection on @chan to @dst using @psm/@cid.
 * Validates the PSM/mode for the channel type, resolves the route to a
 * local adapter, creates or reuses the HCI link (LE when the fixed LE
 * data CID is used, ACL otherwise) and starts the L2CAP connect
 * sequence.  Returns 0 or a negative errno. */
1134 inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1136 struct sock *sk = chan->sk;
1137 bdaddr_t *src = &bt_sk(sk)->src;
1138 struct l2cap_conn *conn;
1139 struct hci_conn *hcon;
1140 struct hci_dev *hdev;
1144 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1147 hdev = hci_get_route(dst, src);
1149 return -EHOSTUNREACH;
1155 /* PSM must be odd and lsb of upper byte must be 0 */
1156 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1157 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a fixed CID. */
1162 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1167 switch (chan->mode) {
1168 case L2CAP_MODE_BASIC:
1170 case L2CAP_MODE_ERTM:
1171 case L2CAP_MODE_STREAMING:
1180 switch (sk->sk_state) {
1184 /* Already connecting */
1189 /* Already connected */
1203 /* Set destination address and psm */
/* Fix: copy the destination (@dst), not the source, into the socket's
 * dst field — the original copied src, contradicting the comment above
 * and the hci_connect(..., dst, ...) calls below. */
1204 bacpy(&bt_sk(sk)->dst, dst);
1208 auth_type = l2cap_get_auth_type(chan);
/* The fixed LE data CID selects an LE link; everything else is ACL. */
1210 if (chan->dcid == L2CAP_CID_LE_DATA)
1211 hcon = hci_connect(hdev, LE_LINK, dst,
1212 chan->sec_level, auth_type);
1214 hcon = hci_connect(hdev, ACL_LINK, dst,
1215 chan->sec_level, auth_type);
1218 err = PTR_ERR(hcon);
1222 conn = l2cap_conn_add(hcon, 0);
1229 /* Update source addr of the socket */
1230 bacpy(src, conn->src);
1232 l2cap_chan_add(conn, chan);
1234 l2cap_state_change(chan, BT_CONNECT);
1235 __set_chan_timer(chan, sk->sk_sndtimeo);
/* If the HCI link is already up we can start (or finish) immediately. */
1237 if (hcon->state == BT_CONNECTED) {
1238 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1239 __clear_chan_timer(chan);
1240 if (l2cap_chan_check_security(chan))
1241 l2cap_state_change(chan, BT_CONNECTED);
1243 l2cap_do_start(chan);
1249 hci_dev_unlock(hdev);
/* Block (interruptibly) until all ERTM frames are acknowledged or the
 * connection goes away; returns 0 or a socket/signal error. */
1254 int __l2cap_wait_ack(struct sock *sk)
1256 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1257 DECLARE_WAITQUEUE(wait, current);
1261 add_wait_queue(sk_sleep(sk), &wait);
1262 set_current_state(TASK_INTERRUPTIBLE);
1263 while (chan->unacked_frames > 0 && chan->conn) {
1267 if (signal_pending(current)) {
1268 err = sock_intr_errno(timeo);
1273 timeo = schedule_timeout(timeo);
1275 set_current_state(TASK_INTERRUPTIBLE);
1277 err = sock_error(sk);
1281 set_current_state(TASK_RUNNING);
1282 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: poll the peer again, or disconnect once the
 * remote_max_tx retry budget is exhausted. */
1286 static void l2cap_monitor_timeout(struct work_struct *work)
1288 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1289 monitor_timer.work);
1290 struct sock *sk = chan->sk;
1292 BT_DBG("chan %p", chan);
1295 if (chan->retry_count >= chan->remote_max_tx) {
1296 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1301 chan->retry_count++;
1302 __set_monitor_timer(chan);
1304 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: enter the WAIT_F state and poll the peer;
 * retry accounting is handed over to the monitor timer. */
1308 static void l2cap_retrans_timeout(struct work_struct *work)
1310 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1311 retrans_timer.work);
1312 struct sock *sk = chan->sk;
1314 BT_DBG("chan %p", chan);
1317 chan->retry_count = 1;
1318 __set_monitor_timer(chan);
1320 set_bit(CONN_WAIT_F, &chan->conn_state);
1322 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Release transmitted frames up to expected_ack_seq from the tx queue
 * and stop the retransmission timer once nothing is outstanding. */
1326 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1328 struct sk_buff *skb;
1330 while ((skb = skb_peek(&chan->tx_q)) &&
1331 chan->unacked_frames) {
/* Stop at the first frame not yet acknowledged. */
1332 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1335 skb = skb_dequeue(&chan->tx_q);
1338 chan->unacked_frames--;
1341 if (!chan->unacked_frames)
1342 __clear_retrans_timer(chan);
/* Streaming-mode transmit: stamp each queued frame with the next TX
 * sequence number (and FCS when negotiated) and send it; no
 * retransmission state is kept in this mode. */
1345 static void l2cap_streaming_send(struct l2cap_chan *chan)
1347 struct sk_buff *skb;
1351 while ((skb = skb_dequeue(&chan->tx_q))) {
1352 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1353 control |= __set_txseq(chan, chan->next_tx_seq);
1354 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the frame minus the FCS field itself. */
1356 if (chan->fcs == L2CAP_FCS_CRC16) {
1357 fcs = crc16(0, (u8 *)skb->data,
1358 skb->len - L2CAP_FCS_SIZE);
1359 put_unaligned_le16(fcs,
1360 skb->data + skb->len - L2CAP_FCS_SIZE);
1363 l2cap_do_send(chan, skb);
1365 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame with sequence number @tx_seq: locate it
 * in the tx queue, clone it, refresh the control field (F-bit, reqseq)
 * and FCS, and send the clone.  Disconnects when the frame has already
 * used up the remote_max_tx retry budget. */
1369 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1371 struct sk_buff *skb, *tx_skb;
1375 skb = skb_peek(&chan->tx_q);
1379 while (bt_cb(skb)->tx_seq != tx_seq) {
1380 if (skb_queue_is_last(&chan->tx_q, skb))
1383 skb = skb_queue_next(&chan->tx_q, skb);
1386 if (chan->remote_max_tx &&
1387 bt_cb(skb)->retries == chan->remote_max_tx) {
1388 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays queued until acknowledged. */
1392 tx_skb = skb_clone(skb, GFP_ATOMIC);
1393 bt_cb(skb)->retries++;
/* Preserve the SAR bits, rebuild everything else in the control field. */
1395 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1396 control &= __get_sar_mask(chan);
1398 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1399 control |= __set_ctrl_final(chan);
1401 control |= __set_reqseq(chan, chan->buffer_seq);
1402 control |= __set_txseq(chan, tx_seq);
1404 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1406 if (chan->fcs == L2CAP_FCS_CRC16) {
1407 fcs = crc16(0, (u8 *)tx_skb->data,
1408 tx_skb->len - L2CAP_FCS_SIZE);
1409 put_unaligned_le16(fcs,
1410 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1413 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send clones of queued frames while the TX window
 * has room, updating control fields, FCS, retry counters and the
 * retransmission timer.  Returns the number of frames sent (return
 * statements elided in this listing). */
1416 static int l2cap_ertm_send(struct l2cap_chan *chan)
1418 struct sk_buff *skb, *tx_skb;
1423 if (chan->state != BT_CONNECTED)
1426 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1428 if (chan->remote_max_tx &&
1429 bt_cb(skb)->retries == chan->remote_max_tx) {
1430 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays queued until acknowledged. */
1434 tx_skb = skb_clone(skb, GFP_ATOMIC);
1436 bt_cb(skb)->retries++;
/* Preserve the SAR bits, rebuild the rest of the control field. */
1438 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1439 control &= __get_sar_mask(chan);
1441 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1442 control |= __set_ctrl_final(chan);
1444 control |= __set_reqseq(chan, chan->buffer_seq);
1445 control |= __set_txseq(chan, chan->next_tx_seq);
1447 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* The clone shares its data buffer with the queued skb, so writing
 * the FCS through skb->data also updates tx_skb. */
1449 if (chan->fcs == L2CAP_FCS_CRC16) {
1450 fcs = crc16(0, (u8 *)skb->data,
1451 tx_skb->len - L2CAP_FCS_SIZE);
1452 put_unaligned_le16(fcs, skb->data +
1453 tx_skb->len - L2CAP_FCS_SIZE);
1456 l2cap_do_send(chan, tx_skb);
1458 __set_retrans_timer(chan);
1460 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1462 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it now awaits an acknowledgement. */
1464 if (bt_cb(skb)->retries == 1)
1465 chan->unacked_frames++;
1467 chan->frames_sent++;
1469 if (skb_queue_is_last(&chan->tx_q, skb))
1470 chan->tx_send_head = NULL;
1472 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the oldest unacked frame and resend all. */
1480 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1484 if (!skb_queue_empty(&chan->tx_q))
1485 chan->tx_send_head = chan->tx_q.next;
1487 chan->next_tx_seq = chan->expected_ack_seq;
1488 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR while locally busy, otherwise
 * try to piggy-back the ack on pending I-frames and fall back to an RR
 * S-frame if nothing went out. */
1492 static void l2cap_send_ack(struct l2cap_chan *chan)
1496 control |= __set_reqseq(chan, chan->buffer_seq);
1498 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1499 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1500 set_bit(CONN_RNR_SENT, &chan->conn_state);
1501 l2cap_send_sframe(chan, control);
/* If any I-frame was sent it already carried the acknowledgement. */
1505 if (l2cap_ertm_send(chan) > 0)
1508 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1509 l2cap_send_sframe(chan, control);
/* Send an SREJ with the Final bit set for the newest (tail) entry on
 * the channel's SREJ list. */
1512 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1514 struct srej_list *tail;
1517 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1518 control |= __set_ctrl_final(chan);
1520 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1521 control |= __set_reqseq(chan, tail->tx_seq);
1523 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder is split into
 * MTU-sized fragment skbs chained on frag_list.  Returns 0 on success
 * or a negative errno (error-return lines elided in this listing). */
1526 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1528 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1529 struct sk_buff **frag;
1532 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1538 /* Continuation fragments (no L2CAP header) */
1539 frag = &skb_shinfo(skb)->frag_list;
1541 count = min_t(unsigned int, conn->mtu, len);
1543 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1546 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
/* Fragments inherit the head skb's priority. */
1549 (*frag)->priority = skb->priority;
1554 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header plus a 2-byte PSM,
 * followed by the user payload copied from @msg.  Returns the skb or an
 * ERR_PTR. */
1560 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1561 struct msghdr *msg, size_t len,
1564 struct sock *sk = chan->sk;
1565 struct l2cap_conn *conn = chan->conn;
1566 struct sk_buff *skb;
1567 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1568 struct l2cap_hdr *lh;
1570 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
/* The head skb carries the header plus as much payload as the MTU
 * allows; the rest is fragmented by l2cap_skbuff_fromiovec(). */
1572 count = min_t(unsigned int, (conn->mtu - hlen), len);
1573 skb = bt_skb_send_alloc(sk, count + hlen,
1574 msg->msg_flags & MSG_DONTWAIT, &err);
1576 return ERR_PTR(err);
1578 skb->priority = priority;
1580 /* Create L2CAP header */
1581 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1582 lh->cid = cpu_to_le16(chan->dcid);
1583 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1584 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1586 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1587 if (unlikely(err < 0)) {
1589 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus payload. */
1594 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1595 struct msghdr *msg, size_t len,
1598 struct sock *sk = chan->sk;
1599 struct l2cap_conn *conn = chan->conn;
1600 struct sk_buff *skb;
1601 int err, count, hlen = L2CAP_HDR_SIZE;
1602 struct l2cap_hdr *lh;
1604 BT_DBG("sk %p len %d", sk, (int)len);
1606 count = min_t(unsigned int, (conn->mtu - hlen), len);
1607 skb = bt_skb_send_alloc(sk, count + hlen,
1608 msg->msg_flags & MSG_DONTWAIT, &err);
1610 return ERR_PTR(err);
1612 skb->priority = priority;
1614 /* Create L2CAP header */
1615 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1616 lh->cid = cpu_to_le16(chan->dcid);
1617 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1619 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1620 if (unlikely(err < 0)) {
1622 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, control field
 * (enhanced or extended, per FLAG_EXT_CTRL), optional SDU-length field,
 * payload, and a zeroed FCS placeholder when CRC16 is enabled.
 * Returns the skb or an ERR_PTR on failure. */
1627 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1628 struct msghdr *msg, size_t len,
1629 u32 control, u16 sdulen)
1631 struct sock *sk = chan->sk;
1632 struct l2cap_conn *conn = chan->conn;
1633 struct sk_buff *skb;
1634 int err, count, hlen;
1635 struct l2cap_hdr *lh;
1637 BT_DBG("sk %p len %d", sk, (int)len);
1640 return ERR_PTR(-ENOTCONN);
/* Extended control field (high-speed) uses a larger header. */
1642 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1643 hlen = L2CAP_EXT_HDR_SIZE;
1645 hlen = L2CAP_ENH_HDR_SIZE;
/* NOTE(review): the condition guarding the SDULEN addition (presumably
 * sdulen != 0, i.e. a SAR start frame) is not visible in this extract. */
1648 hlen += L2CAP_SDULEN_SIZE;
1650 if (chan->fcs == L2CAP_FCS_CRC16)
1651 hlen += L2CAP_FCS_SIZE;
1653 count = min_t(unsigned int, (conn->mtu - hlen), len);
1654 skb = bt_skb_send_alloc(sk, count + hlen,
1655 msg->msg_flags & MSG_DONTWAIT, &err);
1657 return ERR_PTR(err);
1659 /* Create L2CAP header */
1660 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1661 lh->cid = cpu_to_le16(chan->dcid);
1662 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field size depends on enhanced vs extended mode. */
1664 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1667 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1669 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1670 if (unlikely(err < 0)) {
1672 return ERR_PTR(err);
/* FCS is appended as zero here; the real CRC is filled in at send time. */
1675 if (chan->fcs == L2CAP_FCS_CRC16)
1676 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1678 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame (carrying
 * the total SDU length), zero or more CONTINUE frames, and a final END
 * frame; on success the segments are spliced onto chan->tx_q.  On a
 * mid-stream allocation failure the partial queue is purged. */
1682 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1684 struct sk_buff *skb;
1685 struct sk_buff_head sar_queue;
1689 skb_queue_head_init(&sar_queue);
/* First segment: SAR=START, sdulen = total SDU length. */
1690 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1691 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1693 return PTR_ERR(skb);
1695 __skb_queue_tail(&sar_queue, skb);
1696 len -= chan->remote_mps;
1697 size += chan->remote_mps;
/* Middle segments are full remote_mps; the last one is SAR=END. */
1702 if (len > chan->remote_mps) {
1703 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1704 buflen = chan->remote_mps;
1706 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1710 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* Drop everything queued so far if a segment fails. */
1712 skb_queue_purge(&sar_queue);
1713 return PTR_ERR(skb);
1716 __skb_queue_tail(&sar_queue, skb);
1720 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1721 if (chan->tx_send_head == NULL)
1722 chan->tx_send_head = sar_queue.next;
/* Transmit entry point for a channel: dispatches on channel type/mode.
 * Connectionless channels and basic mode send a single PDU immediately;
 * ERTM/streaming either queue one unsegmented I-frame or segment the SDU,
 * then kick the appropriate transmit engine. */
1727 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1730 struct sk_buff *skb;
1734 /* Connectionless channel */
1735 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1736 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1738 return PTR_ERR(skb);
1740 l2cap_do_send(chan, skb);
1744 switch (chan->mode) {
1745 case L2CAP_MODE_BASIC:
1746 /* Check outgoing MTU */
1747 if (len > chan->omtu)
1750 /* Create a basic PDU */
1751 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1753 return PTR_ERR(skb);
1755 l2cap_do_send(chan, skb);
1759 case L2CAP_MODE_ERTM:
1760 case L2CAP_MODE_STREAMING:
1761 /* Entire SDU fits into one PDU */
1762 if (len <= chan->remote_mps) {
1763 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1764 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1767 return PTR_ERR(skb);
1769 __skb_queue_tail(&chan->tx_q, skb);
1771 if (chan->tx_send_head == NULL)
1772 chan->tx_send_head = skb;
1775 /* Segment SDU into multiple PDUs */
1776 err = l2cap_sar_segment_sdu(chan, msg, len);
1781 if (chan->mode == L2CAP_MODE_STREAMING) {
1782 l2cap_streaming_send(chan);
/* In ERTM, hold off sending while the remote is busy or while
 * waiting for an F-bit response. */
1787 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1788 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1793 err = l2cap_ertm_send(chan);
1800 BT_DBG("bad state %1.1x", chan->mode);
1807 /* Copy frame to all raw sockets on that connection.  Each raw channel
 * gets its own atomic clone of @skb, delivered via the channel's
 * recv() op; the originating socket is skipped. */
1808 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1810 struct sk_buff *nskb;
1811 struct l2cap_chan *chan;
1813 BT_DBG("conn %p", conn);
/* RCU-protected walk over all channels on this connection. */
1817 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1818 struct sock *sk = chan->sk;
1819 if (chan->chan_type != L2CAP_CHAN_RAW)
1822 /* Don't send frame to the socket it came from */
1825 nskb = skb_clone(skb, GFP_ATOMIC);
1829 if (chan->ops->recv(chan->data, nskb))
1836 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID chosen for
 * BR/EDR vs LE signalling), command header (@code/@ident/@dlen), then
 * @dlen bytes of @data, fragmented across frag_list at the MTU boundary
 * if necessary. */
1837 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1838 u8 code, u8 ident, u16 dlen, void *data)
1840 struct sk_buff *skb, **frag;
1841 struct l2cap_cmd_hdr *cmd;
1842 struct l2cap_hdr *lh;
1845 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1846 conn, code, ident, dlen);
1848 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1849 count = min_t(unsigned int, conn->mtu, len);
1851 skb = bt_skb_alloc(count, GFP_ATOMIC);
1855 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1856 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the dedicated LE signalling CID. */
1858 if (conn->hcon->type == LE_LINK)
1859 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1861 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1863 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1866 cmd->len = cpu_to_le16(dlen);
/* Copy what fits in the first fragment after the two headers. */
1869 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1870 memcpy(skb_put(skb, count), data, count);
1876 /* Continuation fragments (no L2CAP header) */
1877 frag = &skb_shinfo(skb)->frag_list;
1879 count = min_t(unsigned int, conn->mtu, len);
1881 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1885 memcpy(skb_put(*frag, count), data, count);
1890 frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning its total size
 * (so the caller can advance through the option list).  @val receives
 * the value: decoded for 1/2/4-byte options, or a pointer to the raw
 * bytes for variable-length options. */
1900 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1902 struct l2cap_conf_opt *opt = *ptr;
1905 len = L2CAP_CONF_OPT_SIZE + opt->len;
1913 *val = *((u8 *) opt->val);
1917 *val = get_unaligned_le16(opt->val);
1921 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer to the payload. */
1925 *val = (unsigned long) opt->val;
1929 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  For 1/2/4-byte options @val is the value itself; for
 * other lengths @val is a pointer to @len bytes to copy. */
1933 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1935 struct l2cap_conf_opt *opt = *ptr;
1937 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1944 *((u8 *) opt->val) = val;
1948 put_unaligned_le16(val, opt->val);
1952 put_unaligned_le32(val, opt->val);
/* Variable-length: val is actually a pointer to the data. */
1956 memcpy(opt->val, (void *) val, len);
1960 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters.  ERTM uses the channel's own service
 * type and default latency/flush values; streaming mode advertises
 * best-effort service. */
1963 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1965 struct l2cap_conf_efs efs;
1967 switch (chan->mode) {
1968 case L2CAP_MODE_ERTM:
1969 efs.id = chan->local_id;
1970 efs.stype = chan->local_stype;
1971 efs.msdu = cpu_to_le16(chan->local_msdu);
1972 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1973 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1974 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1977 case L2CAP_MODE_STREAMING:
1979 efs.stype = L2CAP_SERV_BESTEFFORT;
1980 efs.msdu = cpu_to_le16(chan->local_msdu);
1981 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1990 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1991 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: sends a pending
 * acknowledgement with the channel's socket locked. */
1994 static void l2cap_ack_timeout(struct work_struct *work)
1996 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1999 lock_sock(chan->sk);
2000 l2cap_send_ack(chan);
2001 release_sock(chan->sk);
/* Reset ERTM sequence state and initialise the retransmission, monitor
 * and ack delayed-work timers plus the SREJ queue/list.  Called when a
 * channel enters ERTM mode. */
2004 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2006 chan->expected_ack_seq = 0;
2007 chan->unacked_frames = 0;
2008 chan->buffer_seq = 0;
2009 chan->num_acked = 0;
2010 chan->frames_sent = 0;
2012 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2013 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2014 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2016 skb_queue_head_init(&chan->srej_q);
2018 INIT_LIST_HEAD(&chan->srej_l);
/* Pick the channel mode to use: keep ERTM/streaming only if the remote's
 * feature mask supports it, otherwise fall back to basic mode. */
2021 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2024 case L2CAP_MODE_STREAMING:
2025 case L2CAP_MODE_ERTM:
2026 if (l2cap_mode_supported(mode, remote_feat_mask))
2030 return L2CAP_MODE_BASIC;
/* True when Extended Window Size may be used: high-speed support is
 * enabled and the remote advertised the extended-window feature. */
2034 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2036 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* True when Extended Flow Specification may be used: high-speed support
 * is enabled and the remote advertised the extended-flow feature. */
2039 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2041 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the TX window configuration: if the requested window exceeds the
 * default and extended windows are supported, switch to the extended
 * control field; otherwise clamp the window to the default maximum. */
2044 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2046 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2047 __l2cap_ews_supported(chan)) {
2048 /* use extended control field */
2049 set_bit(FLAG_EXT_CTRL, &chan->flags);
2050 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2052 chan->tx_win = min_t(u16, chan->tx_win,
2053 L2CAP_DEFAULT_TX_WINDOW);
2054 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build our outgoing Configuration Request into @data.  On the first
 * request this may downgrade chan->mode based on the remote feature
 * mask, then emits MTU, RFC, optional EFS, FCS and EWS options as
 * appropriate for the selected mode.  Returns the request length. */
2058 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2060 struct l2cap_conf_req *req = data;
2061 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2062 void *ptr = req->data;
2065 BT_DBG("chan %p", chan);
/* Mode (re)selection happens only before any conf req/rsp exchange. */
2067 if (chan->num_conf_req || chan->num_conf_rsp)
2070 switch (chan->mode) {
2071 case L2CAP_MODE_STREAMING:
2072 case L2CAP_MODE_ERTM:
2073 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2076 if (__l2cap_efs_supported(chan))
2077 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2081 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only send the MTU option when it differs from the default. */
2086 if (chan->imtu != L2CAP_DEFAULT_MTU)
2087 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2089 switch (chan->mode) {
2090 case L2CAP_MODE_BASIC:
/* If the remote supports neither ERTM nor streaming there is no
 * point sending an RFC option at all. */
2091 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2092 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2095 rfc.mode = L2CAP_MODE_BASIC;
2097 rfc.max_transmit = 0;
2098 rfc.retrans_timeout = 0;
2099 rfc.monitor_timeout = 0;
2100 rfc.max_pdu_size = 0;
2102 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2103 (unsigned long) &rfc);
2106 case L2CAP_MODE_ERTM:
2107 rfc.mode = L2CAP_MODE_ERTM;
2108 rfc.max_transmit = chan->max_tx;
2109 rfc.retrans_timeout = 0;
2110 rfc.monitor_timeout = 0;
/* Cap the PDU size so a full frame (with the largest header) still
 * fits in the connection MTU. */
2112 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2113 L2CAP_EXT_HDR_SIZE -
2116 rfc.max_pdu_size = cpu_to_le16(size);
2118 l2cap_txwin_setup(chan);
2120 rfc.txwin_size = min_t(u16, chan->tx_win,
2121 L2CAP_DEFAULT_TX_WINDOW);
2123 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2124 (unsigned long) &rfc);
2126 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2127 l2cap_add_opt_efs(&ptr, chan);
2129 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2132 if (chan->fcs == L2CAP_FCS_NONE ||
2133 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2134 chan->fcs = L2CAP_FCS_NONE;
2135 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
/* Extended window size is negotiated via its own option. */
2138 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2139 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2143 case L2CAP_MODE_STREAMING:
2144 rfc.mode = L2CAP_MODE_STREAMING;
2146 rfc.max_transmit = 0;
2147 rfc.retrans_timeout = 0;
2148 rfc.monitor_timeout = 0;
2150 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2151 L2CAP_EXT_HDR_SIZE -
2154 rfc.max_pdu_size = cpu_to_le16(size);
2156 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2157 (unsigned long) &rfc);
2159 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2160 l2cap_add_opt_efs(&ptr, chan);
2162 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2165 if (chan->fcs == L2CAP_FCS_NONE ||
2166 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2167 chan->fcs = L2CAP_FCS_NONE;
2168 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2173 req->dcid = cpu_to_le16(chan->dcid);
2174 req->flags = cpu_to_le16(0);
/* Parse the remote's accumulated Configuration Request (chan->conf_req/
 * conf_len) and build our Configuration Response into @data.  Walks the
 * option list, negotiates mode/MTU/RFC/FCS/EFS/EWS, stores the agreed
 * remote parameters on the channel, and returns the response length or
 * -ECONNREFUSED when negotiation cannot proceed. */
2179 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2181 struct l2cap_conf_rsp *rsp = data;
2182 void *ptr = rsp->data;
2183 void *req = chan->conf_req;
2184 int len = chan->conf_len;
2185 int type, hint, olen;
2187 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2188 struct l2cap_conf_efs efs;
2190 u16 mtu = L2CAP_DEFAULT_MTU;
2191 u16 result = L2CAP_CONF_SUCCESS;
2194 BT_DBG("chan %p", chan);
/* First pass: decode every option the remote sent. */
2196 while (len >= L2CAP_CONF_OPT_SIZE) {
2197 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hints we don't know trigger
 * a CONF_UNKNOWN result below. */
2199 hint = type & L2CAP_CONF_HINT;
2200 type &= L2CAP_CONF_MASK;
2203 case L2CAP_CONF_MTU:
2207 case L2CAP_CONF_FLUSH_TO:
2208 chan->flush_to = val;
2211 case L2CAP_CONF_QOS:
2214 case L2CAP_CONF_RFC:
2215 if (olen == sizeof(rfc))
2216 memcpy(&rfc, (void *) val, olen);
2219 case L2CAP_CONF_FCS:
2220 if (val == L2CAP_FCS_NONE)
2221 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2224 case L2CAP_CONF_EFS:
2226 if (olen == sizeof(efs))
2227 memcpy(&efs, (void *) val, olen);
2230 case L2CAP_CONF_EWS:
2232 return -ECONNREFUSED;
2234 set_bit(FLAG_EXT_CTRL, &chan->flags);
2235 set_bit(CONF_EWS_RECV, &chan->conf_state);
2236 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2237 chan->remote_tx_win = val;
/* Unknown non-hint option: reject it by listing its type. */
2244 result = L2CAP_CONF_UNKNOWN;
2245 *((u8 *) ptr++) = type;
/* Mode selection only happens on the first exchange. */
2250 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2253 switch (chan->mode) {
2254 case L2CAP_MODE_STREAMING:
2255 case L2CAP_MODE_ERTM:
2256 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2257 chan->mode = l2cap_select_mode(rfc.mode,
2258 chan->conn->feat_mask);
2263 if (__l2cap_efs_supported(chan))
2264 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2266 return -ECONNREFUSED;
2269 if (chan->mode != rfc.mode)
2270 return -ECONNREFUSED;
/* Mode mismatch: propose our mode back; refuse outright if we have
 * already answered once. */
2276 if (chan->mode != rfc.mode) {
2277 result = L2CAP_CONF_UNACCEPT;
2278 rfc.mode = chan->mode;
2280 if (chan->num_conf_rsp == 1)
2281 return -ECONNREFUSED;
2283 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2284 sizeof(rfc), (unsigned long) &rfc);
2287 if (result == L2CAP_CONF_SUCCESS) {
2288 /* Configure output options and let the other side know
2289 * which ones we don't like. */
2291 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2292 result = L2CAP_CONF_UNACCEPT;
2295 set_bit(CONF_MTU_DONE, &chan->conf_state);
2297 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type mismatch is only tolerable if one side is
 * no-traffic; otherwise unaccept (or refuse after one round). */
2300 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2301 efs.stype != L2CAP_SERV_NOTRAFIC &&
2302 efs.stype != chan->local_stype) {
2304 result = L2CAP_CONF_UNACCEPT;
2306 if (chan->num_conf_req >= 1)
2307 return -ECONNREFUSED;
2309 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2311 (unsigned long) &efs);
2313 /* Send PENDING Conf Rsp */
2314 result = L2CAP_CONF_PENDING;
2315 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2320 case L2CAP_MODE_BASIC:
2321 chan->fcs = L2CAP_FCS_NONE;
2322 set_bit(CONF_MODE_DONE, &chan->conf_state);
2325 case L2CAP_MODE_ERTM:
2326 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2327 chan->remote_tx_win = rfc.txwin_size;
2329 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2331 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the remote's PDU size to what fits in our MTU. */
2333 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2335 L2CAP_EXT_HDR_SIZE -
2338 rfc.max_pdu_size = cpu_to_le16(size);
2339 chan->remote_mps = size;
2341 rfc.retrans_timeout =
2342 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2343 rfc.monitor_timeout =
2344 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2346 set_bit(CONF_MODE_DONE, &chan->conf_state);
2348 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2349 sizeof(rfc), (unsigned long) &rfc);
/* Record the remote's EFS parameters and echo them back. */
2351 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2352 chan->remote_id = efs.id;
2353 chan->remote_stype = efs.stype;
2354 chan->remote_msdu = le16_to_cpu(efs.msdu);
2355 chan->remote_flush_to =
2356 le32_to_cpu(efs.flush_to);
2357 chan->remote_acc_lat =
2358 le32_to_cpu(efs.acc_lat);
2359 chan->remote_sdu_itime =
2360 le32_to_cpu(efs.sdu_itime);
2361 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2362 sizeof(efs), (unsigned long) &efs);
2366 case L2CAP_MODE_STREAMING:
2367 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2369 L2CAP_EXT_HDR_SIZE -
2372 rfc.max_pdu_size = cpu_to_le16(size);
2373 chan->remote_mps = size;
2375 set_bit(CONF_MODE_DONE, &chan->conf_state);
2377 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2378 sizeof(rfc), (unsigned long) &rfc);
2383 result = L2CAP_CONF_UNACCEPT;
2385 memset(&rfc, 0, sizeof(rfc));
2386 rfc.mode = chan->mode;
2389 if (result == L2CAP_CONF_SUCCESS)
2390 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2392 rsp->scid = cpu_to_le16(chan->dcid);
2393 rsp->result = cpu_to_le16(result);
2394 rsp->flags = cpu_to_le16(0x0000);
/* Parse the remote's Configuration Response (@rsp/@len) and build a new
 * Configuration Request into @data reflecting the values we accept.
 * Updates channel parameters (imtu, flush_to, mode, tx_win, ERTM timers,
 * EFS locals) on success/pending results.  Returns the new request
 * length or -ECONNREFUSED. */
2399 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2401 struct l2cap_conf_req *req = data;
2402 void *ptr = req->data;
2405 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2406 struct l2cap_conf_efs efs;
2408 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2410 while (len >= L2CAP_CONF_OPT_SIZE) {
2411 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2414 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the minimum: unaccept and fall
 * back to the minimum ourselves. */
2415 if (val < L2CAP_DEFAULT_MIN_MTU) {
2416 *result = L2CAP_CONF_UNACCEPT;
2417 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2420 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2423 case L2CAP_CONF_FLUSH_TO:
2424 chan->flush_to = val;
2425 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2429 case L2CAP_CONF_RFC:
2430 if (olen == sizeof(rfc))
2431 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not have its mode changed by the peer. */
2433 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2434 rfc.mode != chan->mode)
2435 return -ECONNREFUSED;
2439 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2440 sizeof(rfc), (unsigned long) &rfc);
2443 case L2CAP_CONF_EWS:
2444 chan->tx_win = min_t(u16, val,
2445 L2CAP_DEFAULT_EXT_WINDOW);
2446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2450 case L2CAP_CONF_EFS:
2451 if (olen == sizeof(efs))
2452 memcpy(&efs, (void *)val, olen);
2454 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2455 efs.stype != L2CAP_SERV_NOTRAFIC &&
2456 efs.stype != chan->local_stype)
2457 return -ECONNREFUSED;
2459 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2460 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated into another mode. */
2465 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2466 return -ECONNREFUSED;
2468 chan->mode = rfc.mode;
2470 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2472 case L2CAP_MODE_ERTM:
2473 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2474 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2475 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2477 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2478 chan->local_msdu = le16_to_cpu(efs.msdu);
2479 chan->local_sdu_itime =
2480 le32_to_cpu(efs.sdu_itime);
2481 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2482 chan->local_flush_to =
2483 le32_to_cpu(efs.flush_to);
2487 case L2CAP_MODE_STREAMING:
2488 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2492 req->dcid = cpu_to_le16(chan->dcid);
2493 req->flags = cpu_to_le16(0x0000);
/* Build a minimal Configuration Response (scid/result/flags, no options)
 * into @data and return its length. */
2498 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2500 struct l2cap_conf_rsp *rsp = data;
2501 void *ptr = rsp->data;
2503 BT_DBG("chan %p", chan);
2505 rsp->scid = cpu_to_le16(chan->dcid);
2506 rsp->result = cpu_to_le16(result);
2507 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (success) for a channel whose
 * acceptance was delayed (e.g. defer_setup), then kick off configuration
 * by sending our first Configuration Request if not already sent. */
2512 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2514 struct l2cap_conn_rsp rsp;
2515 struct l2cap_conn *conn = chan->conn;
2518 rsp.scid = cpu_to_le16(chan->dcid);
2519 rsp.dcid = cpu_to_le16(chan->scid);
2520 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2521 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2522 l2cap_send_cmd(conn, chan->ident,
2523 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only send one initial Configuration Request. */
2525 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2528 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2529 l2cap_build_conf_req(chan, buf), buf);
2530 chan->num_conf_req++;
/* Extract the RFC option from a successful Configuration Response and
 * apply its timeouts/MPS to the channel.  If the remote omitted the RFC
 * option, fall back to sane defaults (and log an error).  No-op for
 * basic-mode channels. */
2533 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2537 struct l2cap_conf_rfc rfc;
2539 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2541 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2544 while (len >= L2CAP_CONF_OPT_SIZE) {
2545 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2548 case L2CAP_CONF_RFC:
2549 if (olen == sizeof(rfc))
2550 memcpy(&rfc, (void *)val, olen);
2555 /* Use sane default values in case a misbehaving remote device
2556 * did not send an RFC option.
2558 rfc.mode = chan->mode;
2559 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2560 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2561 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2563 BT_ERR("Expected RFC option was not found, using defaults");
2567 case L2CAP_MODE_ERTM:
2568 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2569 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2570 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2572 case L2CAP_MODE_STREAMING:
2573 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject.  If it rejects our outstanding Information
 * Request (matched by ident), stop the info timer, mark feature-mask
 * discovery done and resume pending channel setup. */
2577 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2579 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2581 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2584 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2585 cmd->ident == conn->info_ident) {
2586 cancel_delayed_work_sync(&conn->info_timer);
2588 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2589 conn->info_ident = 0;
2591 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: look up a listening channel for
 * the PSM, enforce link security (except for SDP, PSM 0x0001), create and
 * register the new child channel, then reply with success/pending/error
 * and, when appropriate, start feature-mask discovery or configuration. */
2597 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2599 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2600 struct l2cap_conn_rsp rsp;
2601 struct l2cap_chan *chan = NULL, *pchan;
2602 struct sock *parent, *sk = NULL;
2603 int result, status = L2CAP_CS_NO_INFO;
2605 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2606 __le16 psm = req->psm;
2608 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2610 /* Check if we have socket listening on psm */
2611 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2613 result = L2CAP_CR_BAD_PSM;
2621 /* Check if the ACL is secure enough (if not SDP) */
2622 if (psm != cpu_to_le16(0x0001) &&
2623 !hci_conn_check_link_mode(conn->hcon)) {
2624 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2625 result = L2CAP_CR_SEC_BLOCK;
2629 result = L2CAP_CR_NO_MEM;
2631 /* Check for backlog size */
2632 if (sk_acceptq_is_full(parent)) {
2633 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2637 chan = pchan->ops->new_connection(pchan->data);
2643 /* Check if we already have channel with that dcid */
2644 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2645 sock_set_flag(sk, SOCK_ZAPPED);
2646 chan->ops->close(chan->data);
2650 hci_conn_hold(conn->hcon);
2652 bacpy(&bt_sk(sk)->src, conn->src);
2653 bacpy(&bt_sk(sk)->dst, conn->dst);
2657 bt_accept_enqueue(parent, sk);
2659 l2cap_chan_add(conn, chan);
2663 __set_chan_timer(chan, sk->sk_sndtimeo);
2665 chan->ident = cmd->ident;
/* With feature discovery already done, either accept immediately,
 * defer to userspace (defer_setup) or wait for authentication. */
2667 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2668 if (l2cap_chan_check_security(chan)) {
2669 if (bt_sk(sk)->defer_setup) {
2670 l2cap_state_change(chan, BT_CONNECT2);
2671 result = L2CAP_CR_PEND;
2672 status = L2CAP_CS_AUTHOR_PEND;
2673 parent->sk_data_ready(parent, 0);
2675 l2cap_state_change(chan, BT_CONFIG);
2676 result = L2CAP_CR_SUCCESS;
2677 status = L2CAP_CS_NO_INFO;
2680 l2cap_state_change(chan, BT_CONNECT2);
2681 result = L2CAP_CR_PEND;
2682 status = L2CAP_CS_AUTHEN_PEND;
2685 l2cap_state_change(chan, BT_CONNECT2);
2686 result = L2CAP_CR_PEND;
2687 status = L2CAP_CS_NO_INFO;
2691 release_sock(parent);
2694 rsp.scid = cpu_to_le16(scid);
2695 rsp.dcid = cpu_to_le16(dcid);
2696 rsp.result = cpu_to_le16(result);
2697 rsp.status = cpu_to_le16(status);
2698 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info yet: start feature-mask discovery. */
2700 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2701 struct l2cap_info_req info;
2702 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2704 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2705 conn->info_ident = l2cap_get_ident(conn);
2707 schedule_delayed_work(&conn->info_timer,
2708 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2710 l2cap_send_cmd(conn, conn->info_ident,
2711 L2CAP_INFO_REQ, sizeof(info), &info);
/* Accepted immediately: begin configuration right away. */
2714 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2715 result == L2CAP_CR_SUCCESS) {
2717 set_bit(CONF_REQ_SENT, &chan->conf_state);
2718 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2719 l2cap_build_conf_req(chan, buf), buf);
2720 chan->num_conf_req++;
/* Handle a Connection Response: locate the channel by scid (or by ident
 * while still pending), then on success move to BT_CONFIG and send our
 * first Configuration Request; on pending just mark the state; on any
 * other result tear the channel down with ECONNREFUSED. */
2726 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2728 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2729 u16 scid, dcid, result, status;
2730 struct l2cap_chan *chan;
2734 scid = __le16_to_cpu(rsp->scid);
2735 dcid = __le16_to_cpu(rsp->dcid);
2736 result = __le16_to_cpu(rsp->result);
2737 status = __le16_to_cpu(rsp->status);
2739 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2742 chan = l2cap_get_chan_by_scid(conn, scid);
/* Fall back to ident lookup for a response without a valid scid. */
2746 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2754 case L2CAP_CR_SUCCESS:
2755 l2cap_state_change(chan, BT_CONFIG);
2758 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2760 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2763 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2764 l2cap_build_conf_req(chan, req), req);
2765 chan->num_conf_req++;
2769 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2773 l2cap_chan_del(chan, ECONNREFUSED);
2783 /* FCS is enabled only in ERTM or streaming mode, if one or both
 * sides request it; default to CRC16 there unless the remote asked
 * for no FCS. */
2786 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2787 chan->fcs = L2CAP_FCS_NONE;
2788 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2789 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configuration Request: validate channel state, accumulate the
 * (possibly multi-fragment) option data into chan->conf_req, and once
 * complete parse it, reply, and — when both directions are configured —
 * bring the channel up (initialising ERTM if selected). */
2792 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2794 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2797 struct l2cap_chan *chan;
2801 dcid = __le16_to_cpu(req->dcid);
2802 flags = __le16_to_cpu(req->flags);
2804 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2806 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with an invalid-CID Command Reject. */
2812 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2813 struct l2cap_cmd_rej_cid rej;
2815 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2816 rej.scid = cpu_to_le16(chan->scid);
2817 rej.dcid = cpu_to_le16(chan->dcid);
2819 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2824 /* Reject if config buffer is too small. */
2825 len = cmd_len - sizeof(*req);
2826 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2827 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2828 l2cap_build_conf_rsp(chan, rsp,
2829 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the request. */
2834 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2835 chan->conf_len += len;
2837 if (flags & 0x0001) {
2838 /* Incomplete config. Send empty response. */
2839 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2840 l2cap_build_conf_rsp(chan, rsp,
2841 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2845 /* Complete config. */
2846 len = l2cap_parse_conf_req(chan, rsp);
2848 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2852 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2853 chan->num_conf_rsp++;
2855 /* Reset config buffer. */
2858 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: channel is ready. */
2861 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2862 set_default_fcs(chan);
2864 l2cap_state_change(chan, BT_CONNECTED);
2866 chan->next_tx_seq = 0;
2867 chan->expected_tx_seq = 0;
2868 skb_queue_head_init(&chan->tx_q);
2869 if (chan->mode == L2CAP_MODE_ERTM)
2870 l2cap_ertm_init(chan);
2872 l2cap_chan_ready(sk);
2876 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2878 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2879 l2cap_build_conf_req(chan, buf), buf);
2880 chan->num_conf_req++;
2883 /* Got Conf Rsp PENDING from remote side and assume we sent
2884 Conf Rsp PENDING in the code above */
2885 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2886 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2888 /* check compatibility */
2890 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2891 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2893 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2894 l2cap_build_conf_rsp(chan, rsp,
2895 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configuration Response: on success apply the agreed RFC; on
 * pending handle the local-pending handshake; on unaccept retry with an
 * adjusted request (bounded by L2CAP_CONF_MAX_CONF_RSP); otherwise
 * disconnect.  When both directions are done, bring the channel up. */
2903 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2905 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2906 u16 scid, flags, result;
2907 struct l2cap_chan *chan;
2909 int len = cmd->len - sizeof(*rsp);
2911 scid = __le16_to_cpu(rsp->scid);
2912 flags = __le16_to_cpu(rsp->flags);
2913 result = __le16_to_cpu(rsp->result);
2915 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2916 scid, flags, result);
2918 chan = l2cap_get_chan_by_scid(conn, scid);
2925 case L2CAP_CONF_SUCCESS:
2926 l2cap_conf_rfc_get(chan, rsp->data, len);
2927 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2930 case L2CAP_CONF_PENDING:
2931 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2933 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2936 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2939 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2943 /* check compatibility */
2945 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2946 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2948 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2949 l2cap_build_conf_rsp(chan, buf,
2950 L2CAP_CONF_SUCCESS, 0x0000), buf);
2954 case L2CAP_CONF_UNACCEPT:
2955 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Response options must fit in our request buffer. */
2958 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2959 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2963 /* throw out any old stored conf requests */
2964 result = L2CAP_CONF_SUCCESS;
2965 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2968 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2972 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2973 L2CAP_CONF_REQ, len, req);
2974 chan->num_conf_req++;
2975 if (result != L2CAP_CONF_SUCCESS)
/* Any other result (or too many retries): give up on the channel. */
2981 sk->sk_err = ECONNRESET;
2982 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2983 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2990 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2992 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2993 set_default_fcs(chan);
2995 l2cap_state_change(chan, BT_CONNECTED);
2996 chan->next_tx_seq = 0;
2997 chan->expected_tx_seq = 0;
2998 skb_queue_head_init(&chan->tx_q);
2999 if (chan->mode == L2CAP_MODE_ERTM)
3000 l2cap_ertm_init(chan);
3002 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: look up the channel by our scid
 * (the request's dcid), acknowledge with a Disconnection Response,
 * shut down the socket and delete/close the channel. */
3010 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3012 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3013 struct l2cap_disconn_rsp rsp;
3015 struct l2cap_chan *chan;
3018 scid = __le16_to_cpu(req->scid);
3019 dcid = __le16_to_cpu(req->dcid);
3021 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3023 chan = l2cap_get_chan_by_scid(conn, dcid);
3029 rsp.dcid = cpu_to_le16(chan->scid);
3030 rsp.scid = cpu_to_le16(chan->dcid);
3031 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3033 sk->sk_shutdown = SHUTDOWN_MASK;
3035 l2cap_chan_del(chan, ECONNRESET);
3038 chan->ops->close(chan->data);
/* Handle a Disconnection Response: the remote confirmed our disconnect,
 * so delete and close the channel (error 0 — clean teardown). */
3042 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3044 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3046 struct l2cap_chan *chan;
3049 scid = __le16_to_cpu(rsp->scid);
3050 dcid = __le16_to_cpu(rsp->dcid);
3052 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3054 chan = l2cap_get_chan_by_scid(conn, scid);
3060 l2cap_chan_del(chan, 0);
3063 chan->ops->close(chan->data);
/* Handle an Information Request: answer feature-mask queries with our
 * supported features (ERTM/streaming, and extended flow/window when
 * high-speed is enabled), fixed-channel queries with the fixed-channel
 * map, and anything else with NOTSUPP. */
3067 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3069 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3072 type = __le16_to_cpu(req->type);
3074 BT_DBG("type 0x%4.4x", type);
3076 if (type == L2CAP_IT_FEAT_MASK) {
3078 u32 feat_mask = l2cap_feat_mask;
3079 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3080 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3081 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3083 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended features only advertised with high-speed support. */
3086 feat_mask |= L2CAP_FEAT_EXT_FLOW
3087 | L2CAP_FEAT_EXT_WINDOW;
3089 put_unaligned_le32(feat_mask, rsp->data);
3090 l2cap_send_cmd(conn, cmd->ident,
3091 L2CAP_INFO_RSP, sizeof(buf), buf);
3092 } else if (type == L2CAP_IT_FIXED_CHAN) {
3094 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP fixed channel depends on high-speed support. */
3097 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3099 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3101 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3102 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3103 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3104 l2cap_send_cmd(conn, cmd->ident,
3105 L2CAP_INFO_RSP, sizeof(buf), buf);
3107 struct l2cap_info_rsp rsp;
3108 rsp.type = cpu_to_le16(type);
3109 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3110 l2cap_send_cmd(conn, cmd->ident,
3111 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming L2CAP Information Response.
 * Validates the ident against the one we sent (info req/rsp are not
 * bound to a channel), stops the info timer, records the peer's feature
 * mask and, if the peer supports fixed channels, issues a follow-up
 * fixed-channel query. When the exchange is complete (or failed) the
 * pending connections on this link are kicked via l2cap_conn_start().
 */
3117 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3119 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3122 	type = __le16_to_cpu(rsp->type);
3123 	result = __le16_to_cpu(rsp->result);
3125 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3127 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
3128 	if (cmd->ident != conn->info_ident ||
3129 			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3132 	cancel_delayed_work_sync(&conn->info_timer);
3134 	if (result != L2CAP_IR_SUCCESS) {
/* Peer refused the query: treat the exchange as done and move on. */
3135 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3136 		conn->info_ident = 0;
3138 		l2cap_conn_start(conn);
3143 	if (type == L2CAP_IT_FEAT_MASK) {
3144 		conn->feat_mask = get_unaligned_le32(rsp->data);
3146 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Chain a second request for the fixed-channel bitmap. */
3147 			struct l2cap_info_req req;
3148 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3150 			conn->info_ident = l2cap_get_ident(conn);
3152 			l2cap_send_cmd(conn, conn->info_ident,
3153 					L2CAP_INFO_REQ, sizeof(req), &req);
3155 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3156 			conn->info_ident = 0;
3158 			l2cap_conn_start(conn);
3160 	} else if (type == L2CAP_IT_FIXED_CHAN) {
3161 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3162 		conn->info_ident = 0;
3164 		l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.
 * AMP channel creation is not implemented: after validating the command
 * length, every request is rejected with L2CAP_CR_NO_MEM (see the
 * placeholder comment below).
 */
3170 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3171 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3174 	struct l2cap_create_chan_req *req = data;
3175 	struct l2cap_create_chan_rsp rsp;
/* Malformed payload length — reject before touching the fields. */
3178 	if (cmd_len != sizeof(*req))
3184 	psm = le16_to_cpu(req->psm);
3185 	scid = le16_to_cpu(req->scid);
3187 	BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3189 	/* Placeholder: Always reject */
3191 	rsp.scid = cpu_to_le16(scid);
3192 	rsp.result = L2CAP_CR_NO_MEM;
3193 	rsp.status = L2CAP_CS_NO_INFO;
3195 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response.
 * The response layout matches a Connection Response, so it is simply
 * delegated to l2cap_connect_rsp().
 */
3201 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3202 					struct l2cap_cmd_hdr *cmd, void *data)
3204 	BT_DBG("conn %p", conn);
3206 	return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response carrying the given initiator CID and
 * result code, reusing the ident of the request being answered.
 */
3209 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3210 					u16 icid, u16 result)
3212 	struct l2cap_move_chan_rsp rsp;
3214 	BT_DBG("icid %d, result %d", icid, result);
3216 	rsp.icid = cpu_to_le16(icid);
3217 	rsp.result = cpu_to_le16(result);
3219 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirmation for @icid with the given result.
 * A fresh ident is allocated and remembered in chan->ident so the
 * matching confirmation response can be correlated later.
 * NOTE(review): chan is dereferenced unconditionally — callers must not
 * pass NULL (but see l2cap_move_channel_rsp below, which does).
 */
3222 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3223 				struct l2cap_chan *chan, u16 icid, u16 result)
3225 	struct l2cap_move_chan_cfm cfm;
3228 	BT_DBG("icid %d, result %d", icid, result);
3230 	ident = l2cap_get_ident(conn);
3232 		chan->ident = ident;
3234 	cfm.icid = cpu_to_le16(icid);
3235 	cfm.result = cpu_to_le16(result);
3237 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirmation Response for @icid, echoing the
 * ident of the confirmation being acknowledged.
 */
3240 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3243 	struct l2cap_move_chan_cfm_rsp rsp;
3245 	BT_DBG("icid %d", icid);
3247 	rsp.icid = cpu_to_le16(icid);
3248 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request (AMP channel move).
 * Channel moves are not implemented: after a length check every request
 * is refused with L2CAP_MR_NOT_ALLOWED (see placeholder comment below).
 */
3251 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3252 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3254 	struct l2cap_move_chan_req *req = data;
3256 	u16 result = L2CAP_MR_NOT_ALLOWED;
3258 	if (cmd_len != sizeof(*req))
3261 	icid = le16_to_cpu(req->icid);
3263 	BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3268 	/* Placeholder: Always refuse */
3269 	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.
 * Placeholder implementation: always answer with an UNCONFIRMED
 * confirmation.
 * FIXME(review): passing chan == NULL here will NULL-dereference inside
 * l2cap_send_move_chan_cfm(), which writes chan->ident unconditionally.
 * A NULL-safe confirmation sender (icid-only variant) is needed.
 */
3274 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3275 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3277 	struct l2cap_move_chan_rsp *rsp = data;
3280 	if (cmd_len != sizeof(*rsp))
3283 	icid = le16_to_cpu(rsp->icid);
3284 	result = le16_to_cpu(rsp->result);
3286 	BT_DBG("icid %d, result %d", icid, result);
3288 	/* Placeholder: Always unconfirmed */
3289 	l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation: validate the length and always
 * acknowledge with a Confirmation Response (no move state is tracked).
 */
3294 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3295 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3297 	struct l2cap_move_chan_cfm *cfm = data;
3300 	if (cmd_len != sizeof(*cfm))
3303 	icid = le16_to_cpu(cfm->icid);
3304 	result = le16_to_cpu(cfm->result);
3306 	BT_DBG("icid %d, result %d", icid, result);
3308 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response: only length-checked and
 * logged; no further action is taken in this placeholder.
 */
3313 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3314 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3316 	struct l2cap_move_chan_cfm_rsp *rsp = data;
3319 	if (cmd_len != sizeof(*rsp))
3322 	icid = le16_to_cpu(rsp->icid);
3324 	BT_DBG("icid %d", icid);
/* Validate LE connection parameters from a Connection Parameter Update
 * Request. Units: min/max interval in 1.25 ms steps, supervision
 * timeout (to_multiplier) in 10 ms steps, latency in connection events.
 * The timeout must exceed the effective max interval, and the slave
 * latency may not exceed 499 nor the value that would defeat the
 * supervision timeout (max_latency).
 */
3329 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3334 	if (min > max || min < 6 || max > 3200)
3337 	if (to_multiplier < 10 || to_multiplier > 3200)
/* to_multiplier*8 converts 10 ms units into 1.25 ms units. */
3340 	if (max >= to_multiplier * 8)
3343 	max_latency = (to_multiplier * 8 / max) - 1;
3344 	if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (slave -> master).
 * Only the master may honour it (HCI_LM_MASTER check). The parameters
 * are validated with l2cap_check_conn_param(); the accept/reject result
 * is returned to the peer and, on acceptance, pushed to the controller
 * via hci_le_conn_update().
 */
3350 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3351 					struct l2cap_cmd_hdr *cmd, u8 *data)
3353 	struct hci_conn *hcon = conn->hcon;
3354 	struct l2cap_conn_param_update_req *req;
3355 	struct l2cap_conn_param_update_rsp rsp;
3356 	u16 min, max, latency, to_multiplier, cmd_len;
3359 	if (!(hcon->link_mode & HCI_LM_MASTER))
3362 	cmd_len = __le16_to_cpu(cmd->len);
3363 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3366 	req = (struct l2cap_conn_param_update_req *) data;
3367 	min		= __le16_to_cpu(req->min);
3368 	max		= __le16_to_cpu(req->max);
3369 	latency		= __le16_to_cpu(req->latency);
3370 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
3372 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3373 						min, max, latency, to_multiplier);
3375 	memset(&rsp, 0, sizeof(rsp));
3377 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3379 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3381 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3383 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: ask the controller to apply the new parameters. */
3387 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler based on the
 * command code. Echo requests are answered inline by reflecting the
 * payload; unknown codes are logged as errors.
 */
3392 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3393 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3397 	switch (cmd->code) {
3398 	case L2CAP_COMMAND_REJ:
3399 		l2cap_command_rej(conn, cmd, data);
3402 	case L2CAP_CONN_REQ:
3403 		err = l2cap_connect_req(conn, cmd, data);
3406 	case L2CAP_CONN_RSP:
3407 		err = l2cap_connect_rsp(conn, cmd, data);
3410 	case L2CAP_CONF_REQ:
3411 		err = l2cap_config_req(conn, cmd, cmd_len, data);
3414 	case L2CAP_CONF_RSP:
3415 		err = l2cap_config_rsp(conn, cmd, data);
3418 	case L2CAP_DISCONN_REQ:
3419 		err = l2cap_disconnect_req(conn, cmd, data);
3422 	case L2CAP_DISCONN_RSP:
3423 		err = l2cap_disconnect_rsp(conn, cmd, data);
3426 	case L2CAP_ECHO_REQ:
/* Echo: bounce the request payload straight back as the response. */
3427 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3430 	case L2CAP_ECHO_RSP:
3433 	case L2CAP_INFO_REQ:
3434 		err = l2cap_information_req(conn, cmd, data);
3437 	case L2CAP_INFO_RSP:
3438 		err = l2cap_information_rsp(conn, cmd, data);
3441 	case L2CAP_CREATE_CHAN_REQ:
3442 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3445 	case L2CAP_CREATE_CHAN_RSP:
3446 		err = l2cap_create_channel_rsp(conn, cmd, data);
3449 	case L2CAP_MOVE_CHAN_REQ:
3450 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3453 	case L2CAP_MOVE_CHAN_RSP:
3454 		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3457 	case L2CAP_MOVE_CHAN_CFM:
3458 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3461 	case L2CAP_MOVE_CHAN_CFM_RSP:
3462 		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3466 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command. Only the Connection Parameter
 * Update Request is actively handled; command-reject and update
 * responses fall through, and unknown codes are logged.
 */
3474 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3475 					struct l2cap_cmd_hdr *cmd, u8 *data)
3477 	switch (cmd->code) {
3478 	case L2CAP_COMMAND_REJ:
3481 	case L2CAP_CONN_PARAM_UPDATE_REQ:
3482 		return l2cap_conn_param_update_req(conn, cmd, data);
3484 	case L2CAP_CONN_PARAM_UPDATE_RSP:
3488 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload: iterate over the packed command
 * headers in the skb, validate each header (claimed length must fit in
 * the remaining bytes and ident must be non-zero), then dispatch to the
 * LE or BR/EDR handler depending on the link type. On handler failure a
 * Command Reject (NOT_UNDERSTOOD) is returned to the peer.
 */
3493 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3494 						struct sk_buff *skb)
3496 	u8 *data = skb->data;
3498 	struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
3501 	l2cap_raw_recv(conn, skb);
3503 	while (len >= L2CAP_CMD_HDR_SIZE) {
3505 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3506 		data += L2CAP_CMD_HDR_SIZE;
3507 		len  -= L2CAP_CMD_HDR_SIZE;
3509 		cmd_len = le16_to_cpu(cmd.len);
3511 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3513 		if (cmd_len > len || !cmd.ident) {
3514 			BT_DBG("corrupted command");
3518 		if (conn->hcon->type == LE_LINK)
3519 			err = l2cap_le_sig_cmd(conn, &cmd, data);
3521 			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3524 			struct l2cap_cmd_rej_unk rej;
3526 			BT_ERR("Wrong link type (%d)", err);
3528 			/* FIXME: Map err to a valid reason */
3529 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3530 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS at the tail of a received ERTM/streaming frame.
 * The FCS covers the L2CAP header (enhanced or extended, depending on
 * FLAG_EXT_CTRL) plus the payload; the skb has already been pulled past
 * the header, hence the "skb->data - hdr_size" base for the CRC.
 * The two FCS bytes are trimmed off before comparison (after skb_trim,
 * skb->data + skb->len still points at the trimmed-off FCS bytes).
 */
3540 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
3542 	u16 our_fcs, rcv_fcs;
3545 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3546 		hdr_size = L2CAP_EXT_HDR_SIZE;
3548 		hdr_size = L2CAP_ENH_HDR_SIZE;
3550 	if (chan->fcs == L2CAP_FCS_CRC16) {
3551 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3552 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3553 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3555 		if (our_fcs != rcv_fcs)
/* After a poll (P-bit) from the peer, answer with whatever is pending:
 * RNR if we are locally busy, otherwise pending I-frames (retransmits
 * first if the remote was busy), and finally a plain RR if nothing was
 * sent at all — ensuring the peer always gets an F-bit response.
 */
3561 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3565 	chan->frames_sent = 0;
3567 	control |= __set_reqseq(chan, chan->buffer_seq);
3569 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3570 		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3571 		l2cap_send_sframe(chan, control);
3572 		set_bit(CONN_RNR_SENT, &chan->conn_state);
3575 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3576 		l2cap_retransmit_frames(chan);
3578 	l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: send an explicit RR ack. */
3580 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3581 			chan->frames_sent == 0) {
3582 		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3583 		l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ reassembly queue,
 * keeping the queue ordered by tx_seq offset relative to buffer_seq.
 * Duplicates (same tx_seq already queued) are not inserted; the frame's
 * tx_seq and SAR bits are stashed in the skb control block first.
 */
3587 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3589 	struct sk_buff *next_skb;
3590 	int tx_seq_offset, next_tx_seq_offset;
3592 	bt_cb(skb)->tx_seq = tx_seq;
3593 	bt_cb(skb)->sar = sar;
3595 	next_skb = skb_peek(&chan->srej_q);
3597 	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* Walk the queue looking for the first entry that sorts after us. */
3600 		if (bt_cb(next_skb)->tx_seq == tx_seq)
3603 		next_tx_seq_offset = __seq_offset(chan,
3604 				bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3606 		if (next_tx_seq_offset > tx_seq_offset) {
3607 			__skb_queue_before(&chan->srej_q, next_skb, skb);
3611 		if (skb_queue_is_last(&chan->srej_q, next_skb))
3614 		next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Largest sequence so far: append at the tail. */
3617 	__skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's frag_list, tracking the current tail in
 * *last_frag so repeated appends stay O(1). The parent skb's length
 * accounting (len, data_len, truesize) is updated to cover the new
 * fragment.
 */
3622 static void append_skb_frag(struct sk_buff *skb,
3623 			struct sk_buff *new_frag, struct sk_buff **last_frag)
3625 	/* skb->len reflects data in skb as well as all fragments
3626 	 * skb->data_len reflects only data in fragments
3628 	if (!skb_has_frag_list(skb))
3629 		skb_shinfo(skb)->frag_list = new_frag;
3631 	new_frag->next = NULL;
3633 	(*last_frag)->next = new_frag;
3634 	*last_frag = new_frag;
3636 	skb->len += new_frag->len;
3637 	skb->data_len += new_frag->len;
3638 	skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged I-frames and deliver it upstream.
 * UNSEGMENTED frames go straight to chan->ops->recv(). START frames
 * record the announced SDU length (rejected if above the channel MTU)
 * and seed chan->sdu; CONTINUE/END frames are chained on as fragments,
 * and on END the completed SDU (whose total length must match the
 * announced sdu_len) is delivered. On error paths the partial SDU is
 * freed and the reassembly state reset.
 */
3641 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3645 	switch (__get_ctrl_sar(chan, control)) {
3646 	case L2CAP_SAR_UNSEGMENTED:
3650 		err = chan->ops->recv(chan->data, skb);
3653 	case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix. */
3657 		chan->sdu_len = get_unaligned_le16(skb->data);
3658 		skb_pull(skb, L2CAP_SDULEN_SIZE);
3660 		if (chan->sdu_len > chan->imtu) {
3665 		if (skb->len >= chan->sdu_len)
3669 		chan->sdu_last_frag = skb;
3675 	case L2CAP_SAR_CONTINUE:
3679 		append_skb_frag(chan->sdu, skb,
3680 				&chan->sdu_last_frag);
3683 		if (chan->sdu->len >= chan->sdu_len)
3693 		append_skb_frag(chan->sdu, skb,
3694 				&chan->sdu_last_frag);
/* Completed SDU must be exactly the announced length. */
3697 		if (chan->sdu->len != chan->sdu_len)
3700 		err = chan->ops->recv(chan->data, chan->sdu);
3703 			/* Reassembly complete */
3705 			chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
3713 		kfree_skb(chan->sdu);
3715 		chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: flag the condition, tell the peer
 * with an RNR S-frame carrying our current buffer_seq, remember that
 * RNR was sent, and stop acking (ack timer cleared).
 */
3722 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3726 	BT_DBG("chan %p, Enter local busy", chan);
3728 	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3730 	control = __set_reqseq(chan, chan->buffer_seq);
3731 	control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3732 	l2cap_send_sframe(chan, control);
3734 	set_bit(CONN_RNR_SENT, &chan->conn_state);
3736 	__clear_ack_timer(chan);
/* Leave the ERTM local-busy state. If an RNR had been sent, poll the
 * peer with an RR carrying the P-bit and start the monitor timer while
 * waiting for the F-bit response (CONN_WAIT_F); then clear the busy and
 * RNR-sent flags.
 */
3739 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3743 	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3746 	control = __set_reqseq(chan, chan->buffer_seq);
3747 	control |= __set_ctrl_poll(chan);
3748 	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3749 	l2cap_send_sframe(chan, control);
3750 	chan->retry_count = 1;
3752 	__clear_retrans_timer(chan);
3753 	__set_monitor_timer(chan);
3755 	set_bit(CONN_WAIT_F, &chan->conn_state);
3758 	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3759 	clear_bit(CONN_RNR_SENT, &chan->conn_state);
3761 	BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle local-busy flow control.
 * Only meaningful in ERTM mode; enters or exits local busy based on
 * @busy.
 */
3764 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3766 	if (chan->mode == L2CAP_MODE_ERTM) {
3768 			l2cap_ertm_enter_local_busy(chan);
3770 			l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue: deliver queued frames upstream in order for as
 * long as the head matches the next expected tx_seq and we are not
 * locally busy. A reassembly error triggers a disconnect request.
 * buffer_seq_srej advances with each delivered frame.
 */
3774 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3776 	struct sk_buff *skb;
3779 	while ((skb = skb_peek(&chan->srej_q)) &&
3780 			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Gap still present: stop at the first missing sequence number. */
3783 		if (bt_cb(skb)->tx_seq != tx_seq)
3786 		skb = skb_dequeue(&chan->srej_q);
3787 		control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3788 		err = l2cap_reassemble_sdu(chan, skb, control);
3791 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3795 		chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3796 		tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for the sequence numbers still recorded in the
 * srej_l list, up to (and removing) the entry matching @tx_seq; each
 * remaining entry is re-queued at the tail after its SREJ is sent.
 */
3800 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3802 	struct srej_list *l, *tmp;
3805 	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3806 		if (l->tx_seq == tx_seq) {
3811 		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3812 		control |= __set_reqseq(chan, l->tx_seq);
3813 		l2cap_send_sframe(chan, control);
3815 		list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq and
 * the received @tx_seq, recording each requested sequence in srej_l
 * (GFP_ATOMIC allocation — failure is the elided error path) and
 * advancing expected_tx_seq past the gap and the received frame.
 */
3819 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3821 	struct srej_list *new;
3824 	while (tx_seq != chan->expected_tx_seq) {
3825 		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3826 		control |= __set_reqseq(chan, chan->expected_tx_seq);
3827 		l2cap_send_sframe(chan, control);
3829 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3833 		new->tx_seq = chan->expected_tx_seq;
3835 		chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3837 		list_add_tail(&new->list, &chan->srej_l);
/* Skip over the frame that triggered the SREJ run itself. */
3840 	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path. Handles, in order:
 *  - F-bit response to an outstanding poll (clears WAIT_F, restarts the
 *    retransmission timer if frames are still unacked);
 *  - piggybacked acknowledgements (req_seq drops acked frames);
 *  - invalid tx_seq outside the transmit window -> disconnect;
 *  - out-of-sequence frames: queue into srej_q, send/resend SREJs, and
 *    enter/exit the SREJ_SENT recovery state as gaps open and close;
 *  - in-sequence frames: reassemble and deliver, advance buffer_seq,
 *    and ack every (tx_win/6 + 1) frames or arm the ack timer.
 * NOTE(review): this excerpt elides several branch/return lines of the
 * state machine; statement order here is load-bearing — do not reorder.
 */
3845 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3847 	u16 tx_seq = __get_txseq(chan, rx_control);
3848 	u16 req_seq = __get_reqseq(chan, rx_control);
3849 	u8 sar = __get_ctrl_sar(chan, rx_control);
3850 	int tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold: roughly a sixth of the transmit window. */
3851 	int num_to_ack = (chan->tx_win/6) + 1;
3854 	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3855 							tx_seq, rx_control);
3857 	if (__is_ctrl_final(chan, rx_control) &&
3858 			test_bit(CONN_WAIT_F, &chan->conn_state)) {
3859 		__clear_monitor_timer(chan);
3860 		if (chan->unacked_frames > 0)
3861 			__set_retrans_timer(chan);
3862 		clear_bit(CONN_WAIT_F, &chan->conn_state);
3865 	chan->expected_ack_seq = req_seq;
3866 	l2cap_drop_acked_frames(chan);
3868 	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3870 	/* invalid tx_seq */
3871 	if (tx_seq_offset >= chan->tx_win) {
3872 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3876 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3879 	if (tx_seq == chan->expected_tx_seq)
3882 	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3883 		struct srej_list *first;
3885 		first = list_first_entry(&chan->srej_l,
3886 				struct srej_list, list);
3887 		if (tx_seq == first->tx_seq) {
/* The oldest missing frame arrived: queue it and try to close the gap. */
3888 			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3889 			l2cap_check_srej_gap(chan, tx_seq);
3891 			list_del(&first->list);
3894 			if (list_empty(&chan->srej_l)) {
/* All gaps filled: leave SREJ recovery and resume normal acking. */
3895 				chan->buffer_seq = chan->buffer_seq_srej;
3896 				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3897 				l2cap_send_ack(chan);
3898 				BT_DBG("chan %p, Exit SREJ_SENT", chan);
3901 			struct srej_list *l;
3903 			/* duplicated tx_seq */
3904 			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3907 			list_for_each_entry(l, &chan->srej_l, list) {
3908 				if (l->tx_seq == tx_seq) {
3909 					l2cap_resend_srejframe(chan, tx_seq);
3914 			err = l2cap_send_srejframe(chan, tx_seq);
3916 				l2cap_send_disconn_req(chan->conn, chan, -err);
3921 		expected_tx_seq_offset = __seq_offset(chan,
3922 				chan->expected_tx_seq, chan->buffer_seq);
3924 		/* duplicated tx_seq */
3925 		if (tx_seq_offset < expected_tx_seq_offset)
/* First gap detected: enter SREJ recovery. */
3928 		set_bit(CONN_SREJ_SENT, &chan->conn_state);
3930 		BT_DBG("chan %p, Enter SREJ", chan);
3932 		INIT_LIST_HEAD(&chan->srej_l);
3933 		chan->buffer_seq_srej = chan->buffer_seq;
3935 		__skb_queue_head_init(&chan->srej_q);
3936 		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3938 		set_bit(CONN_SEND_PBIT, &chan->conn_state);
3940 		err = l2cap_send_srejframe(chan, tx_seq);
3942 			l2cap_send_disconn_req(chan->conn, chan, -err);
3946 		__clear_ack_timer(chan);
/* Expected in-sequence frame path. */
3951 	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3953 	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3954 		bt_cb(skb)->tx_seq = tx_seq;
3955 		bt_cb(skb)->sar = sar;
3956 		__skb_queue_tail(&chan->srej_q, skb);
3960 	err = l2cap_reassemble_sdu(chan, skb, rx_control);
3961 	chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3964 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3968 	if (__is_ctrl_final(chan, rx_control)) {
3969 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3970 			l2cap_retransmit_frames(chan);
3974 	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3975 	if (chan->num_acked == num_to_ack - 1)
3976 		l2cap_send_ack(chan);
3978 		__set_ack_timer(chan);
/* Handle a received RR (Receive Ready) S-frame: acknowledge frames up
 * to req_seq, then respond according to the P/F bits — a poll gets an
 * F-bit reply (SREJ tail or I/RR/RNR), a final clears remote-busy and
 * may trigger retransmission, and an ordinary RR resumes transmission.
 */
3987 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3989 	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3990 				__get_reqseq(chan, rx_control), rx_control);
3992 	chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3993 	l2cap_drop_acked_frames(chan);
3995 	if (__is_ctrl_poll(chan, rx_control)) {
3996 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
3997 		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3998 			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3999 					(chan->unacked_frames > 0))
4000 				__set_retrans_timer(chan);
4002 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4003 			l2cap_send_srejtail(chan);
4005 			l2cap_send_i_or_rr_or_rnr(chan);
4008 	} else if (__is_ctrl_final(chan, rx_control)) {
4009 		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit: only retransmit if a REJ wasn't already acted upon. */
4011 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4012 			l2cap_retransmit_frames(chan);
4015 		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4016 				(chan->unacked_frames > 0))
4017 			__set_retrans_timer(chan);
4019 		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4020 		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4021 			l2cap_send_ack(chan);
4023 			l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: the peer rejects everything from
 * req_seq onward. Ack up to that point and retransmit; when the REJ
 * carries the F-bit, retransmit only if REJ_ACT wasn't already set, and
 * while waiting for an F-bit (WAIT_F) record that the REJ was acted on.
 */
4027 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4029 	u16 tx_seq = __get_reqseq(chan, rx_control);
4031 	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4033 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4035 	chan->expected_ack_seq = tx_seq;
4036 	l2cap_drop_acked_frames(chan);
4038 	if (__is_ctrl_final(chan, rx_control)) {
4039 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4040 			l2cap_retransmit_frames(chan);
4042 		l2cap_retransmit_frames(chan);
4044 		if (test_bit(CONN_WAIT_F, &chan->conn_state))
4045 			set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame: the peer selectively asks for one
 * frame (req_seq) again. Behaviour depends on the P/F bits:
 *  - P-bit: ack up to req_seq, retransmit that one frame with the
 *    F-bit set, then resume sending; remember the reqseq if WAIT_F;
 *  - F-bit: clear SREJ_ACT if this answers our earlier poll;
 *  - neither: plain single-frame retransmit, tracking SREJ_ACT while
 *    a poll response is outstanding.
 */
4048 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4050 	u16 tx_seq = __get_reqseq(chan, rx_control);
4052 	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4054 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4056 	if (__is_ctrl_poll(chan, rx_control)) {
4057 		chan->expected_ack_seq = tx_seq;
4058 		l2cap_drop_acked_frames(chan);
4060 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
4061 		l2cap_retransmit_one_frame(chan, tx_seq);
4063 		l2cap_ertm_send(chan);
4065 		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4066 			chan->srej_save_reqseq = tx_seq;
4067 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
4069 	} else if (__is_ctrl_final(chan, rx_control)) {
4070 		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4071 				chan->srej_save_reqseq == tx_seq)
4072 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4074 			l2cap_retransmit_one_frame(chan, tx_seq);
4076 		l2cap_retransmit_one_frame(chan, tx_seq);
4077 		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4078 			chan->srej_save_reqseq = tx_seq;
4079 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receive Not Ready): mark the remote busy, ack
 * frames up to req_seq and stop the retransmission timer. A P-bit RNR
 * must be answered with an F-bit frame — either the pending SREJ tail
 * (when in SREJ recovery) or an RR/RNR response.
 */
4084 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4086 	u16 tx_seq = __get_reqseq(chan, rx_control);
4088 	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4090 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4091 	chan->expected_ack_seq = tx_seq;
4092 	l2cap_drop_acked_frames(chan);
4094 	if (__is_ctrl_poll(chan, rx_control))
4095 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
4097 	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4098 		__clear_retrans_timer(chan);
4099 		if (__is_ctrl_poll(chan, rx_control))
4100 			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4104 	if (__is_ctrl_poll(chan, rx_control)) {
4105 		l2cap_send_srejtail(chan);
4107 		rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4108 		l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame by its supervisory function (RR / REJ /
 * SREJ / RNR). An F-bit answering an outstanding poll first clears
 * WAIT_F and restarts the retransmission timer if frames remain
 * unacknowledged.
 */
4112 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4114 	BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4116 	if (__is_ctrl_final(chan, rx_control) &&
4117 			test_bit(CONN_WAIT_F, &chan->conn_state)) {
4118 		__clear_monitor_timer(chan);
4119 		if (chan->unacked_frames > 0)
4120 			__set_retrans_timer(chan);
4121 		clear_bit(CONN_WAIT_F, &chan->conn_state);
4124 	switch (__get_ctrl_super(chan, rx_control)) {
4125 	case L2CAP_SUPER_RR:
4126 		l2cap_data_channel_rrframe(chan, rx_control);
4129 	case L2CAP_SUPER_REJ:
4130 		l2cap_data_channel_rejframe(chan, rx_control);
4133 	case L2CAP_SUPER_SREJ:
4134 		l2cap_data_channel_srejframe(chan, rx_control);
4137 	case L2CAP_SUPER_RNR:
4138 		l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for a received ERTM frame on a socket. Pulls the control
 * field, verifies the FCS (bad frames are silently dropped — the peer's
 * recovery will re-request them), computes the payload length net of
 * SAR/FCS overhead, sanity-checks it against the MPS and validates the
 * piggybacked req_seq against the window of unacknowledged frames, then
 * routes the frame to the I-frame or S-frame handler. Any protocol
 * violation tears the channel down with ECONNRESET.
 */
4146 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4148 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4151 	int len, next_tx_seq_offset, req_seq_offset;
4153 	control = __get_control(chan, skb->data);
4154 	skb_pull(skb, __ctrl_size(chan));
4158 	 * We can just drop the corrupted I-frame here.
4159 	 * Receiver will miss it and start proper recovery
4160 	 * procedures and ask retransmission.
4162 	if (l2cap_check_fcs(chan, skb))
4165 	if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4166 		len -= L2CAP_SDULEN_SIZE;
4168 	if (chan->fcs == L2CAP_FCS_CRC16)
4169 		len -= L2CAP_FCS_SIZE;
4171 	if (len > chan->mps) {
4172 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4176 	req_seq = __get_reqseq(chan, control);
4178 	req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4180 	next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4181 						chan->expected_ack_seq);
4183 	/* check for invalid req-seq */
4184 	if (req_seq_offset > next_tx_seq_offset) {
4185 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4189 	if (!__is_sframe(chan, control)) {
/* I-frame; the elided branch rejects zero-length I-frames. */
4191 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4195 		l2cap_data_channel_iframe(chan, control, skb);
/* S-frame; the elided branch rejects S-frames carrying a payload. */
4199 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4203 		l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a received data frame to the channel identified by @cid.
 * Basic mode: MTU-check and hand the skb to the channel's recv op.
 * ERTM: delegate to l2cap_ertm_data_rcv(). Streaming: verify FCS and
 * length, drop any partial SDU when a gap in tx_seq is detected (no
 * retransmission in streaming mode), then reassemble. Unknown modes are
 * logged and the frame dropped.
 */
4213 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4215 	struct l2cap_chan *chan;
4216 	struct sock *sk = NULL;
4221 	chan = l2cap_get_chan_by_scid(conn, cid);
4223 		BT_DBG("unknown cid 0x%4.4x", cid);
4229 	BT_DBG("chan %p, len %d", chan, skb->len);
4231 	if (chan->state != BT_CONNECTED)
4234 	switch (chan->mode) {
4235 	case L2CAP_MODE_BASIC:
4236 		/* If socket recv buffers overflows we drop data here
4237 		 * which is *bad* because L2CAP has to be reliable.
4238 		 * But we don't have any other choice. L2CAP doesn't
4239 		 * provide flow control mechanism. */
4241 		if (chan->imtu < skb->len)
4244 		if (!chan->ops->recv(chan->data, skb))
4248 	case L2CAP_MODE_ERTM:
4249 		l2cap_ertm_data_rcv(sk, skb);
4253 	case L2CAP_MODE_STREAMING:
4254 		control = __get_control(chan, skb->data);
4255 		skb_pull(skb, __ctrl_size(chan));
4258 		if (l2cap_check_fcs(chan, skb))
4261 		if (__is_sar_start(chan, control))
4262 			len -= L2CAP_SDULEN_SIZE;
4264 		if (chan->fcs == L2CAP_FCS_CRC16)
4265 			len -= L2CAP_FCS_SIZE;
/* Streaming carries no S-frames; reject those and oversized payloads. */
4267 		if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4270 		tx_seq = __get_txseq(chan, control);
4272 		if (chan->expected_tx_seq != tx_seq) {
4273 			/* Frame(s) missing - must discard partial SDU */
4274 			kfree_skb(chan->sdu);
4276 			chan->sdu_last_frag = NULL;
4279 			/* TODO: Notify userland of missing data */
4282 		chan->expected_tx_seq = __next_seq(chan, tx_seq);
4284 		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4285 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4290 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (G-frame) payload: look up a global channel
 * bound to @psm on our source address, check state (BOUND/CONNECTED)
 * and MTU, then hand the skb to the channel's recv op.
 */
4304 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4306 	struct sock *sk = NULL;
4307 	struct l2cap_chan *chan;
4309 	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4317 	BT_DBG("sk %p, len %d", sk, skb->len);
4319 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4322 	if (chan->imtu < skb->len)
4325 	if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed-channel) payload: look up the global channel
 * registered for @cid on our source address, check state and MTU, then
 * hand the skb to the channel's recv op. Mirrors l2cap_conless_channel
 * but keyed by CID instead of PSM.
 */
4337 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4339 	struct sock *sk = NULL;
4340 	struct l2cap_chan *chan;
4342 	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4350 	BT_DBG("sk %p, len %d", sk, skb->len);
4352 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4355 	if (chan->imtu < skb->len)
4358 	if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header, verify the advertised length against the skb, then route by
 * CID — signaling (BR/EDR or LE), connectionless (PSM-prefixed), ATT,
 * SMP (where a failure kills the whole connection), or a dynamic data
 * channel.
 */
4370 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4372 	struct l2cap_hdr *lh = (void *) skb->data;
4376 	skb_pull(skb, L2CAP_HDR_SIZE);
4377 	cid = __le16_to_cpu(lh->cid);
4378 	len = __le16_to_cpu(lh->len);
4380 	if (len != skb->len) {
4385 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
4388 	case L2CAP_CID_LE_SIGNALING:
4389 	case L2CAP_CID_SIGNALING:
4390 		l2cap_sig_channel(conn, skb);
4393 	case L2CAP_CID_CONN_LESS:
4394 		psm = get_unaligned_le16(skb->data);
4396 		l2cap_conless_channel(conn, psm, skb);
4399 	case L2CAP_CID_LE_DATA:
4400 		l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a signaling failure is fatal for the link. */
4404 		if (smp_sig_channel(conn, skb))
4405 			l2cap_conn_del(conn->hcon, EACCES);
4409 		l2cap_data_channel(conn, cid, skb);
4414 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * @bdaddr? Scan listening channels under chan_list_lock; a listener
 * bound to this adapter's own address (exact match) takes precedence
 * over wildcard (BDADDR_ANY) listeners. Returns the accumulated
 * HCI_LM_* link-mode flags of the winning group.
 */
4416 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4418 	int exact = 0, lm1 = 0, lm2 = 0;
4419 	struct l2cap_chan *c;
4421 	if (type != ACL_LINK)
4424 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4426 	/* Find listening sockets and check their link_mode */
4427 	read_lock(&chan_list_lock);
4428 	list_for_each_entry(c, &chan_list, global_l) {
4429 		struct sock *sk = c->sk;
4431 		if (c->state != BT_LISTEN)
4434 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4435 			lm1 |= HCI_LM_ACCEPT;
4436 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4437 				lm1 |= HCI_LM_MASTER;
4439 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4440 			lm2 |= HCI_LM_ACCEPT;
4441 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4442 				lm2 |= HCI_LM_MASTER;
4445 	read_unlock(&chan_list_lock);
4447 	return exact ? lm1 : lm2;
/* HCI callback: an ACL/LE link setup completed. On success, create the
 * L2CAP connection object and mark it ready; on failure, tear down any
 * L2CAP state with the HCI status mapped to an errno.
 */
4450 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4452 	struct l2cap_conn *conn;
4454 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4456 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4460 		conn = l2cap_conn_add(hcon, status);
4462 			l2cap_conn_ready(conn);
4464 		l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the HCI reason code to use when disconnecting
 * this link. Defaults to "remote user terminated" when the link is not
 * ACL/LE or has no L2CAP state; otherwise the recorded disc_reason.
 */
4469 static int l2cap_disconn_ind(struct hci_conn *hcon)
4471 	struct l2cap_conn *conn = hcon->l2cap_data;
4473 	BT_DBG("hcon %p", hcon);
4475 	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4476 		return HCI_ERROR_REMOTE_USER_TERM;
4478 	return conn->disc_reason;
/* HCI callback: the link is gone — delete all L2CAP state for it,
 * translating the HCI reason into an errno for the channels.
 */
4481 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4483 	BT_DBG("hcon %p reason %d", hcon, reason);
4485 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4488 	l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Encryption dropped: MEDIUM-security channels get a grace timer
 * (L2CAP_ENC_TIMEOUT) to re-encrypt, HIGH-security channels are closed
 * immediately. Encryption (re)established: cancel the grace timer for
 * MEDIUM-security channels.
 */
4493 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4495 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4498 	if (encrypt == 0x00) {
4499 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
4500 			__clear_chan_timer(chan);
4501 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4502 		} else if (chan->sec_level == BT_SECURITY_HIGH)
4503 			l2cap_chan_close(chan, ECONNREFUSED);
4505 		if (chan->sec_level == BT_SECURITY_MEDIUM)
4506 			__clear_chan_timer(chan);
/* HCI callback: an authentication/encryption procedure finished with
 * @status (0 = success) and encryption state @encrypt. For LE links,
 * kick SMP key distribution and stop the security timer. For each
 * channel on the link:
 *  - LE data channel: on success with encryption, raise the channel's
 *    security level and mark it ready;
 *  - established channels: re-evaluate via l2cap_check_encryption();
 *  - BT_CONNECT channels: on success, finally send the deferred
 *    Connection Request (otherwise arm the disconnect timer);
 *  - BT_CONNECT2 channels: answer the pending Connection Response —
 *    PEND/AUTHOR_PEND for defer_setup sockets, SUCCESS on plain
 *    success, SEC_BLOCK on failure.
 */
4510 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4512 	struct l2cap_conn *conn = hcon->l2cap_data;
4513 	struct l2cap_chan *chan;
4518 	BT_DBG("conn %p", conn);
4520 	if (hcon->type == LE_LINK) {
4521 		smp_distribute_keys(conn, 0);
4522 		cancel_delayed_work_sync(&conn->security_timer);
4527 	list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4528 		struct sock *sk = chan->sk;
4532 		BT_DBG("chan->scid %d", chan->scid);
4534 		if (chan->scid == L2CAP_CID_LE_DATA) {
4535 			if (!status && encrypt) {
4536 				chan->sec_level = hcon->sec_level;
4537 				l2cap_chan_ready(sk);
4544 		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4549 		if (!status && (chan->state == BT_CONNECTED ||
4550 						chan->state == BT_CONFIG)) {
4551 			l2cap_check_encryption(chan, encrypt);
4556 		if (chan->state == BT_CONNECT) {
/* Security now satisfied: issue the deferred Connection Request. */
4558 				struct l2cap_conn_req req;
4559 				req.scid = cpu_to_le16(chan->scid);
4560 				req.psm  = chan->psm;
4562 				chan->ident = l2cap_get_ident(conn);
4563 				set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4565 				l2cap_send_cmd(conn, chan->ident,
4566 					L2CAP_CONN_REQ, sizeof(req), &req);
4568 				__clear_chan_timer(chan);
4569 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4571 		} else if (chan->state == BT_CONNECT2) {
4572 			struct l2cap_conn_rsp rsp;
4576 				if (bt_sk(sk)->defer_setup) {
4577 					struct sock *parent = bt_sk(sk)->parent;
4578 					res = L2CAP_CR_PEND;
4579 					stat = L2CAP_CS_AUTHOR_PEND;
/* Wake the listening socket so userspace can authorize. */
4581 						parent->sk_data_ready(parent, 0);
4583 					l2cap_state_change(chan, BT_CONFIG);
4584 					res = L2CAP_CR_SUCCESS;
4585 					stat = L2CAP_CS_NO_INFO;
4588 				l2cap_state_change(chan, BT_DISCONN);
4589 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4590 				res = L2CAP_CR_SEC_BLOCK;
4591 				stat = L2CAP_CS_NO_INFO;
4594 			rsp.scid   = cpu_to_le16(chan->dcid);
4595 			rsp.dcid   = cpu_to_le16(chan->scid);
4596 			rsp.result = cpu_to_le16(res);
4597 			rsp.status = cpu_to_le16(stat);
4598 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/*
 * HCI callback: inbound ACL data for this connection.  Reassembles HCI
 * fragments into complete L2CAP frames: a start fragment (no ACL_CONT
 * flag) carries the basic L2CAP header whose 'len' field gives the
 * total frame size; continuation fragments are appended to
 * conn->rx_skb until conn->rx_len reaches zero, then the complete
 * frame is handed to l2cap_recv_frame().
 * NOTE(review): several lines are elided in this extract (some error
 * returns, closing braces, the continuation-path entry point);
 * comments describe only the visible code.
 */
4610 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4612 struct l2cap_conn *conn = hcon->l2cap_data;
/* No l2cap_conn yet for this hcon — create one on demand. */
4615 conn = l2cap_conn_add(hcon, 0);
4620 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4622 if (!(flags & ACL_CONT)) {
/* --- Start fragment --- */
4623 struct l2cap_hdr *hdr;
4624 struct l2cap_chan *chan;
/* A start fragment while reassembly is in progress means the tail of
 * the previous frame was lost: drop the partial frame. */
4629 BT_ERR("Unexpected start frame (len %d)", skb->len);
4630 kfree_skb(conn->rx_skb);
4631 conn->rx_skb = NULL;
4633 l2cap_conn_unreliable(conn, ECOMM);
4636 /* Start fragment always begin with Basic L2CAP header */
4637 if (skb->len < L2CAP_HDR_SIZE) {
4638 BT_ERR("Frame is too short (len %d)", skb->len);
4639 l2cap_conn_unreliable(conn, ECOMM);
/* Total frame length = payload length from the header + header size. */
4643 hdr = (struct l2cap_hdr *) skb->data;
4644 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4645 cid = __le16_to_cpu(hdr->cid);
/* Unfragmented frame: process it directly, no reassembly needed. */
4647 if (len == skb->len) {
4648 /* Complete frame received */
4649 l2cap_recv_frame(conn, skb);
4653 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
/* Start fragment longer than the announced frame — protocol error. */
4655 if (skb->len > len) {
4656 BT_ERR("Frame is too long (len %d, expected len %d)",
4658 l2cap_conn_unreliable(conn, ECOMM);
/* Reject frames exceeding the receiving channel's MTU before
 * allocating a reassembly buffer for them. */
4662 chan = l2cap_get_chan_by_scid(conn, cid);
4664 if (chan && chan->sk) {
4665 struct sock *sk = chan->sk;
4667 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4668 BT_ERR("Frame exceeding recv MTU (len %d, "
4672 l2cap_conn_unreliable(conn, ECOMM);
4678 /* Allocate skb for the complete frame (with header) */
4679 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
/* Copy this fragment in; rx_len tracks the bytes still outstanding. */
4683 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4685 conn->rx_len = len - skb->len;
/* --- Continuation fragment --- */
4687 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation arrived with no reassembly in progress. */
4689 if (!conn->rx_len) {
4690 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4691 l2cap_conn_unreliable(conn, ECOMM);
/* Fragment would overflow the expected frame — drop the whole frame. */
4695 if (skb->len > conn->rx_len) {
4696 BT_ERR("Fragment is too long (len %d, expected %d)",
4697 skb->len, conn->rx_len);
4698 kfree_skb(conn->rx_skb);
4699 conn->rx_skb = NULL;
4701 l2cap_conn_unreliable(conn, ECOMM);
/* Append the fragment; when rx_len hits zero the frame is complete. */
4705 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4707 conn->rx_len -= skb->len;
4709 if (!conn->rx_len) {
4710 /* Complete frame received */
4711 l2cap_recv_frame(conn, conn->rx_skb);
4712 conn->rx_skb = NULL;
/*
 * debugfs seq_file show handler: print one line per registered L2CAP
 * channel (source/dest addresses, state, PSM, CIDs, MTUs, security
 * level, mode) while holding the global channel-list lock.
 */
4721 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4723 struct l2cap_chan *c;
/* BH-safe read lock: the channel list is also touched from softirq. */
4725 read_lock_bh(&chan_list_lock);
4727 list_for_each_entry(c, &chan_list, global_l) {
4728 struct sock *sk = c->sk;
4730 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4731 batostr(&bt_sk(sk)->src),
4732 batostr(&bt_sk(sk)->dst),
/* PSM is stored little-endian (wire order); convert for display. */
4733 c->state, __le16_to_cpu(c->psm),
4734 c->scid, c->dcid, c->imtu, c->omtu,
4735 c->sec_level, c->mode);
4738 read_unlock_bh(&chan_list_lock);
/* debugfs open: bind the single-shot seq_file show handler above. */
4743 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4745 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry created in
 * l2cap_init().  (review: the .read initializer line — presumably
 * seq_read — is elided in this extract) */
4748 static const struct file_operations l2cap_debugfs_fops = {
4749 .open = l2cap_debugfs_open,
4751 .llseek = seq_lseek,
4752 .release = single_release,
/* Dentry of the debugfs file, kept so l2cap_exit() can remove it. */
4755 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: connect,
 * disconnect, security-confirm and inbound ACL-data events are routed
 * to the handlers in this file. */
4757 static struct hci_proto l2cap_hci_proto = {
4759 .id = HCI_PROTO_L2CAP,
4760 .connect_ind = l2cap_connect_ind,
4761 .connect_cfm = l2cap_connect_cfm,
4762 .disconn_ind = l2cap_disconn_ind,
4763 .disconn_cfm = l2cap_disconn_cfm,
4764 .security_cfm = l2cap_security_cfm,
4765 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the L2CAP socket family, hook the protocol
 * into the HCI core, and (best-effort) create the debugfs channel
 * dump.  Returns 0 on success or a negative errno.
 * NOTE(review): error-path labels and the checks between the calls
 * below are elided in this extract.
 */
4768 int __init l2cap_init(void)
4772 err = l2cap_init_sockets();
4776 err = hci_register_proto(&l2cap_hci_proto);
/* Roll back the socket registration if the HCI hookup failed. */
4778 BT_ERR("L2CAP protocol registration failed");
4779 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs is optional: failure is logged but does not abort init. */
4784 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4785 bt_debugfs, NULL, &l2cap_debugfs_fops);
4787 BT_ERR("Failed to create L2CAP debug file");
/* (review: reached from an elided error label) */
4793 l2cap_cleanup_sockets();
/* Module teardown: undo l2cap_init() in reverse order — remove the
 * debugfs file, unhook from the HCI core, unregister the sockets. */
4797 void l2cap_exit(void)
4799 debugfs_remove(l2cap_debugfs);
4801 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4802 BT_ERR("L2CAP protocol unregistration failed");
4804 l2cap_cleanup_sockets();
/* Allow disabling Enhanced Retransmission Mode at module load time or
 * via sysfs (world-readable, root-writable). */
4807 module_param(disable_ertm, bool, 0644);
4808 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");