2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt);
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
124 read_unlock(&conn->chan_lock);
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
141 struct l2cap_chan *c;
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
147 read_unlock(&conn->chan_lock);
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
169 write_lock_bh(&chan_list_lock);
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
194 write_unlock_bh(&chan_list_lock);
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
200 write_lock_bh(&chan_list_lock);
204 write_unlock_bh(&chan_list_lock);
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
211 u16 cid = L2CAP_CID_DYN_START;
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
223 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
231 BT_DBG("chan %p state %d", chan, chan->state);
233 if (timer_pending(timer) && del_timer(timer))
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
240 chan->ops->state_change(chan->data, state);
243 static void l2cap_chan_timeout(unsigned long arg)
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
249 BT_DBG("chan %p state %d", chan, chan->state);
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, HZ / 5);
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
269 l2cap_chan_close(chan, reason);
273 chan->ops->close(chan->data);
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
279 struct l2cap_chan *chan;
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
293 chan->state = BT_OPEN;
295 atomic_set(&chan->refcnt, 1);
297 BT_DBG("sk %p chan %p", sk, chan);
302 void l2cap_chan_destroy(struct l2cap_chan *chan)
304 write_lock_bh(&chan_list_lock);
305 list_del(&chan->global_l);
306 write_unlock_bh(&chan_list_lock);
311 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
313 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
314 chan->psm, chan->dcid);
316 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
320 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
321 if (conn->hcon->type == LE_LINK) {
323 chan->omtu = L2CAP_LE_DEFAULT_MTU;
324 chan->scid = L2CAP_CID_LE_DATA;
325 chan->dcid = L2CAP_CID_LE_DATA;
327 /* Alloc CID for connection-oriented socket */
328 chan->scid = l2cap_alloc_cid(conn);
329 chan->omtu = L2CAP_DEFAULT_MTU;
331 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
332 /* Connectionless socket */
333 chan->scid = L2CAP_CID_CONN_LESS;
334 chan->dcid = L2CAP_CID_CONN_LESS;
335 chan->omtu = L2CAP_DEFAULT_MTU;
337 /* Raw socket can send/recv signalling messages only */
338 chan->scid = L2CAP_CID_SIGNALING;
339 chan->dcid = L2CAP_CID_SIGNALING;
340 chan->omtu = L2CAP_DEFAULT_MTU;
343 chan->local_id = L2CAP_BESTEFFORT_ID;
344 chan->local_stype = L2CAP_SERV_BESTEFFORT;
345 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
346 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
347 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
348 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
352 list_add(&chan->list, &conn->chan_l);
356 * Must be called on the locked socket. */
357 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
359 struct sock *sk = chan->sk;
360 struct l2cap_conn *conn = chan->conn;
361 struct sock *parent = bt_sk(sk)->parent;
363 __clear_chan_timer(chan);
365 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
368 /* Delete from channel list */
369 write_lock_bh(&conn->chan_lock);
370 list_del(&chan->list);
371 write_unlock_bh(&conn->chan_lock);
375 hci_conn_put(conn->hcon);
378 l2cap_state_change(chan, BT_CLOSED);
379 sock_set_flag(sk, SOCK_ZAPPED);
385 bt_accept_unlink(sk);
386 parent->sk_data_ready(parent, 0);
388 sk->sk_state_change(sk);
390 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
391 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
394 skb_queue_purge(&chan->tx_q);
396 if (chan->mode == L2CAP_MODE_ERTM) {
397 struct srej_list *l, *tmp;
399 __clear_retrans_timer(chan);
400 __clear_monitor_timer(chan);
401 __clear_ack_timer(chan);
403 skb_queue_purge(&chan->srej_q);
405 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
412 static void l2cap_chan_cleanup_listen(struct sock *parent)
416 BT_DBG("parent %p", parent);
418 /* Close not yet accepted channels */
419 while ((sk = bt_accept_dequeue(parent, NULL))) {
420 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
421 __clear_chan_timer(chan);
423 l2cap_chan_close(chan, ECONNRESET);
425 chan->ops->close(chan->data);
429 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
431 struct l2cap_conn *conn = chan->conn;
432 struct sock *sk = chan->sk;
434 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
436 switch (chan->state) {
438 l2cap_chan_cleanup_listen(sk);
440 l2cap_state_change(chan, BT_CLOSED);
441 sock_set_flag(sk, SOCK_ZAPPED);
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 __clear_chan_timer(chan);
449 __set_chan_timer(chan, sk->sk_sndtimeo);
450 l2cap_send_disconn_req(conn, chan, reason);
452 l2cap_chan_del(chan, reason);
456 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
457 conn->hcon->type == ACL_LINK) {
458 struct l2cap_conn_rsp rsp;
461 if (bt_sk(sk)->defer_setup)
462 result = L2CAP_CR_SEC_BLOCK;
464 result = L2CAP_CR_BAD_PSM;
465 l2cap_state_change(chan, BT_DISCONN);
467 rsp.scid = cpu_to_le16(chan->dcid);
468 rsp.dcid = cpu_to_le16(chan->scid);
469 rsp.result = cpu_to_le16(result);
470 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
471 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
475 l2cap_chan_del(chan, reason);
480 l2cap_chan_del(chan, reason);
484 sock_set_flag(sk, SOCK_ZAPPED);
489 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
491 if (chan->chan_type == L2CAP_CHAN_RAW) {
492 switch (chan->sec_level) {
493 case BT_SECURITY_HIGH:
494 return HCI_AT_DEDICATED_BONDING_MITM;
495 case BT_SECURITY_MEDIUM:
496 return HCI_AT_DEDICATED_BONDING;
498 return HCI_AT_NO_BONDING;
500 } else if (chan->psm == cpu_to_le16(0x0001)) {
501 if (chan->sec_level == BT_SECURITY_LOW)
502 chan->sec_level = BT_SECURITY_SDP;
504 if (chan->sec_level == BT_SECURITY_HIGH)
505 return HCI_AT_NO_BONDING_MITM;
507 return HCI_AT_NO_BONDING;
509 switch (chan->sec_level) {
510 case BT_SECURITY_HIGH:
511 return HCI_AT_GENERAL_BONDING_MITM;
512 case BT_SECURITY_MEDIUM:
513 return HCI_AT_GENERAL_BONDING;
515 return HCI_AT_NO_BONDING;
520 /* Service level security */
521 static inline int l2cap_check_security(struct l2cap_chan *chan)
523 struct l2cap_conn *conn = chan->conn;
526 auth_type = l2cap_get_auth_type(chan);
528 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
531 static u8 l2cap_get_ident(struct l2cap_conn *conn)
535 /* Get next available identificator.
536 * 1 - 128 are used by kernel.
537 * 129 - 199 are reserved.
538 * 200 - 254 are used by utilities like l2ping, etc.
541 spin_lock_bh(&conn->lock);
543 if (++conn->tx_ident > 128)
548 spin_unlock_bh(&conn->lock);
553 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
555 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
558 BT_DBG("code 0x%2.2x", code);
563 if (lmp_no_flush_capable(conn->hcon->hdev))
564 flags = ACL_START_NO_FLUSH;
568 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
569 skb->priority = HCI_PRIO_MAX;
571 hci_send_acl(conn->hchan, skb, flags);
574 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
576 struct hci_conn *hcon = chan->conn->hcon;
579 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
582 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
583 lmp_no_flush_capable(hcon->hdev))
584 flags = ACL_START_NO_FLUSH;
588 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
589 hci_send_acl(chan->conn->hchan, skb, flags);
592 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
595 struct l2cap_hdr *lh;
596 struct l2cap_conn *conn = chan->conn;
599 if (chan->state != BT_CONNECTED)
602 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
603 hlen = L2CAP_EXT_HDR_SIZE;
605 hlen = L2CAP_ENH_HDR_SIZE;
607 if (chan->fcs == L2CAP_FCS_CRC16)
608 hlen += L2CAP_FCS_SIZE;
610 BT_DBG("chan %p, control 0x%8.8x", chan, control);
612 count = min_t(unsigned int, conn->mtu, hlen);
614 control |= __set_sframe(chan);
616 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
617 control |= __set_ctrl_final(chan);
619 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
620 control |= __set_ctrl_poll(chan);
622 skb = bt_skb_alloc(count, GFP_ATOMIC);
626 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
627 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
628 lh->cid = cpu_to_le16(chan->dcid);
630 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
632 if (chan->fcs == L2CAP_FCS_CRC16) {
633 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
634 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
637 skb->priority = HCI_PRIO_MAX;
638 l2cap_do_send(chan, skb);
641 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
643 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
644 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
645 set_bit(CONN_RNR_SENT, &chan->conn_state);
647 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
649 control |= __set_reqseq(chan, chan->buffer_seq);
651 l2cap_send_sframe(chan, control);
654 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
656 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
659 static void l2cap_do_start(struct l2cap_chan *chan)
661 struct l2cap_conn *conn = chan->conn;
663 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
664 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
667 if (l2cap_check_security(chan) &&
668 __l2cap_no_conn_pending(chan)) {
669 struct l2cap_conn_req req;
670 req.scid = cpu_to_le16(chan->scid);
673 chan->ident = l2cap_get_ident(conn);
674 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
676 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
680 struct l2cap_info_req req;
681 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
683 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
684 conn->info_ident = l2cap_get_ident(conn);
686 mod_timer(&conn->info_timer, jiffies +
687 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
689 l2cap_send_cmd(conn, conn->info_ident,
690 L2CAP_INFO_REQ, sizeof(req), &req);
694 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
696 u32 local_feat_mask = l2cap_feat_mask;
698 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
701 case L2CAP_MODE_ERTM:
702 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
703 case L2CAP_MODE_STREAMING:
704 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
710 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
713 struct l2cap_disconn_req req;
720 if (chan->mode == L2CAP_MODE_ERTM) {
721 __clear_retrans_timer(chan);
722 __clear_monitor_timer(chan);
723 __clear_ack_timer(chan);
726 req.dcid = cpu_to_le16(chan->dcid);
727 req.scid = cpu_to_le16(chan->scid);
728 l2cap_send_cmd(conn, l2cap_get_ident(conn),
729 L2CAP_DISCONN_REQ, sizeof(req), &req);
731 l2cap_state_change(chan, BT_DISCONN);
735 /* ---- L2CAP connections ---- */
736 static void l2cap_conn_start(struct l2cap_conn *conn)
738 struct l2cap_chan *chan, *tmp;
740 BT_DBG("conn %p", conn);
742 read_lock(&conn->chan_lock);
744 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
745 struct sock *sk = chan->sk;
749 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
754 if (chan->state == BT_CONNECT) {
755 struct l2cap_conn_req req;
757 if (!l2cap_check_security(chan) ||
758 !__l2cap_no_conn_pending(chan)) {
763 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
764 && test_bit(CONF_STATE2_DEVICE,
765 &chan->conf_state)) {
766 /* l2cap_chan_close() calls list_del(chan)
767 * so release the lock */
768 read_unlock(&conn->chan_lock);
769 l2cap_chan_close(chan, ECONNRESET);
770 read_lock(&conn->chan_lock);
775 req.scid = cpu_to_le16(chan->scid);
778 chan->ident = l2cap_get_ident(conn);
779 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
781 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
784 } else if (chan->state == BT_CONNECT2) {
785 struct l2cap_conn_rsp rsp;
787 rsp.scid = cpu_to_le16(chan->dcid);
788 rsp.dcid = cpu_to_le16(chan->scid);
790 if (l2cap_check_security(chan)) {
791 if (bt_sk(sk)->defer_setup) {
792 struct sock *parent = bt_sk(sk)->parent;
793 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
794 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
796 parent->sk_data_ready(parent, 0);
799 l2cap_state_change(chan, BT_CONFIG);
800 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
801 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
804 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
805 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
808 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
811 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
812 rsp.result != L2CAP_CR_SUCCESS) {
817 set_bit(CONF_REQ_SENT, &chan->conf_state);
818 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
819 l2cap_build_conf_req(chan, buf), buf);
820 chan->num_conf_req++;
826 read_unlock(&conn->chan_lock);
829 /* Find socket with cid and source bdaddr.
830 * Returns closest match, locked.
832 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
834 struct l2cap_chan *c, *c1 = NULL;
836 read_lock(&chan_list_lock);
838 list_for_each_entry(c, &chan_list, global_l) {
839 struct sock *sk = c->sk;
841 if (state && c->state != state)
844 if (c->scid == cid) {
846 if (!bacmp(&bt_sk(sk)->src, src)) {
847 read_unlock(&chan_list_lock);
852 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
857 read_unlock(&chan_list_lock);
862 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
864 struct sock *parent, *sk;
865 struct l2cap_chan *chan, *pchan;
869 /* Check if we have socket listening on cid */
870 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
877 bh_lock_sock(parent);
879 /* Check for backlog size */
880 if (sk_acceptq_is_full(parent)) {
881 BT_DBG("backlog full %d", parent->sk_ack_backlog);
885 chan = pchan->ops->new_connection(pchan->data);
891 write_lock_bh(&conn->chan_lock);
893 hci_conn_hold(conn->hcon);
895 bacpy(&bt_sk(sk)->src, conn->src);
896 bacpy(&bt_sk(sk)->dst, conn->dst);
898 bt_accept_enqueue(parent, sk);
900 __l2cap_chan_add(conn, chan);
902 __set_chan_timer(chan, sk->sk_sndtimeo);
904 l2cap_state_change(chan, BT_CONNECTED);
905 parent->sk_data_ready(parent, 0);
907 write_unlock_bh(&conn->chan_lock);
910 bh_unlock_sock(parent);
913 static void l2cap_chan_ready(struct sock *sk)
915 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
916 struct sock *parent = bt_sk(sk)->parent;
918 BT_DBG("sk %p, parent %p", sk, parent);
920 chan->conf_state = 0;
921 __clear_chan_timer(chan);
923 l2cap_state_change(chan, BT_CONNECTED);
924 sk->sk_state_change(sk);
927 parent->sk_data_ready(parent, 0);
930 static void l2cap_conn_ready(struct l2cap_conn *conn)
932 struct l2cap_chan *chan;
934 BT_DBG("conn %p", conn);
936 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
937 l2cap_le_conn_ready(conn);
939 if (conn->hcon->out && conn->hcon->type == LE_LINK)
940 smp_conn_security(conn, conn->hcon->pending_sec_level);
942 read_lock(&conn->chan_lock);
944 list_for_each_entry(chan, &conn->chan_l, list) {
945 struct sock *sk = chan->sk;
949 if (conn->hcon->type == LE_LINK) {
950 if (smp_conn_security(conn, chan->sec_level))
951 l2cap_chan_ready(sk);
953 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
954 __clear_chan_timer(chan);
955 l2cap_state_change(chan, BT_CONNECTED);
956 sk->sk_state_change(sk);
958 } else if (chan->state == BT_CONNECT)
959 l2cap_do_start(chan);
964 read_unlock(&conn->chan_lock);
967 /* Notify sockets that we cannot guaranty reliability anymore */
968 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
970 struct l2cap_chan *chan;
972 BT_DBG("conn %p", conn);
974 read_lock(&conn->chan_lock);
976 list_for_each_entry(chan, &conn->chan_l, list) {
977 struct sock *sk = chan->sk;
979 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
983 read_unlock(&conn->chan_lock);
986 static void l2cap_info_timeout(unsigned long arg)
988 struct l2cap_conn *conn = (void *) arg;
990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
991 conn->info_ident = 0;
993 l2cap_conn_start(conn);
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1007 kfree_skb(conn->rx_skb);
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1013 l2cap_chan_del(chan, err);
1015 chan->ops->close(chan->data);
1018 hci_chan_del(conn->hchan);
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 del_timer_sync(&conn->info_timer);
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1024 del_timer(&conn->security_timer);
1025 smp_chan_destroy(conn);
1028 hcon->l2cap_data = NULL;
1032 static void security_timeout(unsigned long arg)
1034 struct l2cap_conn *conn = (void *) arg;
1036 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1039 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1041 struct l2cap_conn *conn = hcon->l2cap_data;
1042 struct hci_chan *hchan;
1047 hchan = hci_chan_create(hcon);
1051 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1053 hci_chan_del(hchan);
1057 hcon->l2cap_data = conn;
1059 conn->hchan = hchan;
1061 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1063 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1064 conn->mtu = hcon->hdev->le_mtu;
1066 conn->mtu = hcon->hdev->acl_mtu;
1068 conn->src = &hcon->hdev->bdaddr;
1069 conn->dst = &hcon->dst;
1071 conn->feat_mask = 0;
1073 spin_lock_init(&conn->lock);
1074 rwlock_init(&conn->chan_lock);
1076 INIT_LIST_HEAD(&conn->chan_l);
1078 if (hcon->type == LE_LINK)
1079 setup_timer(&conn->security_timer, security_timeout,
1080 (unsigned long) conn);
1082 setup_timer(&conn->info_timer, l2cap_info_timeout,
1083 (unsigned long) conn);
1085 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1090 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1092 write_lock_bh(&conn->chan_lock);
1093 __l2cap_chan_add(conn, chan);
1094 write_unlock_bh(&conn->chan_lock);
1097 /* ---- Socket interface ---- */
1099 /* Find socket with psm and source bdaddr.
1100 * Returns closest match.
1102 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1104 struct l2cap_chan *c, *c1 = NULL;
1106 read_lock(&chan_list_lock);
1108 list_for_each_entry(c, &chan_list, global_l) {
1109 struct sock *sk = c->sk;
1111 if (state && c->state != state)
1114 if (c->psm == psm) {
1116 if (!bacmp(&bt_sk(sk)->src, src)) {
1117 read_unlock(&chan_list_lock);
1122 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1127 read_unlock(&chan_list_lock);
1132 int l2cap_chan_connect(struct l2cap_chan *chan)
1134 struct sock *sk = chan->sk;
1135 bdaddr_t *src = &bt_sk(sk)->src;
1136 bdaddr_t *dst = &bt_sk(sk)->dst;
1137 struct l2cap_conn *conn;
1138 struct hci_conn *hcon;
1139 struct hci_dev *hdev;
1143 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1146 hdev = hci_get_route(dst, src);
1148 return -EHOSTUNREACH;
1150 hci_dev_lock_bh(hdev);
1152 auth_type = l2cap_get_auth_type(chan);
1154 if (chan->dcid == L2CAP_CID_LE_DATA)
1155 hcon = hci_connect(hdev, LE_LINK, dst,
1156 chan->sec_level, auth_type);
1158 hcon = hci_connect(hdev, ACL_LINK, dst,
1159 chan->sec_level, auth_type);
1162 err = PTR_ERR(hcon);
1166 conn = l2cap_conn_add(hcon, 0);
1173 /* Update source addr of the socket */
1174 bacpy(src, conn->src);
1176 l2cap_chan_add(conn, chan);
1178 l2cap_state_change(chan, BT_CONNECT);
1179 __set_chan_timer(chan, sk->sk_sndtimeo);
1181 if (hcon->state == BT_CONNECTED) {
1182 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1183 __clear_chan_timer(chan);
1184 if (l2cap_check_security(chan))
1185 l2cap_state_change(chan, BT_CONNECTED);
1187 l2cap_do_start(chan);
1193 hci_dev_unlock_bh(hdev);
1198 int __l2cap_wait_ack(struct sock *sk)
1200 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1201 DECLARE_WAITQUEUE(wait, current);
1205 add_wait_queue(sk_sleep(sk), &wait);
1206 set_current_state(TASK_INTERRUPTIBLE);
1207 while (chan->unacked_frames > 0 && chan->conn) {
1211 if (signal_pending(current)) {
1212 err = sock_intr_errno(timeo);
1217 timeo = schedule_timeout(timeo);
1219 set_current_state(TASK_INTERRUPTIBLE);
1221 err = sock_error(sk);
1225 set_current_state(TASK_RUNNING);
1226 remove_wait_queue(sk_sleep(sk), &wait);
1230 static void l2cap_monitor_timeout(unsigned long arg)
1232 struct l2cap_chan *chan = (void *) arg;
1233 struct sock *sk = chan->sk;
1235 BT_DBG("chan %p", chan);
1238 if (chan->retry_count >= chan->remote_max_tx) {
1239 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1244 chan->retry_count++;
1245 __set_monitor_timer(chan);
1247 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1251 static void l2cap_retrans_timeout(unsigned long arg)
1253 struct l2cap_chan *chan = (void *) arg;
1254 struct sock *sk = chan->sk;
1256 BT_DBG("chan %p", chan);
1259 chan->retry_count = 1;
1260 __set_monitor_timer(chan);
1262 set_bit(CONN_WAIT_F, &chan->conn_state);
1264 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1268 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1270 struct sk_buff *skb;
1272 while ((skb = skb_peek(&chan->tx_q)) &&
1273 chan->unacked_frames) {
1274 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1277 skb = skb_dequeue(&chan->tx_q);
1280 chan->unacked_frames--;
1283 if (!chan->unacked_frames)
1284 __clear_retrans_timer(chan);
1287 static void l2cap_streaming_send(struct l2cap_chan *chan)
1289 struct sk_buff *skb;
1293 while ((skb = skb_dequeue(&chan->tx_q))) {
1294 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1295 control |= __set_txseq(chan, chan->next_tx_seq);
1296 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1298 if (chan->fcs == L2CAP_FCS_CRC16) {
1299 fcs = crc16(0, (u8 *)skb->data,
1300 skb->len - L2CAP_FCS_SIZE);
1301 put_unaligned_le16(fcs,
1302 skb->data + skb->len - L2CAP_FCS_SIZE);
1305 l2cap_do_send(chan, skb);
1307 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1311 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1313 struct sk_buff *skb, *tx_skb;
1317 skb = skb_peek(&chan->tx_q);
1322 if (bt_cb(skb)->tx_seq == tx_seq)
1325 if (skb_queue_is_last(&chan->tx_q, skb))
1328 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1330 if (chan->remote_max_tx &&
1331 bt_cb(skb)->retries == chan->remote_max_tx) {
1332 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1336 tx_skb = skb_clone(skb, GFP_ATOMIC);
1337 bt_cb(skb)->retries++;
1339 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1340 control &= __get_sar_mask(chan);
1342 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1343 control |= __set_ctrl_final(chan);
1345 control |= __set_reqseq(chan, chan->buffer_seq);
1346 control |= __set_txseq(chan, tx_seq);
1348 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1350 if (chan->fcs == L2CAP_FCS_CRC16) {
1351 fcs = crc16(0, (u8 *)tx_skb->data,
1352 tx_skb->len - L2CAP_FCS_SIZE);
1353 put_unaligned_le16(fcs,
1354 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1357 l2cap_do_send(chan, tx_skb);
1360 static int l2cap_ertm_send(struct l2cap_chan *chan)
1362 struct sk_buff *skb, *tx_skb;
1367 if (chan->state != BT_CONNECTED)
1370 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1372 if (chan->remote_max_tx &&
1373 bt_cb(skb)->retries == chan->remote_max_tx) {
1374 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1378 tx_skb = skb_clone(skb, GFP_ATOMIC);
1380 bt_cb(skb)->retries++;
1382 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1383 control &= __get_sar_mask(chan);
1385 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1386 control |= __set_ctrl_final(chan);
1388 control |= __set_reqseq(chan, chan->buffer_seq);
1389 control |= __set_txseq(chan, chan->next_tx_seq);
1391 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1393 if (chan->fcs == L2CAP_FCS_CRC16) {
1394 fcs = crc16(0, (u8 *)skb->data,
1395 tx_skb->len - L2CAP_FCS_SIZE);
1396 put_unaligned_le16(fcs, skb->data +
1397 tx_skb->len - L2CAP_FCS_SIZE);
1400 l2cap_do_send(chan, tx_skb);
1402 __set_retrans_timer(chan);
1404 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1406 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1408 if (bt_cb(skb)->retries == 1)
1409 chan->unacked_frames++;
1411 chan->frames_sent++;
1413 if (skb_queue_is_last(&chan->tx_q, skb))
1414 chan->tx_send_head = NULL;
1416 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1424 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1428 if (!skb_queue_empty(&chan->tx_q))
1429 chan->tx_send_head = chan->tx_q.next;
1431 chan->next_tx_seq = chan->expected_ack_seq;
1432 ret = l2cap_ertm_send(chan);
1436 static void l2cap_send_ack(struct l2cap_chan *chan)
1440 control |= __set_reqseq(chan, chan->buffer_seq);
1442 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1443 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1444 set_bit(CONN_RNR_SENT, &chan->conn_state);
1445 l2cap_send_sframe(chan, control);
1449 if (l2cap_ertm_send(chan) > 0)
1452 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1453 l2cap_send_sframe(chan, control);
1456 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1458 struct srej_list *tail;
1461 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1462 control |= __set_ctrl_final(chan);
1464 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1465 control |= __set_reqseq(chan, tail->tx_seq);
1467 l2cap_send_sframe(chan, control);
1470 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1472 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1473 struct sk_buff **frag;
1476 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1482 /* Continuation fragments (no L2CAP header) */
1483 frag = &skb_shinfo(skb)->frag_list;
1485 count = min_t(unsigned int, conn->mtu, len);
1487 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1490 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1493 (*frag)->priority = skb->priority;
1498 frag = &(*frag)->next;
1504 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1505 struct msghdr *msg, size_t len,
1508 struct sock *sk = chan->sk;
1509 struct l2cap_conn *conn = chan->conn;
1510 struct sk_buff *skb;
1511 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1512 struct l2cap_hdr *lh;
1514 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1516 count = min_t(unsigned int, (conn->mtu - hlen), len);
1517 skb = bt_skb_send_alloc(sk, count + hlen,
1518 msg->msg_flags & MSG_DONTWAIT, &err);
1520 return ERR_PTR(err);
1522 skb->priority = priority;
1524 /* Create L2CAP header */
1525 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1526 lh->cid = cpu_to_le16(chan->dcid);
1527 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1528 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1530 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1531 if (unlikely(err < 0)) {
1533 return ERR_PTR(err);
1538 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1539 struct msghdr *msg, size_t len,
1542 struct sock *sk = chan->sk;
1543 struct l2cap_conn *conn = chan->conn;
1544 struct sk_buff *skb;
1545 int err, count, hlen = L2CAP_HDR_SIZE;
1546 struct l2cap_hdr *lh;
1548 BT_DBG("sk %p len %d", sk, (int)len);
1550 count = min_t(unsigned int, (conn->mtu - hlen), len);
1551 skb = bt_skb_send_alloc(sk, count + hlen,
1552 msg->msg_flags & MSG_DONTWAIT, &err);
1554 return ERR_PTR(err);
1556 skb->priority = priority;
1558 /* Create L2CAP header */
1559 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1560 lh->cid = cpu_to_le16(chan->dcid);
1561 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1563 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1564 if (unlikely(err < 0)) {
1566 return ERR_PTR(err);
1571 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1572 struct msghdr *msg, size_t len,
1573 u32 control, u16 sdulen)
1575 struct sock *sk = chan->sk;
1576 struct l2cap_conn *conn = chan->conn;
1577 struct sk_buff *skb;
1578 int err, count, hlen;
1579 struct l2cap_hdr *lh;
1581 BT_DBG("sk %p len %d", sk, (int)len);
1584 return ERR_PTR(-ENOTCONN);
1586 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1587 hlen = L2CAP_EXT_HDR_SIZE;
1589 hlen = L2CAP_ENH_HDR_SIZE;
1592 hlen += L2CAP_SDULEN_SIZE;
1594 if (chan->fcs == L2CAP_FCS_CRC16)
1595 hlen += L2CAP_FCS_SIZE;
1597 count = min_t(unsigned int, (conn->mtu - hlen), len);
1598 skb = bt_skb_send_alloc(sk, count + hlen,
1599 msg->msg_flags & MSG_DONTWAIT, &err);
1601 return ERR_PTR(err);
1603 /* Create L2CAP header */
1604 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1605 lh->cid = cpu_to_le16(chan->dcid);
1606 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1608 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1611 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1613 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1614 if (unlikely(err < 0)) {
1616 return ERR_PTR(err);
1619 if (chan->fcs == L2CAP_FCS_CRC16)
1620 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1622 bt_cb(skb)->retries = 0;
1626 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1628 struct sk_buff *skb;
1629 struct sk_buff_head sar_queue;
1633 skb_queue_head_init(&sar_queue);
1634 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1635 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1637 return PTR_ERR(skb);
1639 __skb_queue_tail(&sar_queue, skb);
1640 len -= chan->remote_mps;
1641 size += chan->remote_mps;
1646 if (len > chan->remote_mps) {
1647 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1648 buflen = chan->remote_mps;
1650 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1654 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1656 skb_queue_purge(&sar_queue);
1657 return PTR_ERR(skb);
1660 __skb_queue_tail(&sar_queue, skb);
1664 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1665 if (chan->tx_send_head == NULL)
1666 chan->tx_send_head = sar_queue.next;
1671 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1674 struct sk_buff *skb;
1678 /* Connectionless channel */
1679 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1680 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1682 return PTR_ERR(skb);
1684 l2cap_do_send(chan, skb);
1688 switch (chan->mode) {
1689 case L2CAP_MODE_BASIC:
1690 /* Check outgoing MTU */
1691 if (len > chan->omtu)
1694 /* Create a basic PDU */
1695 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1697 return PTR_ERR(skb);
1699 l2cap_do_send(chan, skb);
1703 case L2CAP_MODE_ERTM:
1704 case L2CAP_MODE_STREAMING:
1705 /* Entire SDU fits into one PDU */
1706 if (len <= chan->remote_mps) {
1707 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1708 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1711 return PTR_ERR(skb);
1713 __skb_queue_tail(&chan->tx_q, skb);
1715 if (chan->tx_send_head == NULL)
1716 chan->tx_send_head = skb;
1719 /* Segment SDU into multiples PDUs */
1720 err = l2cap_sar_segment_sdu(chan, msg, len);
1725 if (chan->mode == L2CAP_MODE_STREAMING) {
1726 l2cap_streaming_send(chan);
1731 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1732 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1737 err = l2cap_ertm_send(chan);
1744 BT_DBG("bad state %1.1x", chan->mode);
1751 /* Copy frame to all raw sockets on that connection */
1752 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1754 struct sk_buff *nskb;
1755 struct l2cap_chan *chan;
1757 BT_DBG("conn %p", conn);
1759 read_lock(&conn->chan_lock);
1760 list_for_each_entry(chan, &conn->chan_l, list) {
1761 struct sock *sk = chan->sk;
1762 if (chan->chan_type != L2CAP_CHAN_RAW)
1765 /* Don't send frame to the socket it came from */
1768 nskb = skb_clone(skb, GFP_ATOMIC);
1772 if (chan->ops->recv(chan->data, nskb))
1775 read_unlock(&conn->chan_lock);
1778 /* ---- L2CAP signalling commands ---- */
1779 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1780 u8 code, u8 ident, u16 dlen, void *data)
1782 struct sk_buff *skb, **frag;
1783 struct l2cap_cmd_hdr *cmd;
1784 struct l2cap_hdr *lh;
1787 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1788 conn, code, ident, dlen);
1790 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1791 count = min_t(unsigned int, conn->mtu, len);
1793 skb = bt_skb_alloc(count, GFP_ATOMIC);
1797 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1798 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1800 if (conn->hcon->type == LE_LINK)
1801 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1803 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1805 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1808 cmd->len = cpu_to_le16(dlen);
1811 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1812 memcpy(skb_put(skb, count), data, count);
1818 /* Continuation fragments (no L2CAP header) */
1819 frag = &skb_shinfo(skb)->frag_list;
1821 count = min_t(unsigned int, conn->mtu, len);
1823 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1827 memcpy(skb_put(*frag, count), data, count);
1832 frag = &(*frag)->next;
1842 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1844 struct l2cap_conf_opt *opt = *ptr;
1847 len = L2CAP_CONF_OPT_SIZE + opt->len;
1855 *val = *((u8 *) opt->val);
1859 *val = get_unaligned_le16(opt->val);
1863 *val = get_unaligned_le32(opt->val);
1867 *val = (unsigned long) opt->val;
1871 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1875 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1877 struct l2cap_conf_opt *opt = *ptr;
1879 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1886 *((u8 *) opt->val) = val;
1890 put_unaligned_le16(val, opt->val);
1894 put_unaligned_le32(val, opt->val);
1898 memcpy(opt->val, (void *) val, len);
1902 *ptr += L2CAP_CONF_OPT_SIZE + len;
1905 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1907 struct l2cap_conf_efs efs;
1909 switch(chan->mode) {
1910 case L2CAP_MODE_ERTM:
1911 efs.id = chan->local_id;
1912 efs.stype = chan->local_stype;
1913 efs.msdu = cpu_to_le16(chan->local_msdu);
1914 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1915 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1916 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1919 case L2CAP_MODE_STREAMING:
1921 efs.stype = L2CAP_SERV_BESTEFFORT;
1922 efs.msdu = cpu_to_le16(chan->local_msdu);
1923 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1932 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1933 (unsigned long) &efs);
1936 static void l2cap_ack_timeout(unsigned long arg)
1938 struct l2cap_chan *chan = (void *) arg;
1940 bh_lock_sock(chan->sk);
1941 l2cap_send_ack(chan);
1942 bh_unlock_sock(chan->sk);
1945 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1947 struct sock *sk = chan->sk;
1949 chan->expected_ack_seq = 0;
1950 chan->unacked_frames = 0;
1951 chan->buffer_seq = 0;
1952 chan->num_acked = 0;
1953 chan->frames_sent = 0;
1955 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1956 (unsigned long) chan);
1957 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1958 (unsigned long) chan);
1959 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1961 skb_queue_head_init(&chan->srej_q);
1963 INIT_LIST_HEAD(&chan->srej_l);
1966 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1969 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1972 case L2CAP_MODE_STREAMING:
1973 case L2CAP_MODE_ERTM:
1974 if (l2cap_mode_supported(mode, remote_feat_mask))
1978 return L2CAP_MODE_BASIC;
1982 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1984 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
1987 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1989 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
1992 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1994 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1995 __l2cap_ews_supported(chan)) {
1996 /* use extended control field */
1997 set_bit(FLAG_EXT_CTRL, &chan->flags);
1998 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2000 chan->tx_win = min_t(u16, chan->tx_win,
2001 L2CAP_DEFAULT_TX_WINDOW);
2002 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2006 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2008 struct l2cap_conf_req *req = data;
2009 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2010 void *ptr = req->data;
2013 BT_DBG("chan %p", chan);
2015 if (chan->num_conf_req || chan->num_conf_rsp)
2018 switch (chan->mode) {
2019 case L2CAP_MODE_STREAMING:
2020 case L2CAP_MODE_ERTM:
2021 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2024 if (__l2cap_efs_supported(chan))
2025 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2029 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2034 if (chan->imtu != L2CAP_DEFAULT_MTU)
2035 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2037 switch (chan->mode) {
2038 case L2CAP_MODE_BASIC:
2039 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2040 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2043 rfc.mode = L2CAP_MODE_BASIC;
2045 rfc.max_transmit = 0;
2046 rfc.retrans_timeout = 0;
2047 rfc.monitor_timeout = 0;
2048 rfc.max_pdu_size = 0;
2050 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2051 (unsigned long) &rfc);
2054 case L2CAP_MODE_ERTM:
2055 rfc.mode = L2CAP_MODE_ERTM;
2056 rfc.max_transmit = chan->max_tx;
2057 rfc.retrans_timeout = 0;
2058 rfc.monitor_timeout = 0;
2060 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2061 L2CAP_EXT_HDR_SIZE -
2064 rfc.max_pdu_size = cpu_to_le16(size);
2066 l2cap_txwin_setup(chan);
2068 rfc.txwin_size = min_t(u16, chan->tx_win,
2069 L2CAP_DEFAULT_TX_WINDOW);
2071 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2072 (unsigned long) &rfc);
2074 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2075 l2cap_add_opt_efs(&ptr, chan);
2077 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2080 if (chan->fcs == L2CAP_FCS_NONE ||
2081 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2082 chan->fcs = L2CAP_FCS_NONE;
2083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2086 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2087 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2091 case L2CAP_MODE_STREAMING:
2092 rfc.mode = L2CAP_MODE_STREAMING;
2094 rfc.max_transmit = 0;
2095 rfc.retrans_timeout = 0;
2096 rfc.monitor_timeout = 0;
2098 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2099 L2CAP_EXT_HDR_SIZE -
2102 rfc.max_pdu_size = cpu_to_le16(size);
2104 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2105 (unsigned long) &rfc);
2107 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2108 l2cap_add_opt_efs(&ptr, chan);
2110 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2113 if (chan->fcs == L2CAP_FCS_NONE ||
2114 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2115 chan->fcs = L2CAP_FCS_NONE;
2116 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2121 req->dcid = cpu_to_le16(chan->dcid);
2122 req->flags = cpu_to_le16(0);
2127 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2129 struct l2cap_conf_rsp *rsp = data;
2130 void *ptr = rsp->data;
2131 void *req = chan->conf_req;
2132 int len = chan->conf_len;
2133 int type, hint, olen;
2135 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2136 struct l2cap_conf_efs efs;
2138 u16 mtu = L2CAP_DEFAULT_MTU;
2139 u16 result = L2CAP_CONF_SUCCESS;
2142 BT_DBG("chan %p", chan);
2144 while (len >= L2CAP_CONF_OPT_SIZE) {
2145 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2147 hint = type & L2CAP_CONF_HINT;
2148 type &= L2CAP_CONF_MASK;
2151 case L2CAP_CONF_MTU:
2155 case L2CAP_CONF_FLUSH_TO:
2156 chan->flush_to = val;
2159 case L2CAP_CONF_QOS:
2162 case L2CAP_CONF_RFC:
2163 if (olen == sizeof(rfc))
2164 memcpy(&rfc, (void *) val, olen);
2167 case L2CAP_CONF_FCS:
2168 if (val == L2CAP_FCS_NONE)
2169 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2172 case L2CAP_CONF_EFS:
2174 if (olen == sizeof(efs))
2175 memcpy(&efs, (void *) val, olen);
2178 case L2CAP_CONF_EWS:
2180 return -ECONNREFUSED;
2182 set_bit(FLAG_EXT_CTRL, &chan->flags);
2183 set_bit(CONF_EWS_RECV, &chan->conf_state);
2184 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2185 chan->remote_tx_win = val;
2192 result = L2CAP_CONF_UNKNOWN;
2193 *((u8 *) ptr++) = type;
2198 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2201 switch (chan->mode) {
2202 case L2CAP_MODE_STREAMING:
2203 case L2CAP_MODE_ERTM:
2204 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2205 chan->mode = l2cap_select_mode(rfc.mode,
2206 chan->conn->feat_mask);
2211 if (__l2cap_efs_supported(chan))
2212 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2214 return -ECONNREFUSED;
2217 if (chan->mode != rfc.mode)
2218 return -ECONNREFUSED;
2224 if (chan->mode != rfc.mode) {
2225 result = L2CAP_CONF_UNACCEPT;
2226 rfc.mode = chan->mode;
2228 if (chan->num_conf_rsp == 1)
2229 return -ECONNREFUSED;
2231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2232 sizeof(rfc), (unsigned long) &rfc);
2235 if (result == L2CAP_CONF_SUCCESS) {
2236 /* Configure output options and let the other side know
2237 * which ones we don't like. */
2239 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2240 result = L2CAP_CONF_UNACCEPT;
2243 set_bit(CONF_MTU_DONE, &chan->conf_state);
2245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2248 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2249 efs.stype != L2CAP_SERV_NOTRAFIC &&
2250 efs.stype != chan->local_stype) {
2252 result = L2CAP_CONF_UNACCEPT;
2254 if (chan->num_conf_req >= 1)
2255 return -ECONNREFUSED;
2257 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2259 (unsigned long) &efs);
2261 /* Send PENDING Conf Rsp */
2262 result = L2CAP_CONF_PENDING;
2263 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2268 case L2CAP_MODE_BASIC:
2269 chan->fcs = L2CAP_FCS_NONE;
2270 set_bit(CONF_MODE_DONE, &chan->conf_state);
2273 case L2CAP_MODE_ERTM:
2274 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2275 chan->remote_tx_win = rfc.txwin_size;
2277 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2279 chan->remote_max_tx = rfc.max_transmit;
2281 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2283 L2CAP_EXT_HDR_SIZE -
2286 rfc.max_pdu_size = cpu_to_le16(size);
2287 chan->remote_mps = size;
2289 rfc.retrans_timeout =
2290 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2291 rfc.monitor_timeout =
2292 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2294 set_bit(CONF_MODE_DONE, &chan->conf_state);
2296 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2297 sizeof(rfc), (unsigned long) &rfc);
2299 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2300 chan->remote_id = efs.id;
2301 chan->remote_stype = efs.stype;
2302 chan->remote_msdu = le16_to_cpu(efs.msdu);
2303 chan->remote_flush_to =
2304 le32_to_cpu(efs.flush_to);
2305 chan->remote_acc_lat =
2306 le32_to_cpu(efs.acc_lat);
2307 chan->remote_sdu_itime =
2308 le32_to_cpu(efs.sdu_itime);
2309 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2310 sizeof(efs), (unsigned long) &efs);
2314 case L2CAP_MODE_STREAMING:
2315 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2317 L2CAP_EXT_HDR_SIZE -
2320 rfc.max_pdu_size = cpu_to_le16(size);
2321 chan->remote_mps = size;
2323 set_bit(CONF_MODE_DONE, &chan->conf_state);
2325 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2326 sizeof(rfc), (unsigned long) &rfc);
2331 result = L2CAP_CONF_UNACCEPT;
2333 memset(&rfc, 0, sizeof(rfc));
2334 rfc.mode = chan->mode;
2337 if (result == L2CAP_CONF_SUCCESS)
2338 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2340 rsp->scid = cpu_to_le16(chan->dcid);
2341 rsp->result = cpu_to_le16(result);
2342 rsp->flags = cpu_to_le16(0x0000);
2347 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2349 struct l2cap_conf_req *req = data;
2350 void *ptr = req->data;
2353 struct l2cap_conf_rfc rfc;
2355 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2357 while (len >= L2CAP_CONF_OPT_SIZE) {
2358 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2361 case L2CAP_CONF_MTU:
2362 if (val < L2CAP_DEFAULT_MIN_MTU) {
2363 *result = L2CAP_CONF_UNACCEPT;
2364 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2367 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2370 case L2CAP_CONF_FLUSH_TO:
2371 chan->flush_to = val;
2372 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2376 case L2CAP_CONF_RFC:
2377 if (olen == sizeof(rfc))
2378 memcpy(&rfc, (void *)val, olen);
2380 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2381 rfc.mode != chan->mode)
2382 return -ECONNREFUSED;
2386 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2387 sizeof(rfc), (unsigned long) &rfc);
2390 case L2CAP_CONF_EWS:
2391 chan->tx_win = min_t(u16, val,
2392 L2CAP_DEFAULT_EXT_WINDOW);
2393 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2399 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2400 return -ECONNREFUSED;
2402 chan->mode = rfc.mode;
2404 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2406 case L2CAP_MODE_ERTM:
2407 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2408 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2409 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2411 case L2CAP_MODE_STREAMING:
2412 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2416 req->dcid = cpu_to_le16(chan->dcid);
2417 req->flags = cpu_to_le16(0x0000);
2422 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2424 struct l2cap_conf_rsp *rsp = data;
2425 void *ptr = rsp->data;
2427 BT_DBG("chan %p", chan);
2429 rsp->scid = cpu_to_le16(chan->dcid);
2430 rsp->result = cpu_to_le16(result);
2431 rsp->flags = cpu_to_le16(flags);
2436 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2438 struct l2cap_conn_rsp rsp;
2439 struct l2cap_conn *conn = chan->conn;
2442 rsp.scid = cpu_to_le16(chan->dcid);
2443 rsp.dcid = cpu_to_le16(chan->scid);
2444 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2445 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2446 l2cap_send_cmd(conn, chan->ident,
2447 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2449 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2452 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2453 l2cap_build_conf_req(chan, buf), buf);
2454 chan->num_conf_req++;
2457 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2461 struct l2cap_conf_rfc rfc;
2463 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2465 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2468 while (len >= L2CAP_CONF_OPT_SIZE) {
2469 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2472 case L2CAP_CONF_RFC:
2473 if (olen == sizeof(rfc))
2474 memcpy(&rfc, (void *)val, olen);
2481 case L2CAP_MODE_ERTM:
2482 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2483 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2484 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2486 case L2CAP_MODE_STREAMING:
2487 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2491 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2493 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2495 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2498 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2499 cmd->ident == conn->info_ident) {
2500 del_timer(&conn->info_timer);
2502 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2503 conn->info_ident = 0;
2505 l2cap_conn_start(conn);
2511 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2513 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2514 struct l2cap_conn_rsp rsp;
2515 struct l2cap_chan *chan = NULL, *pchan;
2516 struct sock *parent, *sk = NULL;
2517 int result, status = L2CAP_CS_NO_INFO;
2519 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2520 __le16 psm = req->psm;
2522 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2524 /* Check if we have socket listening on psm */
2525 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2527 result = L2CAP_CR_BAD_PSM;
2533 bh_lock_sock(parent);
2535 /* Check if the ACL is secure enough (if not SDP) */
2536 if (psm != cpu_to_le16(0x0001) &&
2537 !hci_conn_check_link_mode(conn->hcon)) {
2538 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2539 result = L2CAP_CR_SEC_BLOCK;
2543 result = L2CAP_CR_NO_MEM;
2545 /* Check for backlog size */
2546 if (sk_acceptq_is_full(parent)) {
2547 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2551 chan = pchan->ops->new_connection(pchan->data);
2557 write_lock_bh(&conn->chan_lock);
2559 /* Check if we already have channel with that dcid */
2560 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2561 write_unlock_bh(&conn->chan_lock);
2562 sock_set_flag(sk, SOCK_ZAPPED);
2563 chan->ops->close(chan->data);
2567 hci_conn_hold(conn->hcon);
2569 bacpy(&bt_sk(sk)->src, conn->src);
2570 bacpy(&bt_sk(sk)->dst, conn->dst);
2574 bt_accept_enqueue(parent, sk);
2576 __l2cap_chan_add(conn, chan);
2580 __set_chan_timer(chan, sk->sk_sndtimeo);
2582 chan->ident = cmd->ident;
2584 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2585 if (l2cap_check_security(chan)) {
2586 if (bt_sk(sk)->defer_setup) {
2587 l2cap_state_change(chan, BT_CONNECT2);
2588 result = L2CAP_CR_PEND;
2589 status = L2CAP_CS_AUTHOR_PEND;
2590 parent->sk_data_ready(parent, 0);
2592 l2cap_state_change(chan, BT_CONFIG);
2593 result = L2CAP_CR_SUCCESS;
2594 status = L2CAP_CS_NO_INFO;
2597 l2cap_state_change(chan, BT_CONNECT2);
2598 result = L2CAP_CR_PEND;
2599 status = L2CAP_CS_AUTHEN_PEND;
2602 l2cap_state_change(chan, BT_CONNECT2);
2603 result = L2CAP_CR_PEND;
2604 status = L2CAP_CS_NO_INFO;
2607 write_unlock_bh(&conn->chan_lock);
2610 bh_unlock_sock(parent);
2613 rsp.scid = cpu_to_le16(scid);
2614 rsp.dcid = cpu_to_le16(dcid);
2615 rsp.result = cpu_to_le16(result);
2616 rsp.status = cpu_to_le16(status);
2617 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2619 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2620 struct l2cap_info_req info;
2621 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2623 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2624 conn->info_ident = l2cap_get_ident(conn);
2626 mod_timer(&conn->info_timer, jiffies +
2627 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2629 l2cap_send_cmd(conn, conn->info_ident,
2630 L2CAP_INFO_REQ, sizeof(info), &info);
2633 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2634 result == L2CAP_CR_SUCCESS) {
2636 set_bit(CONF_REQ_SENT, &chan->conf_state);
2637 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2638 l2cap_build_conf_req(chan, buf), buf);
2639 chan->num_conf_req++;
2645 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2647 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2648 u16 scid, dcid, result, status;
2649 struct l2cap_chan *chan;
2653 scid = __le16_to_cpu(rsp->scid);
2654 dcid = __le16_to_cpu(rsp->dcid);
2655 result = __le16_to_cpu(rsp->result);
2656 status = __le16_to_cpu(rsp->status);
2658 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2661 chan = l2cap_get_chan_by_scid(conn, scid);
2665 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2673 case L2CAP_CR_SUCCESS:
2674 l2cap_state_change(chan, BT_CONFIG);
2677 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2679 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2682 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2683 l2cap_build_conf_req(chan, req), req);
2684 chan->num_conf_req++;
2688 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2692 /* don't delete l2cap channel if sk is owned by user */
2693 if (sock_owned_by_user(sk)) {
2694 l2cap_state_change(chan, BT_DISCONN);
2695 __clear_chan_timer(chan);
2696 __set_chan_timer(chan, HZ / 5);
2700 l2cap_chan_del(chan, ECONNREFUSED);
2708 static inline void set_default_fcs(struct l2cap_chan *chan)
2710 /* FCS is enabled only in ERTM or streaming mode, if one or both
2713 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2714 chan->fcs = L2CAP_FCS_NONE;
2715 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2716 chan->fcs = L2CAP_FCS_CRC16;
2719 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2721 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2724 struct l2cap_chan *chan;
2728 dcid = __le16_to_cpu(req->dcid);
2729 flags = __le16_to_cpu(req->flags);
2731 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2733 chan = l2cap_get_chan_by_scid(conn, dcid);
2739 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2740 struct l2cap_cmd_rej_cid rej;
2742 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2743 rej.scid = cpu_to_le16(chan->scid);
2744 rej.dcid = cpu_to_le16(chan->dcid);
2746 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2751 /* Reject if config buffer is too small. */
2752 len = cmd_len - sizeof(*req);
2753 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2754 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2755 l2cap_build_conf_rsp(chan, rsp,
2756 L2CAP_CONF_REJECT, flags), rsp);
2761 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2762 chan->conf_len += len;
2764 if (flags & 0x0001) {
2765 /* Incomplete config. Send empty response. */
2766 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2767 l2cap_build_conf_rsp(chan, rsp,
2768 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2772 /* Complete config. */
2773 len = l2cap_parse_conf_req(chan, rsp);
2775 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2779 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2780 chan->num_conf_rsp++;
2782 /* Reset config buffer. */
2785 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2788 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2789 set_default_fcs(chan);
2791 l2cap_state_change(chan, BT_CONNECTED);
2793 chan->next_tx_seq = 0;
2794 chan->expected_tx_seq = 0;
2795 skb_queue_head_init(&chan->tx_q);
2796 if (chan->mode == L2CAP_MODE_ERTM)
2797 l2cap_ertm_init(chan);
2799 l2cap_chan_ready(sk);
2803 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2805 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2806 l2cap_build_conf_req(chan, buf), buf);
2807 chan->num_conf_req++;
2810 /* Got Conf Rsp PENDING from remote side and asume we sent
2811 Conf Rsp PENDING in the code above */
2812 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2813 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2815 /* check compatibility */
2817 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2818 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2820 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2821 l2cap_build_conf_rsp(chan, rsp,
2822 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2830 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2832 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2833 u16 scid, flags, result;
2834 struct l2cap_chan *chan;
2836 int len = cmd->len - sizeof(*rsp);
2838 scid = __le16_to_cpu(rsp->scid);
2839 flags = __le16_to_cpu(rsp->flags);
2840 result = __le16_to_cpu(rsp->result);
2842 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2843 scid, flags, result);
2845 chan = l2cap_get_chan_by_scid(conn, scid);
2852 case L2CAP_CONF_SUCCESS:
2853 l2cap_conf_rfc_get(chan, rsp->data, len);
2854 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2857 case L2CAP_CONF_PENDING:
2858 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2860 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2863 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2866 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2870 /* check compatibility */
2872 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2873 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2875 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2876 l2cap_build_conf_rsp(chan, buf,
2877 L2CAP_CONF_SUCCESS, 0x0000), buf);
2881 case L2CAP_CONF_UNACCEPT:
2882 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2885 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2886 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2890 /* throw out any old stored conf requests */
2891 result = L2CAP_CONF_SUCCESS;
2892 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2895 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2899 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2900 L2CAP_CONF_REQ, len, req);
2901 chan->num_conf_req++;
2902 if (result != L2CAP_CONF_SUCCESS)
2908 sk->sk_err = ECONNRESET;
2909 __set_chan_timer(chan, HZ * 5);
2910 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2917 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2919 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2920 set_default_fcs(chan);
2922 l2cap_state_change(chan, BT_CONNECTED);
2923 chan->next_tx_seq = 0;
2924 chan->expected_tx_seq = 0;
2925 skb_queue_head_init(&chan->tx_q);
2926 if (chan->mode == L2CAP_MODE_ERTM)
2927 l2cap_ertm_init(chan);
2929 l2cap_chan_ready(sk);
2937 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2939 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2940 struct l2cap_disconn_rsp rsp;
2942 struct l2cap_chan *chan;
2945 scid = __le16_to_cpu(req->scid);
2946 dcid = __le16_to_cpu(req->dcid);
2948 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2950 chan = l2cap_get_chan_by_scid(conn, dcid);
2956 rsp.dcid = cpu_to_le16(chan->scid);
2957 rsp.scid = cpu_to_le16(chan->dcid);
2958 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2960 sk->sk_shutdown = SHUTDOWN_MASK;
2962 /* don't delete l2cap channel if sk is owned by user */
2963 if (sock_owned_by_user(sk)) {
2964 l2cap_state_change(chan, BT_DISCONN);
2965 __clear_chan_timer(chan);
2966 __set_chan_timer(chan, HZ / 5);
2971 l2cap_chan_del(chan, ECONNRESET);
2974 chan->ops->close(chan->data);
2978 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2980 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2982 struct l2cap_chan *chan;
2985 scid = __le16_to_cpu(rsp->scid);
2986 dcid = __le16_to_cpu(rsp->dcid);
2988 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2990 chan = l2cap_get_chan_by_scid(conn, scid);
2996 /* don't delete l2cap channel if sk is owned by user */
2997 if (sock_owned_by_user(sk)) {
2998 l2cap_state_change(chan,BT_DISCONN);
2999 __clear_chan_timer(chan);
3000 __set_chan_timer(chan, HZ / 5);
3005 l2cap_chan_del(chan, 0);
3008 chan->ops->close(chan->data);
3012 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3014 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3017 type = __le16_to_cpu(req->type);
3019 BT_DBG("type 0x%4.4x", type);
3021 if (type == L2CAP_IT_FEAT_MASK) {
3023 u32 feat_mask = l2cap_feat_mask;
3024 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3025 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3026 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3028 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3031 feat_mask |= L2CAP_FEAT_EXT_FLOW
3032 | L2CAP_FEAT_EXT_WINDOW;
3034 put_unaligned_le32(feat_mask, rsp->data);
3035 l2cap_send_cmd(conn, cmd->ident,
3036 L2CAP_INFO_RSP, sizeof(buf), buf);
3037 } else if (type == L2CAP_IT_FIXED_CHAN) {
3039 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3042 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3044 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3046 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3047 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3048 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3049 l2cap_send_cmd(conn, cmd->ident,
3050 L2CAP_INFO_RSP, sizeof(buf), buf);
3052 struct l2cap_info_rsp rsp;
3053 rsp.type = cpu_to_le16(type);
3054 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3055 l2cap_send_cmd(conn, cmd->ident,
3056 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3062 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3064 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3067 type = __le16_to_cpu(rsp->type);
3068 result = __le16_to_cpu(rsp->result);
3070 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3072 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3073 if (cmd->ident != conn->info_ident ||
3074 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3077 del_timer(&conn->info_timer);
3079 if (result != L2CAP_IR_SUCCESS) {
3080 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3081 conn->info_ident = 0;
3083 l2cap_conn_start(conn);
3088 if (type == L2CAP_IT_FEAT_MASK) {
3089 conn->feat_mask = get_unaligned_le32(rsp->data);
3091 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3092 struct l2cap_info_req req;
3093 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3095 conn->info_ident = l2cap_get_ident(conn);
3097 l2cap_send_cmd(conn, conn->info_ident,
3098 L2CAP_INFO_REQ, sizeof(req), &req);
3100 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3101 conn->info_ident = 0;
3103 l2cap_conn_start(conn);
3105 } else if (type == L2CAP_IT_FIXED_CHAN) {
3106 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3107 conn->info_ident = 0;
3109 l2cap_conn_start(conn);
3115 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3116 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3119 struct l2cap_create_chan_req *req = data;
3120 struct l2cap_create_chan_rsp rsp;
3123 if (cmd_len != sizeof(*req))
3129 psm = le16_to_cpu(req->psm);
3130 scid = le16_to_cpu(req->scid);
3132 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3134 /* Placeholder: Always reject */
3136 rsp.scid = cpu_to_le16(scid);
3137 rsp.result = L2CAP_CR_NO_MEM;
3138 rsp.status = L2CAP_CS_NO_INFO;
3140 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3146 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3147 struct l2cap_cmd_hdr *cmd, void *data)
3149 BT_DBG("conn %p", conn);
3151 return l2cap_connect_rsp(conn, cmd, data);
3154 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3155 u16 icid, u16 result)
3157 struct l2cap_move_chan_rsp rsp;
3159 BT_DBG("icid %d, result %d", icid, result);
3161 rsp.icid = cpu_to_le16(icid);
3162 rsp.result = cpu_to_le16(result);
3164 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3167 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3168 struct l2cap_chan *chan, u16 icid, u16 result)
3170 struct l2cap_move_chan_cfm cfm;
3173 BT_DBG("icid %d, result %d", icid, result);
3175 ident = l2cap_get_ident(conn);
3177 chan->ident = ident;
3179 cfm.icid = cpu_to_le16(icid);
3180 cfm.result = cpu_to_le16(result);
3182 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3185 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3188 struct l2cap_move_chan_cfm_rsp rsp;
3190 BT_DBG("icid %d", icid);
3192 rsp.icid = cpu_to_le16(icid);
3193 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3196 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3197 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3199 struct l2cap_move_chan_req *req = data;
3201 u16 result = L2CAP_MR_NOT_ALLOWED;
3203 if (cmd_len != sizeof(*req))
3206 icid = le16_to_cpu(req->icid);
3208 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3213 /* Placeholder: Always refuse */
3214 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3219 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3220 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3222 struct l2cap_move_chan_rsp *rsp = data;
3225 if (cmd_len != sizeof(*rsp))
3228 icid = le16_to_cpu(rsp->icid);
3229 result = le16_to_cpu(rsp->result);
3231 BT_DBG("icid %d, result %d", icid, result);
3233 /* Placeholder: Always unconfirmed */
3234 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3239 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3240 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3242 struct l2cap_move_chan_cfm *cfm = data;
3245 if (cmd_len != sizeof(*cfm))
3248 icid = le16_to_cpu(cfm->icid);
3249 result = le16_to_cpu(cfm->result);
3251 BT_DBG("icid %d, result %d", icid, result);
3253 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3258 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3259 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3261 struct l2cap_move_chan_cfm_rsp *rsp = data;
3264 if (cmd_len != sizeof(*rsp))
3267 icid = le16_to_cpu(rsp->icid);
3269 BT_DBG("icid %d", icid);
3274 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3279 if (min > max || min < 6 || max > 3200)
3282 if (to_multiplier < 10 || to_multiplier > 3200)
3285 if (max >= to_multiplier * 8)
3288 max_latency = (to_multiplier * 8 / max) - 1;
3289 if (latency > 499 || latency > max_latency)
3295 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3296 struct l2cap_cmd_hdr *cmd, u8 *data)
3298 struct hci_conn *hcon = conn->hcon;
3299 struct l2cap_conn_param_update_req *req;
3300 struct l2cap_conn_param_update_rsp rsp;
3301 u16 min, max, latency, to_multiplier, cmd_len;
3304 if (!(hcon->link_mode & HCI_LM_MASTER))
3307 cmd_len = __le16_to_cpu(cmd->len);
3308 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3311 req = (struct l2cap_conn_param_update_req *) data;
3312 min = __le16_to_cpu(req->min);
3313 max = __le16_to_cpu(req->max);
3314 latency = __le16_to_cpu(req->latency);
3315 to_multiplier = __le16_to_cpu(req->to_multiplier);
3317 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3318 min, max, latency, to_multiplier);
3320 memset(&rsp, 0, sizeof(rsp));
3322 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3324 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3326 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3328 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3332 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3337 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3338 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3342 switch (cmd->code) {
3343 case L2CAP_COMMAND_REJ:
3344 l2cap_command_rej(conn, cmd, data);
3347 case L2CAP_CONN_REQ:
3348 err = l2cap_connect_req(conn, cmd, data);
3351 case L2CAP_CONN_RSP:
3352 err = l2cap_connect_rsp(conn, cmd, data);
3355 case L2CAP_CONF_REQ:
3356 err = l2cap_config_req(conn, cmd, cmd_len, data);
3359 case L2CAP_CONF_RSP:
3360 err = l2cap_config_rsp(conn, cmd, data);
3363 case L2CAP_DISCONN_REQ:
3364 err = l2cap_disconnect_req(conn, cmd, data);
3367 case L2CAP_DISCONN_RSP:
3368 err = l2cap_disconnect_rsp(conn, cmd, data);
3371 case L2CAP_ECHO_REQ:
3372 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3375 case L2CAP_ECHO_RSP:
3378 case L2CAP_INFO_REQ:
3379 err = l2cap_information_req(conn, cmd, data);
3382 case L2CAP_INFO_RSP:
3383 err = l2cap_information_rsp(conn, cmd, data);
3386 case L2CAP_CREATE_CHAN_REQ:
3387 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3390 case L2CAP_CREATE_CHAN_RSP:
3391 err = l2cap_create_channel_rsp(conn, cmd, data);
3394 case L2CAP_MOVE_CHAN_REQ:
3395 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3398 case L2CAP_MOVE_CHAN_RSP:
3399 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3402 case L2CAP_MOVE_CHAN_CFM:
3403 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3406 case L2CAP_MOVE_CHAN_CFM_RSP:
3407 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3411 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3419 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3420 struct l2cap_cmd_hdr *cmd, u8 *data)
3422 switch (cmd->code) {
3423 case L2CAP_COMMAND_REJ:
3426 case L2CAP_CONN_PARAM_UPDATE_REQ:
3427 return l2cap_conn_param_update_req(conn, cmd, data);
3429 case L2CAP_CONN_PARAM_UPDATE_RSP:
3433 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3438 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3439 struct sk_buff *skb)
3441 u8 *data = skb->data;
3443 struct l2cap_cmd_hdr cmd;
3446 l2cap_raw_recv(conn, skb);
3448 while (len >= L2CAP_CMD_HDR_SIZE) {
3450 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3451 data += L2CAP_CMD_HDR_SIZE;
3452 len -= L2CAP_CMD_HDR_SIZE;
3454 cmd_len = le16_to_cpu(cmd.len);
3456 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3458 if (cmd_len > len || !cmd.ident) {
3459 BT_DBG("corrupted command");
3463 if (conn->hcon->type == LE_LINK)
3464 err = l2cap_le_sig_cmd(conn, &cmd, data);
3466 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3469 struct l2cap_cmd_rej_unk rej;
3471 BT_ERR("Wrong link type (%d)", err);
3473 /* FIXME: Map err to a valid reason */
3474 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3475 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3485 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3487 u16 our_fcs, rcv_fcs;
3490 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3491 hdr_size = L2CAP_EXT_HDR_SIZE;
3493 hdr_size = L2CAP_ENH_HDR_SIZE;
3495 if (chan->fcs == L2CAP_FCS_CRC16) {
3496 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3497 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3498 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3500 if (our_fcs != rcv_fcs)
3506 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3510 chan->frames_sent = 0;
3512 control |= __set_reqseq(chan, chan->buffer_seq);
3514 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3515 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3516 l2cap_send_sframe(chan, control);
3517 set_bit(CONN_RNR_SENT, &chan->conn_state);
3520 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3521 l2cap_retransmit_frames(chan);
3523 l2cap_ertm_send(chan);
3525 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3526 chan->frames_sent == 0) {
3527 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3528 l2cap_send_sframe(chan, control);
3532 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3534 struct sk_buff *next_skb;
3535 int tx_seq_offset, next_tx_seq_offset;
3537 bt_cb(skb)->tx_seq = tx_seq;
3538 bt_cb(skb)->sar = sar;
3540 next_skb = skb_peek(&chan->srej_q);
3542 __skb_queue_tail(&chan->srej_q, skb);
3546 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3549 if (bt_cb(next_skb)->tx_seq == tx_seq)
3552 next_tx_seq_offset = __seq_offset(chan,
3553 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3555 if (next_tx_seq_offset > tx_seq_offset) {
3556 __skb_queue_before(&chan->srej_q, next_skb, skb);
3560 if (skb_queue_is_last(&chan->srej_q, next_skb))
3563 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3565 __skb_queue_tail(&chan->srej_q, skb);
3570 static void append_skb_frag(struct sk_buff *skb,
3571 struct sk_buff *new_frag, struct sk_buff **last_frag)
3573 /* skb->len reflects data in skb as well as all fragments
3574 * skb->data_len reflects only data in fragments
3576 if (!skb_has_frag_list(skb))
3577 skb_shinfo(skb)->frag_list = new_frag;
3579 new_frag->next = NULL;
3581 (*last_frag)->next = new_frag;
3582 *last_frag = new_frag;
3584 skb->len += new_frag->len;
3585 skb->data_len += new_frag->len;
3586 skb->truesize += new_frag->truesize;
3589 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3593 switch (__get_ctrl_sar(chan, control)) {
3594 case L2CAP_SAR_UNSEGMENTED:
3598 err = chan->ops->recv(chan->data, skb);
3601 case L2CAP_SAR_START:
3605 chan->sdu_len = get_unaligned_le16(skb->data);
3606 skb_pull(skb, L2CAP_SDULEN_SIZE);
3608 if (chan->sdu_len > chan->imtu) {
3613 if (skb->len >= chan->sdu_len)
3617 chan->sdu_last_frag = skb;
3623 case L2CAP_SAR_CONTINUE:
3627 append_skb_frag(chan->sdu, skb,
3628 &chan->sdu_last_frag);
3631 if (chan->sdu->len >= chan->sdu_len)
3641 append_skb_frag(chan->sdu, skb,
3642 &chan->sdu_last_frag);
3645 if (chan->sdu->len != chan->sdu_len)
3648 err = chan->ops->recv(chan->data, chan->sdu);
3651 /* Reassembly complete */
3653 chan->sdu_last_frag = NULL;
3661 kfree_skb(chan->sdu);
3663 chan->sdu_last_frag = NULL;
3670 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3674 BT_DBG("chan %p, Enter local busy", chan);
3676 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3678 control = __set_reqseq(chan, chan->buffer_seq);
3679 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3680 l2cap_send_sframe(chan, control);
3682 set_bit(CONN_RNR_SENT, &chan->conn_state);
3684 __clear_ack_timer(chan);
3687 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3691 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3694 control = __set_reqseq(chan, chan->buffer_seq);
3695 control |= __set_ctrl_poll(chan);
3696 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3697 l2cap_send_sframe(chan, control);
3698 chan->retry_count = 1;
3700 __clear_retrans_timer(chan);
3701 __set_monitor_timer(chan);
3703 set_bit(CONN_WAIT_F, &chan->conn_state);
3706 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3707 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3709 BT_DBG("chan %p, Exit local busy", chan);
3712 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3714 if (chan->mode == L2CAP_MODE_ERTM) {
3716 l2cap_ertm_enter_local_busy(chan);
3718 l2cap_ertm_exit_local_busy(chan);
3722 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3724 struct sk_buff *skb;
3727 while ((skb = skb_peek(&chan->srej_q)) &&
3728 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3731 if (bt_cb(skb)->tx_seq != tx_seq)
3734 skb = skb_dequeue(&chan->srej_q);
3735 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3736 err = l2cap_reassemble_sdu(chan, skb, control);
3739 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3743 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3744 tx_seq = __next_seq(chan, tx_seq);
3748 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3750 struct srej_list *l, *tmp;
3753 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3754 if (l->tx_seq == tx_seq) {
3759 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3760 control |= __set_reqseq(chan, l->tx_seq);
3761 l2cap_send_sframe(chan, control);
3763 list_add_tail(&l->list, &chan->srej_l);
3767 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3769 struct srej_list *new;
3772 while (tx_seq != chan->expected_tx_seq) {
3773 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3774 control |= __set_reqseq(chan, chan->expected_tx_seq);
3775 l2cap_send_sframe(chan, control);
3777 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3778 new->tx_seq = chan->expected_tx_seq;
3780 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3782 list_add_tail(&new->list, &chan->srej_l);
3785 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3788 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3790 u16 tx_seq = __get_txseq(chan, rx_control);
3791 u16 req_seq = __get_reqseq(chan, rx_control);
3792 u8 sar = __get_ctrl_sar(chan, rx_control);
3793 int tx_seq_offset, expected_tx_seq_offset;
3794 int num_to_ack = (chan->tx_win/6) + 1;
3797 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3798 tx_seq, rx_control);
3800 if (__is_ctrl_final(chan, rx_control) &&
3801 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3802 __clear_monitor_timer(chan);
3803 if (chan->unacked_frames > 0)
3804 __set_retrans_timer(chan);
3805 clear_bit(CONN_WAIT_F, &chan->conn_state);
3808 chan->expected_ack_seq = req_seq;
3809 l2cap_drop_acked_frames(chan);
3811 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3813 /* invalid tx_seq */
3814 if (tx_seq_offset >= chan->tx_win) {
3815 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3819 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3822 if (tx_seq == chan->expected_tx_seq)
3825 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3826 struct srej_list *first;
3828 first = list_first_entry(&chan->srej_l,
3829 struct srej_list, list);
3830 if (tx_seq == first->tx_seq) {
3831 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3832 l2cap_check_srej_gap(chan, tx_seq);
3834 list_del(&first->list);
3837 if (list_empty(&chan->srej_l)) {
3838 chan->buffer_seq = chan->buffer_seq_srej;
3839 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3840 l2cap_send_ack(chan);
3841 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3844 struct srej_list *l;
3846 /* duplicated tx_seq */
3847 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3850 list_for_each_entry(l, &chan->srej_l, list) {
3851 if (l->tx_seq == tx_seq) {
3852 l2cap_resend_srejframe(chan, tx_seq);
3856 l2cap_send_srejframe(chan, tx_seq);
3859 expected_tx_seq_offset = __seq_offset(chan,
3860 chan->expected_tx_seq, chan->buffer_seq);
3862 /* duplicated tx_seq */
3863 if (tx_seq_offset < expected_tx_seq_offset)
3866 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3868 BT_DBG("chan %p, Enter SREJ", chan);
3870 INIT_LIST_HEAD(&chan->srej_l);
3871 chan->buffer_seq_srej = chan->buffer_seq;
3873 __skb_queue_head_init(&chan->srej_q);
3874 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3876 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3878 l2cap_send_srejframe(chan, tx_seq);
3880 __clear_ack_timer(chan);
3885 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3887 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3888 bt_cb(skb)->tx_seq = tx_seq;
3889 bt_cb(skb)->sar = sar;
3890 __skb_queue_tail(&chan->srej_q, skb);
3894 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3895 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3898 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3902 if (__is_ctrl_final(chan, rx_control)) {
3903 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3904 l2cap_retransmit_frames(chan);
3907 __set_ack_timer(chan);
3909 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3910 if (chan->num_acked == num_to_ack - 1)
3911 l2cap_send_ack(chan);
3920 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3922 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3923 __get_reqseq(chan, rx_control), rx_control);
3925 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3926 l2cap_drop_acked_frames(chan);
3928 if (__is_ctrl_poll(chan, rx_control)) {
3929 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3930 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3931 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3932 (chan->unacked_frames > 0))
3933 __set_retrans_timer(chan);
3935 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3936 l2cap_send_srejtail(chan);
3938 l2cap_send_i_or_rr_or_rnr(chan);
3941 } else if (__is_ctrl_final(chan, rx_control)) {
3942 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3944 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3945 l2cap_retransmit_frames(chan);
3948 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3949 (chan->unacked_frames > 0))
3950 __set_retrans_timer(chan);
3952 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3953 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3954 l2cap_send_ack(chan);
3956 l2cap_ertm_send(chan);
3960 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3962 u16 tx_seq = __get_reqseq(chan, rx_control);
3964 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3966 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3968 chan->expected_ack_seq = tx_seq;
3969 l2cap_drop_acked_frames(chan);
3971 if (__is_ctrl_final(chan, rx_control)) {
3972 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3973 l2cap_retransmit_frames(chan);
3975 l2cap_retransmit_frames(chan);
3977 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3978 set_bit(CONN_REJ_ACT, &chan->conn_state);
3981 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
3983 u16 tx_seq = __get_reqseq(chan, rx_control);
3985 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3987 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3989 if (__is_ctrl_poll(chan, rx_control)) {
3990 chan->expected_ack_seq = tx_seq;
3991 l2cap_drop_acked_frames(chan);
3993 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3994 l2cap_retransmit_one_frame(chan, tx_seq);
3996 l2cap_ertm_send(chan);
3998 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3999 chan->srej_save_reqseq = tx_seq;
4000 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4002 } else if (__is_ctrl_final(chan, rx_control)) {
4003 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4004 chan->srej_save_reqseq == tx_seq)
4005 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4007 l2cap_retransmit_one_frame(chan, tx_seq);
4009 l2cap_retransmit_one_frame(chan, tx_seq);
4010 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4011 chan->srej_save_reqseq = tx_seq;
4012 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4017 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4019 u16 tx_seq = __get_reqseq(chan, rx_control);
4021 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4023 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4024 chan->expected_ack_seq = tx_seq;
4025 l2cap_drop_acked_frames(chan);
4027 if (__is_ctrl_poll(chan, rx_control))
4028 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4030 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4031 __clear_retrans_timer(chan);
4032 if (__is_ctrl_poll(chan, rx_control))
4033 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4037 if (__is_ctrl_poll(chan, rx_control)) {
4038 l2cap_send_srejtail(chan);
4040 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4041 l2cap_send_sframe(chan, rx_control);
4045 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4047 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4049 if (__is_ctrl_final(chan, rx_control) &&
4050 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4051 __clear_monitor_timer(chan);
4052 if (chan->unacked_frames > 0)
4053 __set_retrans_timer(chan);
4054 clear_bit(CONN_WAIT_F, &chan->conn_state);
4057 switch (__get_ctrl_super(chan, rx_control)) {
4058 case L2CAP_SUPER_RR:
4059 l2cap_data_channel_rrframe(chan, rx_control);
4062 case L2CAP_SUPER_REJ:
4063 l2cap_data_channel_rejframe(chan, rx_control);
4066 case L2CAP_SUPER_SREJ:
4067 l2cap_data_channel_srejframe(chan, rx_control);
4070 case L2CAP_SUPER_RNR:
4071 l2cap_data_channel_rnrframe(chan, rx_control);
4079 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4081 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4084 int len, next_tx_seq_offset, req_seq_offset;
4086 control = __get_control(chan, skb->data);
4087 skb_pull(skb, __ctrl_size(chan));
4091 * We can just drop the corrupted I-frame here.
4092 * Receiver will miss it and start proper recovery
4093 * procedures and ask retransmission.
4095 if (l2cap_check_fcs(chan, skb))
4098 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4099 len -= L2CAP_SDULEN_SIZE;
4101 if (chan->fcs == L2CAP_FCS_CRC16)
4102 len -= L2CAP_FCS_SIZE;
4104 if (len > chan->mps) {
4105 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4109 req_seq = __get_reqseq(chan, control);
4111 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4113 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4114 chan->expected_ack_seq);
4116 /* check for invalid req-seq */
4117 if (req_seq_offset > next_tx_seq_offset) {
4118 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4122 if (!__is_sframe(chan, control)) {
4124 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4128 l2cap_data_channel_iframe(chan, control, skb);
4132 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4136 l2cap_data_channel_sframe(chan, control, skb);
4146 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4148 struct l2cap_chan *chan;
4149 struct sock *sk = NULL;
4154 chan = l2cap_get_chan_by_scid(conn, cid);
4156 BT_DBG("unknown cid 0x%4.4x", cid);
4162 BT_DBG("chan %p, len %d", chan, skb->len);
4164 if (chan->state != BT_CONNECTED)
4167 switch (chan->mode) {
4168 case L2CAP_MODE_BASIC:
4169 /* If socket recv buffers overflows we drop data here
4170 * which is *bad* because L2CAP has to be reliable.
4171 * But we don't have any other choice. L2CAP doesn't
4172 * provide flow control mechanism. */
4174 if (chan->imtu < skb->len)
4177 if (!chan->ops->recv(chan->data, skb))
4181 case L2CAP_MODE_ERTM:
4182 if (!sock_owned_by_user(sk)) {
4183 l2cap_ertm_data_rcv(sk, skb);
4185 if (sk_add_backlog(sk, skb))
4191 case L2CAP_MODE_STREAMING:
4192 control = __get_control(chan, skb->data);
4193 skb_pull(skb, __ctrl_size(chan));
4196 if (l2cap_check_fcs(chan, skb))
4199 if (__is_sar_start(chan, control))
4200 len -= L2CAP_SDULEN_SIZE;
4202 if (chan->fcs == L2CAP_FCS_CRC16)
4203 len -= L2CAP_FCS_SIZE;
4205 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4208 tx_seq = __get_txseq(chan, control);
4210 if (chan->expected_tx_seq != tx_seq) {
4211 /* Frame(s) missing - must discard partial SDU */
4212 kfree_skb(chan->sdu);
4214 chan->sdu_last_frag = NULL;
4217 /* TODO: Notify userland of missing data */
4220 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4222 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4223 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4228 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4242 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4244 struct sock *sk = NULL;
4245 struct l2cap_chan *chan;
4247 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4255 BT_DBG("sk %p, len %d", sk, skb->len);
4257 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4260 if (chan->imtu < skb->len)
4263 if (!chan->ops->recv(chan->data, skb))
4275 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4277 struct sock *sk = NULL;
4278 struct l2cap_chan *chan;
4280 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4288 BT_DBG("sk %p, len %d", sk, skb->len);
4290 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4293 if (chan->imtu < skb->len)
4296 if (!chan->ops->recv(chan->data, skb))
4308 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4310 struct l2cap_hdr *lh = (void *) skb->data;
4314 skb_pull(skb, L2CAP_HDR_SIZE);
4315 cid = __le16_to_cpu(lh->cid);
4316 len = __le16_to_cpu(lh->len);
4318 if (len != skb->len) {
4323 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4326 case L2CAP_CID_LE_SIGNALING:
4327 case L2CAP_CID_SIGNALING:
4328 l2cap_sig_channel(conn, skb);
4331 case L2CAP_CID_CONN_LESS:
4332 psm = get_unaligned_le16(skb->data);
4334 l2cap_conless_channel(conn, psm, skb);
4337 case L2CAP_CID_LE_DATA:
4338 l2cap_att_channel(conn, cid, skb);
4342 if (smp_sig_channel(conn, skb))
4343 l2cap_conn_del(conn->hcon, EACCES);
4347 l2cap_data_channel(conn, cid, skb);
4352 /* ---- L2CAP interface with lower layer (HCI) ---- */
4354 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4356 int exact = 0, lm1 = 0, lm2 = 0;
4357 struct l2cap_chan *c;
4359 if (type != ACL_LINK)
4362 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4364 /* Find listening sockets and check their link_mode */
4365 read_lock(&chan_list_lock);
4366 list_for_each_entry(c, &chan_list, global_l) {
4367 struct sock *sk = c->sk;
4369 if (c->state != BT_LISTEN)
4372 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4373 lm1 |= HCI_LM_ACCEPT;
4374 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4375 lm1 |= HCI_LM_MASTER;
4377 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4378 lm2 |= HCI_LM_ACCEPT;
4379 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4380 lm2 |= HCI_LM_MASTER;
4383 read_unlock(&chan_list_lock);
4385 return exact ? lm1 : lm2;
4388 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4390 struct l2cap_conn *conn;
4392 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4394 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4398 conn = l2cap_conn_add(hcon, status);
4400 l2cap_conn_ready(conn);
4402 l2cap_conn_del(hcon, bt_to_errno(status));
4407 static int l2cap_disconn_ind(struct hci_conn *hcon)
4409 struct l2cap_conn *conn = hcon->l2cap_data;
4411 BT_DBG("hcon %p", hcon);
4413 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4414 return HCI_ERROR_REMOTE_USER_TERM;
4416 return conn->disc_reason;
4419 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4421 BT_DBG("hcon %p reason %d", hcon, reason);
4423 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4426 l2cap_conn_del(hcon, bt_to_errno(reason));
4431 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4433 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4436 if (encrypt == 0x00) {
4437 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4438 __clear_chan_timer(chan);
4439 __set_chan_timer(chan, HZ * 5);
4440 } else if (chan->sec_level == BT_SECURITY_HIGH)
4441 l2cap_chan_close(chan, ECONNREFUSED);
4443 if (chan->sec_level == BT_SECURITY_MEDIUM)
4444 __clear_chan_timer(chan);
4448 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4450 struct l2cap_conn *conn = hcon->l2cap_data;
4451 struct l2cap_chan *chan;
4456 BT_DBG("conn %p", conn);
4458 if (hcon->type == LE_LINK) {
4459 smp_distribute_keys(conn, 0);
4460 del_timer(&conn->security_timer);
4463 read_lock(&conn->chan_lock);
4465 list_for_each_entry(chan, &conn->chan_l, list) {
4466 struct sock *sk = chan->sk;
4470 BT_DBG("chan->scid %d", chan->scid);
4472 if (chan->scid == L2CAP_CID_LE_DATA) {
4473 if (!status && encrypt) {
4474 chan->sec_level = hcon->sec_level;
4475 l2cap_chan_ready(sk);
4482 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4487 if (!status && (chan->state == BT_CONNECTED ||
4488 chan->state == BT_CONFIG)) {
4489 l2cap_check_encryption(chan, encrypt);
4494 if (chan->state == BT_CONNECT) {
4496 struct l2cap_conn_req req;
4497 req.scid = cpu_to_le16(chan->scid);
4498 req.psm = chan->psm;
4500 chan->ident = l2cap_get_ident(conn);
4501 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4503 l2cap_send_cmd(conn, chan->ident,
4504 L2CAP_CONN_REQ, sizeof(req), &req);
4506 __clear_chan_timer(chan);
4507 __set_chan_timer(chan, HZ / 10);
4509 } else if (chan->state == BT_CONNECT2) {
4510 struct l2cap_conn_rsp rsp;
4514 if (bt_sk(sk)->defer_setup) {
4515 struct sock *parent = bt_sk(sk)->parent;
4516 res = L2CAP_CR_PEND;
4517 stat = L2CAP_CS_AUTHOR_PEND;
4519 parent->sk_data_ready(parent, 0);
4521 l2cap_state_change(chan, BT_CONFIG);
4522 res = L2CAP_CR_SUCCESS;
4523 stat = L2CAP_CS_NO_INFO;
4526 l2cap_state_change(chan, BT_DISCONN);
4527 __set_chan_timer(chan, HZ / 10);
4528 res = L2CAP_CR_SEC_BLOCK;
4529 stat = L2CAP_CS_NO_INFO;
4532 rsp.scid = cpu_to_le16(chan->dcid);
4533 rsp.dcid = cpu_to_le16(chan->scid);
4534 rsp.result = cpu_to_le16(res);
4535 rsp.status = cpu_to_le16(stat);
4536 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4543 read_unlock(&conn->chan_lock);
4548 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4550 struct l2cap_conn *conn = hcon->l2cap_data;
4553 conn = l2cap_conn_add(hcon, 0);
4558 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4560 if (!(flags & ACL_CONT)) {
4561 struct l2cap_hdr *hdr;
4562 struct l2cap_chan *chan;
4567 BT_ERR("Unexpected start frame (len %d)", skb->len);
4568 kfree_skb(conn->rx_skb);
4569 conn->rx_skb = NULL;
4571 l2cap_conn_unreliable(conn, ECOMM);
4574 /* Start fragment always begin with Basic L2CAP header */
4575 if (skb->len < L2CAP_HDR_SIZE) {
4576 BT_ERR("Frame is too short (len %d)", skb->len);
4577 l2cap_conn_unreliable(conn, ECOMM);
4581 hdr = (struct l2cap_hdr *) skb->data;
4582 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4583 cid = __le16_to_cpu(hdr->cid);
4585 if (len == skb->len) {
4586 /* Complete frame received */
4587 l2cap_recv_frame(conn, skb);
4591 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4593 if (skb->len > len) {
4594 BT_ERR("Frame is too long (len %d, expected len %d)",
4596 l2cap_conn_unreliable(conn, ECOMM);
4600 chan = l2cap_get_chan_by_scid(conn, cid);
4602 if (chan && chan->sk) {
4603 struct sock *sk = chan->sk;
4605 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4606 BT_ERR("Frame exceeding recv MTU (len %d, "
4610 l2cap_conn_unreliable(conn, ECOMM);
4616 /* Allocate skb for the complete frame (with header) */
4617 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4621 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4623 conn->rx_len = len - skb->len;
4625 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4627 if (!conn->rx_len) {
4628 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4629 l2cap_conn_unreliable(conn, ECOMM);
4633 if (skb->len > conn->rx_len) {
4634 BT_ERR("Fragment is too long (len %d, expected %d)",
4635 skb->len, conn->rx_len);
4636 kfree_skb(conn->rx_skb);
4637 conn->rx_skb = NULL;
4639 l2cap_conn_unreliable(conn, ECOMM);
4643 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4645 conn->rx_len -= skb->len;
4647 if (!conn->rx_len) {
4648 /* Complete frame received */
4649 l2cap_recv_frame(conn, conn->rx_skb);
4650 conn->rx_skb = NULL;
4659 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4661 struct l2cap_chan *c;
4663 read_lock_bh(&chan_list_lock);
4665 list_for_each_entry(c, &chan_list, global_l) {
4666 struct sock *sk = c->sk;
4668 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4669 batostr(&bt_sk(sk)->src),
4670 batostr(&bt_sk(sk)->dst),
4671 c->state, __le16_to_cpu(c->psm),
4672 c->scid, c->dcid, c->imtu, c->omtu,
4673 c->sec_level, c->mode);
4676 read_unlock_bh(&chan_list_lock);
4681 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4683 return single_open(file, l2cap_debugfs_show, inode->i_private);
4686 static const struct file_operations l2cap_debugfs_fops = {
4687 .open = l2cap_debugfs_open,
4689 .llseek = seq_lseek,
4690 .release = single_release,
4693 static struct dentry *l2cap_debugfs;
4695 static struct hci_proto l2cap_hci_proto = {
4697 .id = HCI_PROTO_L2CAP,
4698 .connect_ind = l2cap_connect_ind,
4699 .connect_cfm = l2cap_connect_cfm,
4700 .disconn_ind = l2cap_disconn_ind,
4701 .disconn_cfm = l2cap_disconn_cfm,
4702 .security_cfm = l2cap_security_cfm,
4703 .recv_acldata = l2cap_recv_acldata
4706 int __init l2cap_init(void)
4710 err = l2cap_init_sockets();
4714 err = hci_register_proto(&l2cap_hci_proto);
4716 BT_ERR("L2CAP protocol registration failed");
4717 bt_sock_unregister(BTPROTO_L2CAP);
4722 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4723 bt_debugfs, NULL, &l2cap_debugfs_fops);
4725 BT_ERR("Failed to create L2CAP debug file");
4731 l2cap_cleanup_sockets();
4735 void l2cap_exit(void)
4737 debugfs_remove(l2cap_debugfs);
4739 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4740 BT_ERR("L2CAP protocol unregistration failed");
4742 l2cap_cleanup_sockets();
4745 module_param(disable_ertm, bool, 0644);
4746 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4748 module_param(enable_hs, bool, 0644);
4749 MODULE_PARM_DESC(enable_hs, "Enable High Speed");