   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
/* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
79 static inline void chan_hold(struct l2cap_chan *c)
81 atomic_inc(&c->refcnt);
84 static inline void chan_put(struct l2cap_chan *c)
86 if (atomic_dec_and_test(&c->refcnt))
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
94 list_for_each_entry(c, &conn->chan_l, list) {
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 struct l2cap_chan *c;
105 list_for_each_entry(c, &conn->chan_l, list) {
112 /* Find channel with given SCID.
113 * Returns locked socket */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116 struct l2cap_chan *c;
118 mutex_lock(&conn->chan_lock);
119 c = __l2cap_get_chan_by_scid(conn, cid);
122 mutex_unlock(&conn->chan_lock);
126 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128 struct l2cap_chan *c;
130 list_for_each_entry(c, &conn->chan_l, list) {
131 if (c->ident == ident)
137 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
139 struct l2cap_chan *c;
141 mutex_lock(&conn->chan_lock);
142 c = __l2cap_get_chan_by_ident(conn, ident);
145 mutex_unlock(&conn->chan_lock);
149 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
151 struct l2cap_chan *c;
153 list_for_each_entry(c, &chan_list, global_l) {
154 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
160 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
164 write_lock_bh(&chan_list_lock);
166 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
179 for (p = 0x1001; p < 0x1100; p += 2)
180 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
181 chan->psm = cpu_to_le16(p);
182 chan->sport = cpu_to_le16(p);
189 write_unlock_bh(&chan_list_lock);
193 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
195 write_lock_bh(&chan_list_lock);
199 write_unlock_bh(&chan_list_lock);
204 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
206 u16 cid = L2CAP_CID_DYN_START;
208 for (; cid < L2CAP_CID_DYN_END; cid++) {
209 if (!__l2cap_get_chan_by_scid(conn, cid))
216 static void l2cap_set_timer(struct l2cap_chan *chan, struct delayed_work *work, long timeout)
218 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
220 cancel_delayed_work_sync(work);
222 schedule_delayed_work(work, timeout);
/* Cancel a channel delayed work, waiting for a running instance to finish. */
static void l2cap_clear_timer(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}
230 static char *state_to_string(int state)
234 return "BT_CONNECTED";
244 return "BT_CONNECT2";
253 return "invalid state";
256 static void l2cap_state_change(struct l2cap_chan *chan, int state)
258 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
259 state_to_string(state));
262 chan->ops->state_change(chan->data, state);
/* Channel timer expiry: close the channel with a reason derived from its
 * state (ECONNREFUSED while connecting/configuring, else ETIMEDOUT). */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	lock_sock(sk);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
				chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	release_sock(sk);

	chan->ops->close(chan->data);
	chan_put(chan);
}
292 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
294 struct l2cap_chan *chan;
296 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
302 write_lock_bh(&chan_list_lock);
303 list_add(&chan->global_l, &chan_list);
304 write_unlock_bh(&chan_list_lock);
306 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
308 chan->state = BT_OPEN;
310 atomic_set(&chan->refcnt, 1);
312 BT_DBG("sk %p chan %p", sk, chan);
317 void l2cap_chan_destroy(struct l2cap_chan *chan)
319 write_lock_bh(&chan_list_lock);
320 list_del(&chan->global_l);
321 write_unlock_bh(&chan_list_lock);
326 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
328 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
329 chan->psm, chan->dcid);
331 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
335 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
336 if (conn->hcon->type == LE_LINK) {
338 chan->omtu = L2CAP_LE_DEFAULT_MTU;
339 chan->scid = L2CAP_CID_LE_DATA;
340 chan->dcid = L2CAP_CID_LE_DATA;
342 /* Alloc CID for connection-oriented socket */
343 chan->scid = l2cap_alloc_cid(conn);
344 chan->omtu = L2CAP_DEFAULT_MTU;
346 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
347 /* Connectionless socket */
348 chan->scid = L2CAP_CID_CONN_LESS;
349 chan->dcid = L2CAP_CID_CONN_LESS;
350 chan->omtu = L2CAP_DEFAULT_MTU;
352 /* Raw socket can send/recv signalling messages only */
353 chan->scid = L2CAP_CID_SIGNALING;
354 chan->dcid = L2CAP_CID_SIGNALING;
355 chan->omtu = L2CAP_DEFAULT_MTU;
358 chan->local_id = L2CAP_BESTEFFORT_ID;
359 chan->local_stype = L2CAP_SERV_BESTEFFORT;
360 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
361 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
362 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
363 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
367 list_add(&chan->list, &conn->chan_l);
371 * Must be called on the locked socket. */
372 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
374 struct sock *sk = chan->sk;
375 struct l2cap_conn *conn = chan->conn;
376 struct sock *parent = bt_sk(sk)->parent;
378 __clear_chan_timer(chan);
380 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
383 /* Delete from channel list */
384 mutex_lock(&conn->chan_lock);
385 list_del(&chan->list);
386 mutex_unlock(&conn->chan_lock);
390 hci_conn_put(conn->hcon);
393 l2cap_state_change(chan, BT_CLOSED);
394 sock_set_flag(sk, SOCK_ZAPPED);
400 bt_accept_unlink(sk);
401 parent->sk_data_ready(parent, 0);
403 sk->sk_state_change(sk);
405 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
406 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
409 skb_queue_purge(&chan->tx_q);
411 if (chan->mode == L2CAP_MODE_ERTM) {
412 struct srej_list *l, *tmp;
414 __clear_retrans_timer(chan);
415 __clear_monitor_timer(chan);
416 __clear_ack_timer(chan);
418 skb_queue_purge(&chan->srej_q);
420 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
427 static void l2cap_chan_cleanup_listen(struct sock *parent)
431 BT_DBG("parent %p", parent);
433 /* Close not yet accepted channels */
434 while ((sk = bt_accept_dequeue(parent, NULL))) {
435 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
436 __clear_chan_timer(chan);
438 l2cap_chan_close(chan, ECONNRESET);
440 chan->ops->close(chan->data);
444 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
446 struct l2cap_conn *conn = chan->conn;
447 struct sock *sk = chan->sk;
449 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
451 switch (chan->state) {
453 l2cap_chan_cleanup_listen(sk);
455 l2cap_state_change(chan, BT_CLOSED);
456 sock_set_flag(sk, SOCK_ZAPPED);
461 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
462 conn->hcon->type == ACL_LINK) {
463 __clear_chan_timer(chan);
464 __set_chan_timer(chan, sk->sk_sndtimeo);
465 l2cap_send_disconn_req(conn, chan, reason);
467 l2cap_chan_del(chan, reason);
471 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
472 conn->hcon->type == ACL_LINK) {
473 struct l2cap_conn_rsp rsp;
476 if (bt_sk(sk)->defer_setup)
477 result = L2CAP_CR_SEC_BLOCK;
479 result = L2CAP_CR_BAD_PSM;
480 l2cap_state_change(chan, BT_DISCONN);
482 rsp.scid = cpu_to_le16(chan->dcid);
483 rsp.dcid = cpu_to_le16(chan->scid);
484 rsp.result = cpu_to_le16(result);
485 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
486 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
490 l2cap_chan_del(chan, reason);
495 l2cap_chan_del(chan, reason);
499 sock_set_flag(sk, SOCK_ZAPPED);
504 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
506 if (chan->chan_type == L2CAP_CHAN_RAW) {
507 switch (chan->sec_level) {
508 case BT_SECURITY_HIGH:
509 return HCI_AT_DEDICATED_BONDING_MITM;
510 case BT_SECURITY_MEDIUM:
511 return HCI_AT_DEDICATED_BONDING;
513 return HCI_AT_NO_BONDING;
515 } else if (chan->psm == cpu_to_le16(0x0001)) {
516 if (chan->sec_level == BT_SECURITY_LOW)
517 chan->sec_level = BT_SECURITY_SDP;
519 if (chan->sec_level == BT_SECURITY_HIGH)
520 return HCI_AT_NO_BONDING_MITM;
522 return HCI_AT_NO_BONDING;
524 switch (chan->sec_level) {
525 case BT_SECURITY_HIGH:
526 return HCI_AT_GENERAL_BONDING_MITM;
527 case BT_SECURITY_MEDIUM:
528 return HCI_AT_GENERAL_BONDING;
530 return HCI_AT_NO_BONDING;
535 /* Service level security */
536 int l2cap_chan_check_security(struct l2cap_chan *chan)
538 struct l2cap_conn *conn = chan->conn;
541 auth_type = l2cap_get_auth_type(chan);
543 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
546 static u8 l2cap_get_ident(struct l2cap_conn *conn)
550 /* Get next available identificator.
551 * 1 - 128 are used by kernel.
552 * 129 - 199 are reserved.
553 * 200 - 254 are used by utilities like l2ping, etc.
556 spin_lock_bh(&conn->lock);
558 if (++conn->tx_ident > 128)
563 spin_unlock_bh(&conn->lock);
568 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
570 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
573 BT_DBG("code 0x%2.2x", code);
578 if (lmp_no_flush_capable(conn->hcon->hdev))
579 flags = ACL_START_NO_FLUSH;
583 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
584 skb->priority = HCI_PRIO_MAX;
586 hci_send_acl(conn->hchan, skb, flags);
589 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
591 struct hci_conn *hcon = chan->conn->hcon;
594 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
597 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
598 lmp_no_flush_capable(hcon->hdev))
599 flags = ACL_START_NO_FLUSH;
603 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
604 hci_send_acl(chan->conn->hchan, skb, flags);
607 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
610 struct l2cap_hdr *lh;
611 struct l2cap_conn *conn = chan->conn;
614 if (chan->state != BT_CONNECTED)
617 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
618 hlen = L2CAP_EXT_HDR_SIZE;
620 hlen = L2CAP_ENH_HDR_SIZE;
622 if (chan->fcs == L2CAP_FCS_CRC16)
623 hlen += L2CAP_FCS_SIZE;
625 BT_DBG("chan %p, control 0x%8.8x", chan, control);
627 count = min_t(unsigned int, conn->mtu, hlen);
629 control |= __set_sframe(chan);
631 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
632 control |= __set_ctrl_final(chan);
634 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
635 control |= __set_ctrl_poll(chan);
637 skb = bt_skb_alloc(count, GFP_ATOMIC);
641 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
642 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
643 lh->cid = cpu_to_le16(chan->dcid);
645 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
647 if (chan->fcs == L2CAP_FCS_CRC16) {
648 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
649 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
652 skb->priority = HCI_PRIO_MAX;
653 l2cap_do_send(chan, skb);
656 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
658 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
659 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
660 set_bit(CONN_RNR_SENT, &chan->conn_state);
662 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
664 control |= __set_reqseq(chan, chan->buffer_seq);
666 l2cap_send_sframe(chan, control);
669 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
671 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
674 static void l2cap_do_start(struct l2cap_chan *chan)
676 struct l2cap_conn *conn = chan->conn;
678 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
679 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
682 if (l2cap_chan_check_security(chan) &&
683 __l2cap_no_conn_pending(chan)) {
684 struct l2cap_conn_req req;
685 req.scid = cpu_to_le16(chan->scid);
688 chan->ident = l2cap_get_ident(conn);
689 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
691 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
695 struct l2cap_info_req req;
696 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
698 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
699 conn->info_ident = l2cap_get_ident(conn);
701 mod_timer(&conn->info_timer, jiffies +
702 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
704 l2cap_send_cmd(conn, conn->info_ident,
705 L2CAP_INFO_REQ, sizeof(req), &req);
709 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
711 u32 local_feat_mask = l2cap_feat_mask;
713 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
716 case L2CAP_MODE_ERTM:
717 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
718 case L2CAP_MODE_STREAMING:
719 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
725 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
728 struct l2cap_disconn_req req;
735 if (chan->mode == L2CAP_MODE_ERTM) {
736 __clear_retrans_timer(chan);
737 __clear_monitor_timer(chan);
738 __clear_ack_timer(chan);
741 req.dcid = cpu_to_le16(chan->dcid);
742 req.scid = cpu_to_le16(chan->scid);
743 l2cap_send_cmd(conn, l2cap_get_ident(conn),
744 L2CAP_DISCONN_REQ, sizeof(req), &req);
746 l2cap_state_change(chan, BT_DISCONN);
750 /* ---- L2CAP connections ---- */
751 static void l2cap_conn_start(struct l2cap_conn *conn)
753 struct l2cap_chan *chan, *tmp;
755 BT_DBG("conn %p", conn);
757 mutex_lock(&conn->chan_lock);
759 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
760 struct sock *sk = chan->sk;
764 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
769 if (chan->state == BT_CONNECT) {
770 struct l2cap_conn_req req;
772 if (!l2cap_chan_check_security(chan) ||
773 !__l2cap_no_conn_pending(chan)) {
778 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
779 && test_bit(CONF_STATE2_DEVICE,
780 &chan->conf_state)) {
781 /* l2cap_chan_close() calls list_del(chan)
782 * so release the lock */
783 mutex_unlock(&conn->chan_lock);
784 l2cap_chan_close(chan, ECONNRESET);
785 utex_lock(&conn->chan_lock);
790 req.scid = cpu_to_le16(chan->scid);
793 chan->ident = l2cap_get_ident(conn);
794 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
796 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
799 } else if (chan->state == BT_CONNECT2) {
800 struct l2cap_conn_rsp rsp;
802 rsp.scid = cpu_to_le16(chan->dcid);
803 rsp.dcid = cpu_to_le16(chan->scid);
805 if (l2cap_chan_check_security(chan)) {
806 if (bt_sk(sk)->defer_setup) {
807 struct sock *parent = bt_sk(sk)->parent;
808 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
809 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
811 parent->sk_data_ready(parent, 0);
814 l2cap_state_change(chan, BT_CONFIG);
815 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
816 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
819 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
820 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
823 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
826 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
827 rsp.result != L2CAP_CR_SUCCESS) {
832 set_bit(CONF_REQ_SENT, &chan->conf_state);
833 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
834 l2cap_build_conf_req(chan, buf), buf);
835 chan->num_conf_req++;
841 mutex_unlock(&conn->chan_lock);
844 /* Find socket with cid and source bdaddr.
845 * Returns closest match, locked.
847 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
849 struct l2cap_chan *c, *c1 = NULL;
851 read_lock(&chan_list_lock);
853 list_for_each_entry(c, &chan_list, global_l) {
854 struct sock *sk = c->sk;
856 if (state && c->state != state)
859 if (c->scid == cid) {
861 if (!bacmp(&bt_sk(sk)->src, src)) {
862 read_unlock(&chan_list_lock);
867 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
872 read_unlock(&chan_list_lock);
877 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
879 struct sock *parent, *sk;
880 struct l2cap_chan *chan, *pchan;
884 /* Check if we have socket listening on cid */
885 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
894 /* Check for backlog size */
895 if (sk_acceptq_is_full(parent)) {
896 BT_DBG("backlog full %d", parent->sk_ack_backlog);
900 chan = pchan->ops->new_connection(pchan->data);
906 mutex_lock(&conn->chan_lock);
908 hci_conn_hold(conn->hcon);
910 bacpy(&bt_sk(sk)->src, conn->src);
911 bacpy(&bt_sk(sk)->dst, conn->dst);
913 bt_accept_enqueue(parent, sk);
915 __l2cap_chan_add(conn, chan);
917 __set_chan_timer(chan, sk->sk_sndtimeo);
919 l2cap_state_change(chan, BT_CONNECTED);
920 parent->sk_data_ready(parent, 0);
922 mutex_unlock(&conn->chan_lock);
925 release_sock(parent);
928 static void l2cap_chan_ready(struct sock *sk)
930 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
931 struct sock *parent = bt_sk(sk)->parent;
933 BT_DBG("sk %p, parent %p", sk, parent);
935 chan->conf_state = 0;
936 __clear_chan_timer(chan);
938 l2cap_state_change(chan, BT_CONNECTED);
939 sk->sk_state_change(sk);
942 parent->sk_data_ready(parent, 0);
945 static void l2cap_conn_ready(struct l2cap_conn *conn)
947 struct l2cap_chan *chan;
949 BT_DBG("conn %p", conn);
951 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
952 l2cap_le_conn_ready(conn);
954 if (conn->hcon->out && conn->hcon->type == LE_LINK)
955 smp_conn_security(conn, conn->hcon->pending_sec_level);
957 mutex_lock(&conn->chan_lock);
959 list_for_each_entry(chan, &conn->chan_l, list) {
960 struct sock *sk = chan->sk;
964 if (conn->hcon->type == LE_LINK) {
965 if (smp_conn_security(conn, chan->sec_level))
966 l2cap_chan_ready(sk);
968 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
969 __clear_chan_timer(chan);
970 l2cap_state_change(chan, BT_CONNECTED);
971 sk->sk_state_change(sk);
973 } else if (chan->state == BT_CONNECT)
974 l2cap_do_start(chan);
979 mutex_unlock(&conn->chan_lock);
982 /* Notify sockets that we cannot guaranty reliability anymore */
983 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
985 struct l2cap_chan *chan;
987 BT_DBG("conn %p", conn);
989 mutex_lock(&conn->chan_lock);
991 list_for_each_entry(chan, &conn->chan_l, list) {
992 struct sock *sk = chan->sk;
994 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
998 mutex_unlock(&conn->chan_lock);
1001 static void l2cap_info_timeout(unsigned long arg)
1003 struct l2cap_conn *conn = (void *) arg;
1005 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1006 conn->info_ident = 0;
1008 l2cap_conn_start(conn);
1011 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1013 struct l2cap_conn *conn = hcon->l2cap_data;
1014 struct l2cap_chan *chan, *l;
1020 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1022 kfree_skb(conn->rx_skb);
1025 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1028 l2cap_chan_del(chan, err);
1030 chan->ops->close(chan->data);
1033 hci_chan_del(conn->hchan);
1035 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1036 del_timer_sync(&conn->info_timer);
1038 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1039 del_timer(&conn->security_timer);
1040 smp_chan_destroy(conn);
1043 hcon->l2cap_data = NULL;
1047 static void security_timeout(unsigned long arg)
1049 struct l2cap_conn *conn = (void *) arg;
1051 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1054 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1056 struct l2cap_conn *conn = hcon->l2cap_data;
1057 struct hci_chan *hchan;
1062 hchan = hci_chan_create(hcon);
1066 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1068 hci_chan_del(hchan);
1072 hcon->l2cap_data = conn;
1074 conn->hchan = hchan;
1076 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1078 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1079 conn->mtu = hcon->hdev->le_mtu;
1081 conn->mtu = hcon->hdev->acl_mtu;
1083 conn->src = &hcon->hdev->bdaddr;
1084 conn->dst = &hcon->dst;
1086 conn->feat_mask = 0;
1088 spin_lock_init(&conn->lock);
1089 mutex_init(&conn->chan_lock);
1091 INIT_LIST_HEAD(&conn->chan_l);
1093 if (hcon->type == LE_LINK)
1094 setup_timer(&conn->security_timer, security_timeout,
1095 (unsigned long) conn);
1097 setup_timer(&conn->info_timer, l2cap_info_timeout,
1098 (unsigned long) conn);
1100 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1105 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1107 mutex_lock(&conn->chan_lock);
1108 __l2cap_chan_add(conn, chan);
1109 mutex_unlock(&conn->chan_lock);
1112 /* ---- Socket interface ---- */
1114 /* Find socket with psm and source bdaddr.
1115 * Returns closest match.
1117 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1119 struct l2cap_chan *c, *c1 = NULL;
1121 read_lock(&chan_list_lock);
1123 list_for_each_entry(c, &chan_list, global_l) {
1124 struct sock *sk = c->sk;
1126 if (state && c->state != state)
1129 if (c->psm == psm) {
1131 if (!bacmp(&bt_sk(sk)->src, src)) {
1132 read_unlock(&chan_list_lock);
1137 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1142 read_unlock(&chan_list_lock);
1147 int l2cap_chan_connect(struct l2cap_chan *chan)
1149 struct sock *sk = chan->sk;
1150 bdaddr_t *src = &bt_sk(sk)->src;
1151 bdaddr_t *dst = &bt_sk(sk)->dst;
1152 struct l2cap_conn *conn;
1153 struct hci_conn *hcon;
1154 struct hci_dev *hdev;
1158 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1161 hdev = hci_get_route(dst, src);
1163 return -EHOSTUNREACH;
1167 auth_type = l2cap_get_auth_type(chan);
1169 if (chan->dcid == L2CAP_CID_LE_DATA)
1170 hcon = hci_connect(hdev, LE_LINK, dst,
1171 chan->sec_level, auth_type);
1173 hcon = hci_connect(hdev, ACL_LINK, dst,
1174 chan->sec_level, auth_type);
1177 err = PTR_ERR(hcon);
1181 conn = l2cap_conn_add(hcon, 0);
1188 /* Update source addr of the socket */
1189 bacpy(src, conn->src);
1191 l2cap_chan_add(conn, chan);
1193 l2cap_state_change(chan, BT_CONNECT);
1194 __set_chan_timer(chan, sk->sk_sndtimeo);
1196 if (hcon->state == BT_CONNECTED) {
1197 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1198 __clear_chan_timer(chan);
1199 if (l2cap_chan_check_security(chan))
1200 l2cap_state_change(chan, BT_CONNECTED);
1202 l2cap_do_start(chan);
1208 hci_dev_unlock(hdev);
1213 int __l2cap_wait_ack(struct sock *sk)
1215 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1216 DECLARE_WAITQUEUE(wait, current);
1220 add_wait_queue(sk_sleep(sk), &wait);
1221 set_current_state(TASK_INTERRUPTIBLE);
1222 while (chan->unacked_frames > 0 && chan->conn) {
1226 if (signal_pending(current)) {
1227 err = sock_intr_errno(timeo);
1232 timeo = schedule_timeout(timeo);
1234 set_current_state(TASK_INTERRUPTIBLE);
1236 err = sock_error(sk);
1240 set_current_state(TASK_RUNNING);
1241 remove_wait_queue(sk_sleep(sk), &wait);
1245 static void l2cap_monitor_timeout(struct work_struct *work)
1247 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1248 monitor_timer.work);
1249 struct sock *sk = chan->sk;
1251 BT_DBG("chan %p", chan);
1254 if (chan->retry_count >= chan->remote_max_tx) {
1255 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1260 chan->retry_count++;
1261 __set_monitor_timer(chan);
1263 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1267 static void l2cap_retrans_timeout(struct work_struct *work)
1269 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1270 retrans_timer.work);
1271 struct sock *sk = chan->sk;
1273 BT_DBG("chan %p", chan);
1276 chan->retry_count = 1;
1277 __set_monitor_timer(chan);
1279 set_bit(CONN_WAIT_F, &chan->conn_state);
1281 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1285 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1287 struct sk_buff *skb;
1289 while ((skb = skb_peek(&chan->tx_q)) &&
1290 chan->unacked_frames) {
1291 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1294 skb = skb_dequeue(&chan->tx_q);
1297 chan->unacked_frames--;
1300 if (!chan->unacked_frames)
1301 __clear_retrans_timer(chan);
1304 static void l2cap_streaming_send(struct l2cap_chan *chan)
1306 struct sk_buff *skb;
1310 while ((skb = skb_dequeue(&chan->tx_q))) {
1311 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1312 control |= __set_txseq(chan, chan->next_tx_seq);
1313 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1315 if (chan->fcs == L2CAP_FCS_CRC16) {
1316 fcs = crc16(0, (u8 *)skb->data,
1317 skb->len - L2CAP_FCS_SIZE);
1318 put_unaligned_le16(fcs,
1319 skb->data + skb->len - L2CAP_FCS_SIZE);
1322 l2cap_do_send(chan, skb);
1324 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1328 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1330 struct sk_buff *skb, *tx_skb;
1334 skb = skb_peek(&chan->tx_q);
1338 while (bt_cb(skb)->tx_seq != tx_seq) {
1339 if (skb_queue_is_last(&chan->tx_q, skb))
1342 skb = skb_queue_next(&chan->tx_q, skb);
1345 if (chan->remote_max_tx &&
1346 bt_cb(skb)->retries == chan->remote_max_tx) {
1347 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1351 tx_skb = skb_clone(skb, GFP_ATOMIC);
1352 bt_cb(skb)->retries++;
1354 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1355 control &= __get_sar_mask(chan);
1357 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1358 control |= __set_ctrl_final(chan);
1360 control |= __set_reqseq(chan, chan->buffer_seq);
1361 control |= __set_txseq(chan, tx_seq);
1363 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1365 if (chan->fcs == L2CAP_FCS_CRC16) {
1366 fcs = crc16(0, (u8 *)tx_skb->data,
1367 tx_skb->len - L2CAP_FCS_SIZE);
1368 put_unaligned_le16(fcs,
1369 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1372 l2cap_do_send(chan, tx_skb);
1375 static int l2cap_ertm_send(struct l2cap_chan *chan)
1377 struct sk_buff *skb, *tx_skb;
1382 if (chan->state != BT_CONNECTED)
1385 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1387 if (chan->remote_max_tx &&
1388 bt_cb(skb)->retries == chan->remote_max_tx) {
1389 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1393 tx_skb = skb_clone(skb, GFP_ATOMIC);
1395 bt_cb(skb)->retries++;
1397 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1398 control &= __get_sar_mask(chan);
1400 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1401 control |= __set_ctrl_final(chan);
1403 control |= __set_reqseq(chan, chan->buffer_seq);
1404 control |= __set_txseq(chan, chan->next_tx_seq);
1406 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1408 if (chan->fcs == L2CAP_FCS_CRC16) {
1409 fcs = crc16(0, (u8 *)skb->data,
1410 tx_skb->len - L2CAP_FCS_SIZE);
1411 put_unaligned_le16(fcs, skb->data +
1412 tx_skb->len - L2CAP_FCS_SIZE);
1415 l2cap_do_send(chan, tx_skb);
1417 __set_retrans_timer(chan);
1419 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1421 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1423 if (bt_cb(skb)->retries == 1)
1424 chan->unacked_frames++;
1426 chan->frames_sent++;
1428 if (skb_queue_is_last(&chan->tx_q, skb))
1429 chan->tx_send_head = NULL;
1431 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1439 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1443 if (!skb_queue_empty(&chan->tx_q))
1444 chan->tx_send_head = chan->tx_q.next;
1446 chan->next_tx_seq = chan->expected_ack_seq;
1447 ret = l2cap_ertm_send(chan);
1451 static void l2cap_send_ack(struct l2cap_chan *chan)
1455 control |= __set_reqseq(chan, chan->buffer_seq);
1457 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1458 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1459 set_bit(CONN_RNR_SENT, &chan->conn_state);
1460 l2cap_send_sframe(chan, control);
1464 if (l2cap_ertm_send(chan) > 0)
1467 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1468 l2cap_send_sframe(chan, control);
1471 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1473 struct srej_list *tail;
1476 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1477 control |= __set_ctrl_final(chan);
1479 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1480 control |= __set_reqseq(chan, tail->tx_seq);
1482 l2cap_send_sframe(chan, control);
1485 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1487 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1488 struct sk_buff **frag;
1491 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1497 /* Continuation fragments (no L2CAP header) */
1498 frag = &skb_shinfo(skb)->frag_list;
1500 count = min_t(unsigned int, conn->mtu, len);
1502 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1505 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1508 (*frag)->priority = skb->priority;
1513 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP basic header followed by
 * a 2-byte PSM, then the user payload from msg. Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
1519 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1520 struct msghdr *msg, size_t len,
1523 struct sock *sk = chan->sk;
1524 struct l2cap_conn *conn = chan->conn;
1525 struct sk_buff *skb;
/* Header = basic L2CAP header + PSM field. */
1526 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1527 struct l2cap_hdr *lh;
1529 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
/* First fragment carries at most (mtu - header) bytes of payload. */
1531 count = min_t(unsigned int, (conn->mtu - hlen), len);
1532 skb = bt_skb_send_alloc(sk, count + hlen,
1533 msg->msg_flags & MSG_DONTWAIT, &err);
1535 return ERR_PTR(err);
1537 skb->priority = priority;
1539 /* Create L2CAP header */
1540 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1541 lh->cid = cpu_to_le16(chan->dcid);
/* Length field covers payload plus the PSM, not the basic header. */
1542 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1543 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1545 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1546 if (unlikely(err < 0)) {
1548 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: L2CAP basic header plus the user
 * payload from msg. Returns the skb or an ERR_PTR on failure.
 */
1553 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1554 struct msghdr *msg, size_t len,
1557 struct sock *sk = chan->sk;
1558 struct l2cap_conn *conn = chan->conn;
1559 struct sk_buff *skb;
1560 int err, count, hlen = L2CAP_HDR_SIZE;
1561 struct l2cap_hdr *lh;
1563 BT_DBG("sk %p len %d", sk, (int)len);
1565 count = min_t(unsigned int, (conn->mtu - hlen), len);
1566 skb = bt_skb_send_alloc(sk, count + hlen,
1567 msg->msg_flags & MSG_DONTWAIT, &err);
1569 return ERR_PTR(err);
1571 skb->priority = priority;
1573 /* Create L2CAP header */
1574 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1575 lh->cid = cpu_to_le16(chan->dcid);
/* hlen == L2CAP_HDR_SIZE here, so len field is the raw payload size. */
1576 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1578 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1579 if (unlikely(err < 0)) {
1581 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: basic header, enhanced or
 * extended control field, optional SDU-length field (for SAR start
 * frames), payload, and a zeroed FCS placeholder when CRC16 is used.
 * Returns the skb or an ERR_PTR.
 */
1586 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1587 struct msghdr *msg, size_t len,
1588 u32 control, u16 sdulen)
1590 struct sock *sk = chan->sk;
1591 struct l2cap_conn *conn = chan->conn;
1592 struct sk_buff *skb;
1593 int err, count, hlen;
1594 struct l2cap_hdr *lh;
1596 BT_DBG("sk %p len %d", sk, (int)len);
1599 return ERR_PTR(-ENOTCONN);
/* Extended control field (4 bytes) when EWS negotiated, else 2. */
1601 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1602 hlen = L2CAP_EXT_HDR_SIZE;
1604 hlen = L2CAP_ENH_HDR_SIZE;
1607 hlen += L2CAP_SDULEN_SIZE;
1609 if (chan->fcs == L2CAP_FCS_CRC16)
1610 hlen += L2CAP_FCS_SIZE;
1612 count = min_t(unsigned int, (conn->mtu - hlen), len);
1613 skb = bt_skb_send_alloc(sk, count + hlen,
1614 msg->msg_flags & MSG_DONTWAIT, &err);
1616 return ERR_PTR(err);
1618 /* Create L2CAP header */
1619 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1620 lh->cid = cpu_to_le16(chan->dcid);
1621 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field width depends on FLAG_EXT_CTRL (__ctrl_size). */
1623 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1626 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1628 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1629 if (unlikely(err < 0)) {
1631 return ERR_PTR(err);
/* FCS placeholder; presumably filled in at transmit time — confirm. */
1634 if (chan->fcs == L2CAP_FCS_CRC16)
1635 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1637 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START I-frame
 * (carrying the total SDU length), zero or more CONTINUE frames and a
 * final END frame, queue them on a local list, then splice the whole
 * list onto the channel's tx queue atomically.
 */
1641 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1643 struct sk_buff *skb;
1644 struct sk_buff_head sar_queue;
1648 skb_queue_head_init(&sar_queue);
/* START frame carries the full SDU length as sdulen. */
1649 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1650 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1652 return PTR_ERR(skb);
1654 __skb_queue_tail(&sar_queue, skb);
1655 len -= chan->remote_mps;
1656 size += chan->remote_mps;
1661 if (len > chan->remote_mps) {
1662 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1663 buflen = chan->remote_mps;
1665 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
/* Continuation/end frames carry no sdulen field (0). */
1669 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop everything built so far; nothing was queued yet. */
1671 skb_queue_purge(&sar_queue);
1672 return PTR_ERR(skb);
1675 __skb_queue_tail(&sar_queue, skb);
1679 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1680 if (chan->tx_send_head == NULL)
1681 chan->tx_send_head = sar_queue.next;
/* Main send entry point for a channel. Dispatches on channel type and
 * mode: connectionless channels send a G-frame immediately; basic mode
 * sends a single B-frame (after an outgoing-MTU check); ERTM/streaming
 * either build one unsegmented I-frame or segment the SDU, then kick
 * the appropriate transmit engine.
 */
1686 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1689 struct sk_buff *skb;
1693 /* Connectionless channel */
1694 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1695 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1697 return PTR_ERR(skb);
1699 l2cap_do_send(chan, skb);
1703 switch (chan->mode) {
1704 case L2CAP_MODE_BASIC:
1705 /* Check outgoing MTU */
1706 if (len > chan->omtu)
1709 /* Create a basic PDU */
1710 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1712 return PTR_ERR(skb);
1714 l2cap_do_send(chan, skb);
1718 case L2CAP_MODE_ERTM:
1719 case L2CAP_MODE_STREAMING:
1720 /* Entire SDU fits into one PDU */
1721 if (len <= chan->remote_mps) {
1722 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1723 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1726 return PTR_ERR(skb);
1728 __skb_queue_tail(&chan->tx_q, skb);
1730 if (chan->tx_send_head == NULL)
1731 chan->tx_send_head = skb;
1734 /* Segment SDU into multiples PDUs */
1735 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode transmits immediately; no acknowledgements. */
1740 if (chan->mode == L2CAP_MODE_STREAMING) {
1741 l2cap_streaming_send(chan);
/* Defer transmission while remote is busy or we await an F-bit. */
1746 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1747 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1752 err = l2cap_ertm_send(chan);
1759 BT_DBG("bad state %1.1x", chan->mode);
1766 /* Copy frame to all raw sockets on that connection */
1767 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1769 struct sk_buff *nskb;
1770 struct l2cap_chan *chan;
1772 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under chan_lock; clone the
 * frame for each RAW channel and hand it to the channel's recv op.
 */
1774 mutex_lock(&conn->chan_lock);
1775 list_for_each_entry(chan, &conn->chan_l, list) {
1776 struct sock *sk = chan->sk;
1777 if (chan->chan_type != L2CAP_CHAN_RAW)
1780 /* Don't send frame to the socket it came from */
1783 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv op returning nonzero means the clone was not consumed
 * (presumably freed here in an elided line — confirm). */
1787 if (chan->ops->recv(chan->data, nskb))
1790 mutex_unlock(&conn->chan_lock);
1793 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header on the signalling CID
 * (LE or BR/EDR, chosen by link type), command header, then dlen bytes
 * of payload — chained as frag_list fragments when it exceeds the MTU.
 */
1794 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1795 u8 code, u8 ident, u16 dlen, void *data)
1797 struct sk_buff *skb, **frag;
1798 struct l2cap_cmd_hdr *cmd;
1799 struct l2cap_hdr *lh;
1802 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1803 conn, code, ident, dlen);
1805 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1806 count = min_t(unsigned int, conn->mtu, len);
1808 skb = bt_skb_alloc(count, GFP_ATOMIC);
1812 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1813 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
1815 if (conn->hcon->type == LE_LINK)
1816 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1818 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1820 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1823 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits in the first skb. */
1826 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1827 memcpy(skb_put(skb, count), data, count);
1833 /* Continuation fragments (no L2CAP header) */
1834 frag = &skb_shinfo(skb)->frag_list;
1836 count = min_t(unsigned int, conn->mtu, len);
1838 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1842 memcpy(skb_put(*frag, count), data, count);
1847 frag = &(*frag)->next;
/* Decode one configuration option at *ptr into (*type, *olen, *val),
 * advancing past it. 1/2/4-byte values are read inline; larger options
 * are returned as a pointer cast into *val. Returns the option's total
 * size so the caller can decrement its remaining length.
 */
1857 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1859 struct l2cap_conf_opt *opt = *ptr;
1862 len = L2CAP_CONF_OPT_SIZE + opt->len;
1870 *val = *((u8 *) opt->val);
1874 *val = get_unaligned_le16(opt->val);
1878 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a value. */
1882 *val = (unsigned long) opt->val;
1886 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, len, val) at *ptr and advance
 * the cursor. val is a pointer for options longer than 4 bytes,
 * mirroring l2cap_get_conf_opt's encoding.
 */
1890 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1892 struct l2cap_conf_opt *opt = *ptr;
1894 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1901 *((u8 *) opt->val) = val;
1905 put_unaligned_le16(val, opt->val);
1909 put_unaligned_le32(val, opt->val);
1913 memcpy(opt->val, (void *) val, len);
1917 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters; field set depends on channel mode
 * (ERTM vs streaming). Some field assignments are elided in this
 * excerpt.
 */
1920 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1922 struct l2cap_conf_efs efs;
1924 switch (chan->mode) {
1925 case L2CAP_MODE_ERTM:
1926 efs.id = chan->local_id;
1927 efs.stype = chan->local_stype;
1928 efs.msdu = cpu_to_le16(chan->local_msdu);
1929 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1930 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1931 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1934 case L2CAP_MODE_STREAMING:
/* Streaming always advertises best-effort service type. */
1936 efs.stype = L2CAP_SERV_BESTEFFORT;
1937 efs.msdu = cpu_to_le16(chan->local_msdu);
1938 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1947 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1948 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: send a pending
 * acknowledgement under the socket lock.
 */
1951 static void l2cap_ack_timeout(struct work_struct *work)
1953 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1956 lock_sock(chan->sk);
1957 l2cap_send_ack(chan);
1958 release_sock(chan->sk);
/* Reset ERTM per-channel state: sequence counters to zero, and
 * (re)initialize the retransmission/monitor/ack work items, the SREJ
 * receive queue and the SREJ list.
 */
1961 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1963 chan->expected_ack_seq = 0;
1964 chan->unacked_frames = 0;
1965 chan->buffer_seq = 0;
1966 chan->num_acked = 0;
1967 chan->frames_sent = 0;
1969 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
1970 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
1971 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
1973 skb_queue_head_init(&chan->srej_q);
1975 INIT_LIST_HEAD(&chan->srej_l);
/* Pick the effective channel mode: keep ERTM/streaming only if the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 */
1978 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1981 case L2CAP_MODE_STREAMING:
1982 case L2CAP_MODE_ERTM:
1983 if (l2cap_mode_supported(mode, remote_feat_mask))
1987 return L2CAP_MODE_BASIC;
/* Extended Window Size usable: high-speed enabled and remote
 * advertises L2CAP_FEAT_EXT_WINDOW.
 */
1991 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1993 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification usable: high-speed enabled and remote
 * advertises L2CAP_FEAT_EXT_FLOW.
 */
1996 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1998 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Configure the transmit window: if the requested window exceeds the
 * default and extended windows are supported, switch to the extended
 * control field; otherwise clamp to the default window.
 */
2001 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2003 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2004 __l2cap_ews_supported(chan)) {
2005 /* use extended control field */
2006 set_bit(FLAG_EXT_CTRL, &chan->flags);
2007 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2009 chan->tx_win = min_t(u16, chan->tx_win,
2010 L2CAP_DEFAULT_TX_WINDOW);
2011 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into 'data': optional MTU
 * option, then mode-specific RFC / EFS / FCS / EWS options. On the
 * first request the mode may be downgraded to what the remote's
 * feature mask supports. Returns the request length (return statements
 * elided in this excerpt).
 */
2015 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2017 struct l2cap_conf_req *req = data;
2018 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2019 void *ptr = req->data;
2022 BT_DBG("chan %p", chan);
/* Only negotiate mode on the very first config exchange. */
2024 if (chan->num_conf_req || chan->num_conf_rsp)
2027 switch (chan->mode) {
2028 case L2CAP_MODE_STREAMING:
2029 case L2CAP_MODE_ERTM:
2030 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2033 if (__l2cap_efs_supported(chan))
2034 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2038 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* MTU option only when it differs from the spec default. */
2043 if (chan->imtu != L2CAP_DEFAULT_MTU)
2044 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2046 switch (chan->mode) {
2047 case L2CAP_MODE_BASIC:
/* If the remote supports neither ERTM nor streaming there is
 * nothing to negotiate for basic mode. */
2048 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2049 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2052 rfc.mode = L2CAP_MODE_BASIC;
2054 rfc.max_transmit = 0;
2055 rfc.retrans_timeout = 0;
2056 rfc.monitor_timeout = 0;
2057 rfc.max_pdu_size = 0;
2059 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2060 (unsigned long) &rfc);
2063 case L2CAP_MODE_ERTM:
2064 rfc.mode = L2CAP_MODE_ERTM;
2065 rfc.max_transmit = chan->max_tx;
2066 rfc.retrans_timeout = 0;
2067 rfc.monitor_timeout = 0;
/* MPS bounded by the spec default and the ACL MTU minus the
 * worst-case (extended) header overhead. */
2069 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2070 L2CAP_EXT_HDR_SIZE -
2073 rfc.max_pdu_size = cpu_to_le16(size);
2075 l2cap_txwin_setup(chan);
2077 rfc.txwin_size = min_t(u16, chan->tx_win,
2078 L2CAP_DEFAULT_TX_WINDOW);
2080 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2081 (unsigned long) &rfc);
2083 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2084 l2cap_add_opt_efs(&ptr, chan);
2086 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2089 if (chan->fcs == L2CAP_FCS_NONE ||
2090 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2091 chan->fcs = L2CAP_FCS_NONE;
2092 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
/* Advertise extended window size when extended control is on. */
2095 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2096 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2100 case L2CAP_MODE_STREAMING:
2101 rfc.mode = L2CAP_MODE_STREAMING;
2103 rfc.max_transmit = 0;
2104 rfc.retrans_timeout = 0;
2105 rfc.monitor_timeout = 0;
2107 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2108 L2CAP_EXT_HDR_SIZE -
2111 rfc.max_pdu_size = cpu_to_le16(size);
2113 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2114 (unsigned long) &rfc);
2116 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2117 l2cap_add_opt_efs(&ptr, chan);
2119 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2122 if (chan->fcs == L2CAP_FCS_NONE ||
2123 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2124 chan->fcs = L2CAP_FCS_NONE;
2125 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2130 req->dcid = cpu_to_le16(chan->dcid);
2131 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (chan->conf_req /
 * conf_len) and build the Configure Response into 'data'. First pass
 * decodes all options (MTU, flush timeout, RFC, FCS, EFS, EWS),
 * rejecting unknown non-hint options; then the requested mode is
 * reconciled with ours and mode-specific response options are emitted.
 * Returns the response length, or -ECONNREFUSED when agreement is
 * impossible. Several branches are elided in this excerpt.
 */
2138 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2139 struct l2cap_conf_rsp *rsp = data;
2140 void *ptr = rsp->data;
2141 void *req = chan->conf_req;
2142 int len = chan->conf_len;
2143 int type, hint, olen;
2144 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2145 struct l2cap_conf_efs efs;
2147 u16 mtu = L2CAP_DEFAULT_MTU;
2148 u16 result = L2CAP_CONF_SUCCESS;
2151 BT_DBG("chan %p", chan);
/* Pass 1: walk every option in the request. */
2153 while (len >= L2CAP_CONF_OPT_SIZE) {
2154 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored if unknown. */
2156 hint = type & L2CAP_CONF_HINT;
2157 type &= L2CAP_CONF_MASK;
2160 case L2CAP_CONF_MTU:
2164 case L2CAP_CONF_FLUSH_TO:
2165 chan->flush_to = val;
2168 case L2CAP_CONF_QOS:
2171 case L2CAP_CONF_RFC:
2172 if (olen == sizeof(rfc))
2173 memcpy(&rfc, (void *) val, olen);
2176 case L2CAP_CONF_FCS:
2177 if (val == L2CAP_FCS_NONE)
2178 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2181 case L2CAP_CONF_EFS:
2183 if (olen == sizeof(efs))
2184 memcpy(&efs, (void *) val, olen);
2187 case L2CAP_CONF_EWS:
2189 return -ECONNREFUSED;
2191 set_bit(FLAG_EXT_CTRL, &chan->flags);
2192 set_bit(CONF_EWS_RECV, &chan->conf_state);
2193 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2194 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
2201 result = L2CAP_CONF_UNKNOWN;
2202 *((u8 *) ptr++) = type;
/* Mode reconciliation happens only on the first exchange. */
2207 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2210 switch (chan->mode) {
2211 case L2CAP_MODE_STREAMING:
2212 case L2CAP_MODE_ERTM:
2213 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2214 chan->mode = l2cap_select_mode(rfc.mode,
2215 chan->conn->feat_mask);
2220 if (__l2cap_efs_supported(chan))
2221 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2223 return -ECONNREFUSED;
2226 if (chan->mode != rfc.mode)
2227 return -ECONNREFUSED;
/* Modes still disagree: propose ours; refuse after one retry. */
2233 if (chan->mode != rfc.mode) {
2234 result = L2CAP_CONF_UNACCEPT;
2235 rfc.mode = chan->mode;
2237 if (chan->num_conf_rsp == 1)
2238 return -ECONNREFUSED;
2240 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2241 sizeof(rfc), (unsigned long) &rfc);
2244 if (result == L2CAP_CONF_SUCCESS) {
2245 /* Configure output options and let the other side know
2246 * which ones we don't like. */
2248 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2249 result = L2CAP_CONF_UNACCEPT;
2252 set_bit(CONF_MTU_DONE, &chan->conf_state);
2254 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type mismatch (neither side no-traffic): reject. */
2257 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2258 efs.stype != L2CAP_SERV_NOTRAFIC &&
2259 efs.stype != chan->local_stype) {
2261 result = L2CAP_CONF_UNACCEPT;
2263 if (chan->num_conf_req >= 1)
2264 return -ECONNREFUSED;
2266 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2268 (unsigned long) &efs);
2270 /* Send PENDING Conf Rsp */
2271 result = L2CAP_CONF_PENDING;
2272 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2277 case L2CAP_MODE_BASIC:
2278 chan->fcs = L2CAP_FCS_NONE;
2279 set_bit(CONF_MODE_DONE, &chan->conf_state);
2282 case L2CAP_MODE_ERTM:
2283 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2284 chan->remote_tx_win = rfc.txwin_size;
2286 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2288 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the remote MPS to what fits our ACL MTU. */
2290 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2292 L2CAP_EXT_HDR_SIZE -
2295 rfc.max_pdu_size = cpu_to_le16(size);
2296 chan->remote_mps = size;
2298 rfc.retrans_timeout =
2299 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2300 rfc.monitor_timeout =
2301 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2303 set_bit(CONF_MODE_DONE, &chan->conf_state);
2305 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2306 sizeof(rfc), (unsigned long) &rfc);
2308 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2309 chan->remote_id = efs.id;
2310 chan->remote_stype = efs.stype;
2311 chan->remote_msdu = le16_to_cpu(efs.msdu);
2312 chan->remote_flush_to =
2313 le32_to_cpu(efs.flush_to);
2314 chan->remote_acc_lat =
2315 le32_to_cpu(efs.acc_lat);
2316 chan->remote_sdu_itime =
2317 le32_to_cpu(efs.sdu_itime);
2318 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2319 sizeof(efs), (unsigned long) &efs);
2323 case L2CAP_MODE_STREAMING:
2324 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2326 L2CAP_EXT_HDR_SIZE -
2329 rfc.max_pdu_size = cpu_to_le16(size);
2330 chan->remote_mps = size;
2332 set_bit(CONF_MODE_DONE, &chan->conf_state);
2334 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2335 sizeof(rfc), (unsigned long) &rfc);
2340 result = L2CAP_CONF_UNACCEPT;
2342 memset(&rfc, 0, sizeof(rfc));
2343 rfc.mode = chan->mode;
2346 if (result == L2CAP_CONF_SUCCESS)
2347 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2349 rsp->scid = cpu_to_le16(chan->dcid);
2350 rsp->result = cpu_to_le16(result);
2351 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response and build a follow-up Configure
 * Request into 'data', adopting or countering each option (MTU, flush
 * timeout, RFC, EWS, EFS). On success/pending, commit the negotiated
 * ERTM/streaming timing and MPS values to the channel. Returns the new
 * request length or -ECONNREFUSED.
 */
2356 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2358 struct l2cap_conf_req *req = data;
2359 void *ptr = req->data;
2362 struct l2cap_conf_rfc rfc;
2363 struct l2cap_conf_efs efs;
2365 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2367 while (len >= L2CAP_CONF_OPT_SIZE) {
2368 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2371 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: counter with the
 * spec minimum and mark the response unacceptable. */
2372 if (val < L2CAP_DEFAULT_MIN_MTU) {
2373 *result = L2CAP_CONF_UNACCEPT;
2374 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2377 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2380 case L2CAP_CONF_FLUSH_TO:
2381 chan->flush_to = val;
2382 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2386 case L2CAP_CONF_RFC:
2387 if (olen == sizeof(rfc))
2388 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not change its mode. */
2390 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2391 rfc.mode != chan->mode)
2392 return -ECONNREFUSED;
2396 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2397 sizeof(rfc), (unsigned long) &rfc);
2400 case L2CAP_CONF_EWS:
2401 chan->tx_win = min_t(u16, val,
2402 L2CAP_DEFAULT_EXT_WINDOW);
2403 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2407 case L2CAP_CONF_EFS:
2408 if (olen == sizeof(efs))
2409 memcpy(&efs, (void *)val, olen);
2411 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2412 efs.stype != L2CAP_SERV_NOTRAFIC &&
2413 efs.stype != chan->local_stype)
2414 return -ECONNREFUSED;
2416 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2417 sizeof(efs), (unsigned long) &efs);
2422 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2423 return -ECONNREFUSED;
2425 chan->mode = rfc.mode;
/* Commit negotiated parameters once the exchange succeeds. */
2427 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2429 case L2CAP_MODE_ERTM:
2430 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2431 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2432 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2434 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2435 chan->local_msdu = le16_to_cpu(efs.msdu);
2436 chan->local_sdu_itime =
2437 le32_to_cpu(efs.sdu_itime);
2438 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2439 chan->local_flush_to =
2440 le32_to_cpu(efs.flush_to);
2444 case L2CAP_MODE_STREAMING:
2445 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2449 req->dcid = cpu_to_le16(chan->dcid);
2450 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configure Response header (scid, result, flags)
 * with no options; returns the response length (elided here).
 */
2455 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2457 struct l2cap_conf_rsp *rsp = data;
2458 void *ptr = rsp->data;
2460 BT_DBG("chan %p", chan);
2462 rsp->scid = cpu_to_le16(chan->dcid);
2463 rsp->result = cpu_to_le16(result);
2464 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (success) for a channel whose
 * acceptance was postponed, then kick off configuration by sending our
 * first Configure Request if one has not been sent already.
 */
2469 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2471 struct l2cap_conn_rsp rsp;
2472 struct l2cap_conn *conn = chan->conn;
2475 rsp.scid = cpu_to_le16(chan->dcid);
2476 rsp.dcid = cpu_to_le16(chan->scid);
2477 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2478 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply using the ident saved from the original Connect Request. */
2479 l2cap_send_cmd(conn, chan->ident,
2480 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2482 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2485 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2486 l2cap_build_conf_req(chan, buf), buf);
2487 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and
 * commit the timing/MPS parameters for ERTM or streaming channels;
 * no-op for other modes.
 */
2490 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2494 struct l2cap_conf_rfc rfc;
2496 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2498 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2501 while (len >= L2CAP_CONF_OPT_SIZE) {
2502 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2505 case L2CAP_CONF_RFC:
2506 if (olen == sizeof(rfc))
2507 memcpy(&rfc, (void *)val, olen);
2514 case L2CAP_MODE_ERTM:
2515 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2516 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2517 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2519 case L2CAP_MODE_STREAMING:
2520 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject. If it rejects our outstanding Information
 * Request ("not understood"), stop the info timer, mark feature
 * discovery done and start pending connections anyway.
 */
2524 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2526 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2528 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2531 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2532 cmd->ident == conn->info_ident) {
2533 del_timer(&conn->info_timer);
2535 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2536 conn->info_ident = 0;
2538 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening channel for the
 * PSM, check link security and accept-queue backlog, create the new
 * child channel, guard against a duplicate scid, add it to the
 * connection, then answer with success/pending/refusal. May also kick
 * off a feature-mask Information Request and the first Configure
 * Request. Several error/cleanup paths are elided in this excerpt.
 */
2544 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2546 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2547 struct l2cap_conn_rsp rsp;
2548 struct l2cap_chan *chan = NULL, *pchan;
2549 struct sock *parent, *sk = NULL;
2550 int result, status = L2CAP_CS_NO_INFO;
2552 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2553 __le16 psm = req->psm;
2555 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2557 /* Check if we have socket listening on psm */
2558 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2560 result = L2CAP_CR_BAD_PSM;
2568 /* Check if the ACL is secure enough (if not SDP) */
2569 if (psm != cpu_to_le16(0x0001) &&
2570 !hci_conn_check_link_mode(conn->hcon)) {
2571 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2572 result = L2CAP_CR_SEC_BLOCK;
2576 result = L2CAP_CR_NO_MEM;
2578 /* Check for backlog size */
2579 if (sk_acceptq_is_full(parent)) {
2580 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2584 chan = pchan->ops->new_connection(pchan->data);
2590 mutex_lock(&conn->chan_lock);
2592 /* Check if we already have channel with that dcid */
2593 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2594 mutex_unlock(&conn->chan_lock);
2595 sock_set_flag(sk, SOCK_ZAPPED);
2596 chan->ops->close(chan->data);
2600 hci_conn_hold(conn->hcon);
2602 bacpy(&bt_sk(sk)->src, conn->src);
2603 bacpy(&bt_sk(sk)->dst, conn->dst);
2607 bt_accept_enqueue(parent, sk);
2609 __l2cap_chan_add(conn, chan);
2613 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for a possibly deferred response. */
2615 chan->ident = cmd->ident;
2617 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2618 if (l2cap_chan_check_security(chan)) {
/* Userspace wants to authorize: park in CONNECT2 and wake
 * the listening socket. */
2619 if (bt_sk(sk)->defer_setup) {
2620 l2cap_state_change(chan, BT_CONNECT2);
2621 result = L2CAP_CR_PEND;
2622 status = L2CAP_CS_AUTHOR_PEND;
2623 parent->sk_data_ready(parent, 0);
2625 l2cap_state_change(chan, BT_CONFIG);
2626 result = L2CAP_CR_SUCCESS;
2627 status = L2CAP_CS_NO_INFO;
2630 l2cap_state_change(chan, BT_CONNECT2);
2631 result = L2CAP_CR_PEND;
2632 status = L2CAP_CS_AUTHEN_PEND;
2635 l2cap_state_change(chan, BT_CONNECT2);
2636 result = L2CAP_CR_PEND;
2637 status = L2CAP_CS_NO_INFO;
2640 mutex_unlock(&conn->chan_lock);
2643 release_sock(parent);
2646 rsp.scid = cpu_to_le16(scid);
2647 rsp.dcid = cpu_to_le16(dcid);
2648 rsp.result = cpu_to_le16(result);
2649 rsp.status = cpu_to_le16(status);
2650 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask not yet exchanged: ask for it now. */
2652 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2653 struct l2cap_info_req info;
2654 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2657 conn->info_ident = l2cap_get_ident(conn);
2659 mod_timer(&conn->info_timer, jiffies +
2660 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2662 l2cap_send_cmd(conn, conn->info_ident,
2663 L2CAP_INFO_REQ, sizeof(info), &info);
2666 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2667 result == L2CAP_CR_SUCCESS) {
2669 set_bit(CONF_REQ_SENT, &chan->conf_state);
2670 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2671 l2cap_build_conf_req(chan, buf), buf);
2672 chan->num_conf_req++;
/* Handle a Connect Response: look up the channel by scid (or by ident
 * when no scid was assigned), then on success move to BT_CONFIG and
 * send our first Configure Request; on refusal tear the channel down.
 */
2678 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2680 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2681 u16 scid, dcid, result, status;
2682 struct l2cap_chan *chan;
2686 scid = __le16_to_cpu(rsp->scid);
2687 dcid = __le16_to_cpu(rsp->dcid);
2688 result = __le16_to_cpu(rsp->result);
2689 status = __le16_to_cpu(rsp->status);
2691 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2694 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid == 0 (pending response): match on our command ident. */
2698 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2706 case L2CAP_CR_SUCCESS:
2707 l2cap_state_change(chan, BT_CONFIG);
2710 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2712 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2715 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2716 l2cap_build_conf_req(chan, req), req);
2717 chan->num_conf_req++;
2721 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2725 l2cap_chan_del(chan, ECONNREFUSED);
2733 static inline void set_default_fcs(struct l2cap_chan *chan)
2735 /* FCS is enabled only in ERTM or streaming mode, if one or both
2738 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2739 chan->fcs = L2CAP_FCS_NONE;
/* Neither side requested FCS off, so CRC16 is the default. */
2740 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2741 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configure Request: validate channel state, accumulate the
 * option payload into chan->conf_req (requests may span multiple PDUs
 * via the continuation flag), and once complete parse it and send the
 * response. When both directions are configured the channel becomes
 * BT_CONNECTED and ERTM state is initialized. Some cleanup/return
 * lines are elided in this excerpt.
 */
2744 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2746 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2749 struct l2cap_chan *chan;
2753 dcid = __le16_to_cpu(req->dcid);
2754 flags = __le16_to_cpu(req->flags);
2756 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2758 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only valid in BT_CONFIG/BT_CONNECT2; otherwise reject
 * the CID outright. */
2764 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2765 struct l2cap_cmd_rej_cid rej;
2767 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2768 rej.scid = cpu_to_le16(chan->scid);
2769 rej.dcid = cpu_to_le16(chan->dcid);
2771 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2776 /* Reject if config buffer is too small. */
2777 len = cmd_len - sizeof(*req);
2778 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2779 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2780 l2cap_build_conf_rsp(chan, rsp,
2781 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the option payload. */
2786 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2787 chan->conf_len += len;
2789 if (flags & 0x0001) {
2790 /* Incomplete config. Send empty response. */
2791 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2792 l2cap_build_conf_rsp(chan, rsp,
2793 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2797 /* Complete config. */
2798 len = l2cap_parse_conf_req(chan, rsp);
2800 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2804 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2805 chan->num_conf_rsp++;
2807 /* Reset config buffer. */
2810 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions done: channel is fully configured. */
2813 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2814 set_default_fcs(chan);
2816 l2cap_state_change(chan, BT_CONNECTED);
2818 chan->next_tx_seq = 0;
2819 chan->expected_tx_seq = 0;
2820 skb_queue_head_init(&chan->tx_q);
2821 if (chan->mode == L2CAP_MODE_ERTM)
2822 l2cap_ertm_init(chan);
2824 l2cap_chan_ready(sk);
2828 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2830 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2831 l2cap_build_conf_req(chan, buf), buf);
2832 chan->num_conf_req++;
2835 /* Got Conf Rsp PENDING from remote side and asume we sent
2836 Conf Rsp PENDING in the code above */
2837 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2838 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2840 /* check compatibility */
2842 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2843 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2845 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2846 l2cap_build_conf_rsp(chan, rsp,
2847 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configure Response by result code: SUCCESS commits the RFC
 * parameters; PENDING may resolve a mutual pending state; UNACCEPT
 * re-negotiates with a fresh Configure Request (bounded retries);
 * anything else disconnects. Marks input configuration done and, when
 * output is also done, brings the channel up. Some lines are elided
 * in this excerpt.
 */
2855 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2857 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2858 u16 scid, flags, result;
2859 struct l2cap_chan *chan;
2861 int len = cmd->len - sizeof(*rsp);
2863 scid = __le16_to_cpu(rsp->scid);
2864 flags = __le16_to_cpu(rsp->flags);
2865 result = __le16_to_cpu(rsp->result);
2867 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2868 scid, flags, result);
2870 chan = l2cap_get_chan_by_scid(conn, scid);
2877 case L2CAP_CONF_SUCCESS:
2878 l2cap_conf_rfc_get(chan, rsp->data, len);
2879 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2882 case L2CAP_CONF_PENDING:
2883 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* Both sides pending: parse their options and complete our
 * side of the exchange with a SUCCESS response. */
2885 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2888 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2891 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2895 /* check compatibility */
2897 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2898 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2900 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2901 l2cap_build_conf_rsp(chan, buf,
2902 L2CAP_CONF_SUCCESS, 0x0000), buf);
2906 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation up to the allowed number of responses. */
2907 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2910 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2911 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2915 /* throw out any old stored conf requests */
2916 result = L2CAP_CONF_SUCCESS;
2917 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2920 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2924 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2925 L2CAP_CONF_REQ, len, req);
2926 chan->num_conf_req++;
2927 if (result != L2CAP_CONF_SUCCESS)
2933 sk->sk_err = ECONNRESET;
2934 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2935 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2942 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2944 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2945 set_default_fcs(chan);
2947 l2cap_state_change(chan, BT_CONNECTED);
2948 chan->next_tx_seq = 0;
2949 chan->expected_tx_seq = 0;
2950 skb_queue_head_init(&chan->tx_q);
2951 if (chan->mode == L2CAP_MODE_ERTM)
2952 l2cap_ertm_init(chan);
2954 l2cap_chan_ready(sk);
/* Handle a Disconnect Request: acknowledge with a Disconnect Response,
 * shut the socket down, remove the channel from the connection and
 * invoke its close callback.
 */
2962 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2964 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2965 struct l2cap_disconn_rsp rsp;
2967 struct l2cap_chan *chan;
2970 scid = __le16_to_cpu(req->scid);
2971 dcid = __le16_to_cpu(req->dcid);
2973 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* Their dcid is our scid; look the channel up by it. */
2975 chan = l2cap_get_chan_by_scid(conn, dcid);
2981 rsp.dcid = cpu_to_le16(chan->scid);
2982 rsp.scid = cpu_to_le16(chan->dcid);
2983 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2985 sk->sk_shutdown = SHUTDOWN_MASK;
2987 l2cap_chan_del(chan, ECONNRESET);
2990 chan->ops->close(chan->data);
/* Handle a Disconnect Response to our earlier request: remove the
 * channel (no error) and invoke its close callback.
 */
2994 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2996 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2998 struct l2cap_chan *chan;
3001 scid = __le16_to_cpu(rsp->scid);
3002 dcid = __le16_to_cpu(rsp->dcid);
3004 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3006 chan = l2cap_get_chan_by_scid(conn, scid);
3012 l2cap_chan_del(chan, 0);
3015 chan->ops->close(chan->data);
/* Handle an Information Request: answer FEAT_MASK with our feature
 * mask (extended with ERTM/streaming and, when applicable, high-speed
 * features), FIXED_CHAN with the fixed-channel bitmap, and anything
 * else with NOTSUPP.
 */
3019 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3021 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3024 type = __le16_to_cpu(req->type);
3026 BT_DBG("type 0x%4.4x", type);
3028 if (type == L2CAP_IT_FEAT_MASK) {
3030 u32 feat_mask = l2cap_feat_mask;
3031 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3032 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3033 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3035 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only under an elided condition
 * (presumably enable_hs — confirm against full source). */
3038 feat_mask |= L2CAP_FEAT_EXT_FLOW
3039 | L2CAP_FEAT_EXT_WINDOW;
3041 put_unaligned_le32(feat_mask, rsp->data);
3042 l2cap_send_cmd(conn, cmd->ident,
3043 L2CAP_INFO_RSP, sizeof(buf), buf);
3044 } else if (type == L2CAP_IT_FIXED_CHAN) {
3046 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when available. */
3049 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3051 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3053 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3054 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3055 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3056 l2cap_send_cmd(conn, cmd->ident,
3057 L2CAP_INFO_RSP, sizeof(buf), buf);
3059 struct l2cap_info_rsp rsp;
3060 rsp.type = cpu_to_le16(type);
3061 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3062 l2cap_send_cmd(conn, cmd->ident,
3063 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response to a request we sent. Validates the
 * command ident against conn->info_ident (info req/rsp are not bound to a
 * channel), records the peer's feature mask, optionally chains a fixed
 * channel query, and finally kicks pending connections via
 * l2cap_conn_start(). */
3069 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3071 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3074 type = __le16_to_cpu(rsp->type);
3075 result = __le16_to_cpu(rsp->result);
3077 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3079 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3080 if (cmd->ident != conn->info_ident ||
3081 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3084 del_timer(&conn->info_timer);
/* Peer rejected the request: mark the exchange finished and proceed. */
3086 if (result != L2CAP_IR_SUCCESS) {
3087 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3088 conn->info_ident = 0;
3090 l2cap_conn_start(conn);
3095 if (type == L2CAP_IT_FEAT_MASK) {
3096 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, query its fixed channel map next. */
3098 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3099 struct l2cap_info_req req;
3100 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3102 conn->info_ident = l2cap_get_ident(conn);
3104 l2cap_send_cmd(conn, conn->info_ident,
3105 L2CAP_INFO_REQ, sizeof(req), &req);
3107 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3108 conn->info_ident = 0;
3110 l2cap_conn_start(conn);
3112 } else if (type == L2CAP_IT_FIXED_CHAN) {
3113 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3114 conn->info_ident = 0;
3116 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request. Currently a placeholder that always
 * refuses with L2CAP_CR_NO_MEM after validating the command length. */
3122 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3123 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3126 struct l2cap_create_chan_req *req = data;
3127 struct l2cap_create_chan_rsp rsp;
3130 if (cmd_len != sizeof(*req))
3136 psm = le16_to_cpu(req->psm);
3137 scid = le16_to_cpu(req->scid);
3139 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3141 /* Placeholder: Always reject */
3143 rsp.scid = cpu_to_le16(scid);
/* NOTE(review): result/status are assigned without cpu_to_le16(), unlike
 * scid above and every other response builder in this file — looks like a
 * missing endianness conversion on big-endian hosts; confirm against the
 * wire-format definitions in l2cap.h. */
3144 rsp.result = L2CAP_CR_NO_MEM;
3145 rsp.status = L2CAP_CS_NO_INFO;
3147 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response carries the same payload semantics as a
 * Connect Response, so simply delegate to l2cap_connect_rsp(). */
3153 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3154 struct l2cap_cmd_hdr *cmd, void *data)
3156 BT_DBG("conn %p", conn);
3158 return l2cap_connect_rsp(conn, cmd, data);
/* Build and send a Move Channel Response for the given icid with the given
 * result code, reusing the requester's command ident. */
3161 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3162 u16 icid, u16 result)
3164 struct l2cap_move_chan_rsp rsp;
3166 BT_DBG("icid %d, result %d", icid, result);
3168 rsp.icid = cpu_to_le16(icid);
3169 rsp.result = cpu_to_le16(result);
3171 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a freshly allocated ident, recording
 * that ident in chan->ident so the confirm-response can be matched later.
 * Note: dereferences chan unconditionally — callers must pass a valid chan. */
3174 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3175 struct l2cap_chan *chan, u16 icid, u16 result)
3177 struct l2cap_move_chan_cfm cfm;
3180 BT_DBG("icid %d, result %d", icid, result);
3182 ident = l2cap_get_ident(conn);
3184 chan->ident = ident;
3186 cfm.icid = cpu_to_le16(icid);
3187 cfm.result = cpu_to_le16(result);
3189 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm by echoing the icid back in a
 * Move Channel Confirm Response using the confirm's ident. */
3192 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3195 struct l2cap_move_chan_cfm_rsp rsp;
3197 BT_DBG("icid %d", icid);
3199 rsp.icid = cpu_to_le16(icid);
3200 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request. Placeholder implementation: after a length
 * check it always answers with L2CAP_MR_NOT_ALLOWED. */
3203 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3204 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3206 struct l2cap_move_chan_req *req = data;
3208 u16 result = L2CAP_MR_NOT_ALLOWED;
3210 if (cmd_len != sizeof(*req))
3213 icid = le16_to_cpu(req->icid);
3215 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3220 /* Placeholder: Always refuse */
3221 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response. Placeholder: always confirms the move as
 * unconfirmed. */
3226 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3227 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3229 struct l2cap_move_chan_rsp *rsp = data;
3232 if (cmd_len != sizeof(*rsp))
3235 icid = le16_to_cpu(rsp->icid);
3236 result = le16_to_cpu(rsp->result);
3238 BT_DBG("icid %d, result %d", icid, result);
3240 /* Placeholder: Always unconfirmed */
/* NOTE(review): passes NULL for the chan argument, but
 * l2cap_send_move_chan_cfm() writes chan->ident unconditionally — this is
 * a NULL pointer dereference on every received move-channel response.
 * Either a valid channel must be looked up here or the helper must
 * tolerate a NULL chan. */
3241 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate length, log, and acknowledge it
 * with a Confirm Response (no state change yet in this placeholder). */
3246 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3247 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3249 struct l2cap_move_chan_cfm *cfm = data;
3252 if (cmd_len != sizeof(*cfm))
3255 icid = le16_to_cpu(cfm->icid);
3256 result = le16_to_cpu(cfm->result);
3258 BT_DBG("icid %d, result %d", icid, result);
3260 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: length check and debug log only;
 * no channel state is modified here. */
3265 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3266 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3268 struct l2cap_move_chan_cfm_rsp *rsp = data;
3271 if (cmd_len != sizeof(*rsp))
3274 icid = le16_to_cpu(rsp->icid);
3276 BT_DBG("icid %d", icid);
/* Validate LE connection parameter update values.
 * min/max: connection interval (presumably 1.25 ms units — range 6..3200);
 * to_multiplier: supervision timeout (presumably 10 ms units — 10..3200).
 * The "* 8" converts timeout units to interval units, so the timeout must
 * strictly exceed the maximum interval, and latency is capped both at 499
 * and at what the timeout can cover. Unit semantics per BT spec — confirm. */
3281 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3286 if (min > max || min < 6 || max > 3200)
3289 if (to_multiplier < 10 || to_multiplier > 3200)
3292 if (max >= to_multiplier * 8)
3295 max_latency = (to_multiplier * 8 / max) - 1;
3296 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request. Only honoured when we
 * are the link master; the parameters are range-checked and the peer gets
 * an accepted/rejected response. On acceptance the controller is asked to
 * apply the new parameters via hci_le_conn_update(). */
3302 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3303 struct l2cap_cmd_hdr *cmd, u8 *data)
3305 struct hci_conn *hcon = conn->hcon;
3306 struct l2cap_conn_param_update_req *req;
3307 struct l2cap_conn_param_update_rsp rsp;
3308 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may act on a parameter update request. */
3311 if (!(hcon->link_mode & HCI_LM_MASTER))
3314 cmd_len = __le16_to_cpu(cmd->len);
3315 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3318 req = (struct l2cap_conn_param_update_req *) data;
3319 min = __le16_to_cpu(req->min);
3320 max = __le16_to_cpu(req->max);
3321 latency = __le16_to_cpu(req->latency);
3322 to_multiplier = __le16_to_cpu(req->to_multiplier);
3324 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3325 min, max, latency, to_multiplier);
3327 memset(&rsp, 0, sizeof(rsp));
3329 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3331 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3333 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3335 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters were valid: push them down to the HCI layer. */
3339 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler based on the opcode
 * in the command header. Echo requests are answered inline; unknown opcodes
 * are logged (the caller is expected to generate a command reject). */
3344 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3345 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3349 switch (cmd->code) {
3350 case L2CAP_COMMAND_REJ:
3351 l2cap_command_rej(conn, cmd, data);
3354 case L2CAP_CONN_REQ:
3355 err = l2cap_connect_req(conn, cmd, data);
3358 case L2CAP_CONN_RSP:
3359 err = l2cap_connect_rsp(conn, cmd, data);
3362 case L2CAP_CONF_REQ:
3363 err = l2cap_config_req(conn, cmd, cmd_len, data);
3366 case L2CAP_CONF_RSP:
3367 err = l2cap_config_rsp(conn, cmd, data);
3370 case L2CAP_DISCONN_REQ:
3371 err = l2cap_disconnect_req(conn, cmd, data);
3374 case L2CAP_DISCONN_RSP:
3375 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo request: reflect the payload straight back. */
3378 case L2CAP_ECHO_REQ:
3379 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3382 case L2CAP_ECHO_RSP:
3385 case L2CAP_INFO_REQ:
3386 err = l2cap_information_req(conn, cmd, data);
3389 case L2CAP_INFO_RSP:
3390 err = l2cap_information_rsp(conn, cmd, data);
3393 case L2CAP_CREATE_CHAN_REQ:
3394 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3397 case L2CAP_CREATE_CHAN_RSP:
3398 err = l2cap_create_channel_rsp(conn, cmd, data);
3401 case L2CAP_MOVE_CHAN_REQ:
3402 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3405 case L2CAP_MOVE_CHAN_RSP:
3406 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3409 case L2CAP_MOVE_CHAN_CFM:
3410 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3413 case L2CAP_MOVE_CHAN_CFM_RSP:
3414 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3418 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command. Only the connection parameter update
 * request is acted on; rejects and update responses are ignored, and
 * anything else is logged as unknown. */
3426 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3427 struct l2cap_cmd_hdr *cmd, u8 *data)
3429 switch (cmd->code) {
3430 case L2CAP_COMMAND_REJ:
3433 case L2CAP_CONN_PARAM_UPDATE_REQ:
3434 return l2cap_conn_param_update_req(conn, cmd, data);
3436 case L2CAP_CONN_PARAM_UPDATE_RSP:
3440 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signalling channel payload: iterate over the concatenated
 * command PDUs in the skb, dispatching each to the LE or BR/EDR handler
 * depending on link type. Malformed commands (length overrun or zero
 * ident) abort the loop; handler errors trigger a command reject. */
3445 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3446 struct sk_buff *skb)
3448 u8 *data = skb->data;
3450 struct l2cap_cmd_hdr cmd;
/* Mirror raw signalling traffic to raw sockets first. */
3453 l2cap_raw_recv(conn, skb);
3455 while (len >= L2CAP_CMD_HDR_SIZE) {
3457 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3458 data += L2CAP_CMD_HDR_SIZE;
3459 len -= L2CAP_CMD_HDR_SIZE;
3461 cmd_len = le16_to_cpu(cmd.len);
3463 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more payload than remains, or ident 0, is corrupt. */
3465 if (cmd_len > len || !cmd.ident) {
3466 BT_DBG("corrupted command");
3470 if (conn->hcon->type == LE_LINK)
3471 err = l2cap_le_sig_cmd(conn, &cmd, data);
3473 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3476 struct l2cap_cmd_rej_unk rej;
3478 BT_ERR("Wrong link type (%d)", err);
3480 /* FIXME: Map err to a valid reason */
3481 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3482 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the trailing CRC16 FCS of a received frame when the
 * channel uses L2CAP_FCS_CRC16. The CRC is computed over the L2CAP header
 * (which the caller already pulled — hence "skb->data - hdr_size") plus
 * the remaining payload. After skb_trim() the received FCS bytes still sit
 * just past the new length, which is where rcv_fcs reads them from. */
3492 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3494 u16 our_fcs, rcv_fcs;
/* Extended control fields imply the larger ERTM header. */
3497 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3498 hdr_size = L2CAP_EXT_HDR_SIZE;
3500 hdr_size = L2CAP_ENH_HDR_SIZE;
3502 if (chan->fcs == L2CAP_FCS_CRC16) {
3503 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3504 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3505 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3507 if (our_fcs != rcv_fcs)
/* After a poll, answer with whatever is appropriate: RNR if we are locally
 * busy, otherwise (re)transmit pending I-frames, and fall back to an RR
 * s-frame if nothing ended up being sent. */
3513 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3517 chan->frames_sent = 0;
3519 control |= __set_reqseq(chan, chan->buffer_seq);
3521 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3522 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3523 l2cap_send_sframe(chan, control);
3524 set_bit(CONN_RNR_SENT, &chan->conn_state);
3527 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3528 l2cap_retransmit_frames(chan);
3530 l2cap_ertm_send(chan);
/* Nothing was transmitted and we are not busy: send a plain RR ack. */
3532 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3533 chan->frames_sent == 0) {
3534 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3535 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq offset relative to buffer_seq. Duplicate tx_seq values
 * are detected via the peek loop (the missing branch presumably drops the
 * duplicate — truncated here). */
3539 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3541 struct sk_buff *next_skb;
3542 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence/SAR metadata in the skb control block for reassembly. */
3544 bt_cb(skb)->tx_seq = tx_seq;
3545 bt_cb(skb)->sar = sar;
3547 next_skb = skb_peek(&chan->srej_q);
3549 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3552 if (bt_cb(next_skb)->tx_seq == tx_seq)
3555 next_tx_seq_offset = __seq_offset(chan,
3556 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
/* Found the first queued frame that sequences after us: insert before it. */
3558 if (next_tx_seq_offset > tx_seq_offset) {
3559 __skb_queue_before(&chan->srej_q, next_skb, skb);
3563 if (skb_queue_is_last(&chan->srej_q, next_skb))
3566 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Larger than everything queued (or queue empty): append at the tail. */
3569 __skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's frag_list, tracking the list tail through
 * *last_frag to keep the append O(1), and account the added bytes in the
 * head skb's len/data_len/truesize. */
3574 static void append_skb_frag(struct sk_buff *skb,
3575 struct sk_buff *new_frag, struct sk_buff **last_frag)
3577 /* skb->len reflects data in skb as well as all fragments
3578 * skb->data_len reflects only data in fragments
/* First fragment: start the frag list on the head skb. */
3580 if (!skb_has_frag_list(skb))
3581 skb_shinfo(skb)->frag_list = new_frag;
3583 new_frag->next = NULL;
3585 (*last_frag)->next = new_frag;
3586 *last_frag = new_frag;
3588 skb->len += new_frag->len;
3589 skb->data_len += new_frag->len;
3590 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged I-frames. Unsegmented frames go to the
 * channel's recv callback directly; START frames record the expected SDU
 * length and seed chan->sdu; CONTINUE/END frames are appended as fragments,
 * and the completed SDU is delivered when its length matches sdu_len.
 * Oversized or mismatched reassembly frees the partial SDU (error path
 * truncated in this view). */
3593 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3597 switch (__get_ctrl_sar(chan, control)) {
3598 case L2CAP_SAR_UNSEGMENTED:
3602 err = chan->ops->recv(chan->data, skb);
3605 case L2CAP_SAR_START:
/* START frames carry the total SDU length in their first two bytes. */
3609 chan->sdu_len = get_unaligned_le16(skb->data);
3610 skb_pull(skb, L2CAP_SDULEN_SIZE);
3612 if (chan->sdu_len > chan->imtu) {
3617 if (skb->len >= chan->sdu_len)
3621 chan->sdu_last_frag = skb;
3627 case L2CAP_SAR_CONTINUE:
3631 append_skb_frag(chan->sdu, skb,
3632 &chan->sdu_last_frag);
/* Middle fragment must not reach/exceed the announced SDU length. */
3635 if (chan->sdu->len >= chan->sdu_len)
3645 append_skb_frag(chan->sdu, skb,
3646 &chan->sdu_last_frag);
/* END fragment: total must now match the announced length exactly. */
3649 if (chan->sdu->len != chan->sdu_len)
3652 err = chan->ops->recv(chan->data, chan->sdu)
3655 /* Reassembly complete */
3657 chan->sdu_last_frag = NULL;
3665 kfree_skb(chan->sdu);
3667 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy: mark the state, tell the peer with an RNR s-frame
 * carrying our current buffer_seq, and stop the ack timer (no acks while
 * busy). */
3674 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3678 BT_DBG("chan %p, Enter local busy", chan);
3680 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3682 control = __set_reqseq(chan, chan->buffer_seq);
3683 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3684 l2cap_send_sframe(chan, control);
3686 set_bit(CONN_RNR_SENT, &chan->conn_state);
3688 __clear_ack_timer(chan);
/* Leave ERTM local-busy: if we previously advertised RNR, send an RR with
 * the poll bit set and arm the monitor timer to await the peer's F-bit
 * response; then clear the busy/RNR state bits. */
3691 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
/* Nothing to undo if we never told the peer we were busy. */
3695 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3698 control = __set_reqseq(chan, chan->buffer_seq);
3699 control |= __set_ctrl_poll(chan);
3700 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3701 l2cap_send_sframe(chan, control);
3702 chan->retry_count = 1;
3704 __clear_retrans_timer(chan);
3705 __set_monitor_timer(chan);
3707 set_bit(CONN_WAIT_F, &chan->conn_state);
3710 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3711 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3713 BT_DBG("chan %p, Exit local busy", chan);
/* Public toggle for the channel's local-busy state; only meaningful in
 * ERTM mode. */
3716 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3718 if (chan->mode == L2CAP_MODE_ERTM) {
3720 l2cap_ertm_enter_local_busy(chan);
3722 l2cap_ertm_exit_local_busy(chan);
/* Drain consecutively-sequenced frames from the head of the SREJ queue,
 * starting at tx_seq, feeding each into SDU reassembly and advancing
 * buffer_seq_srej. Stops at the first gap or if we go locally busy;
 * a reassembly error tears the channel down. */
3726 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3728 struct sk_buff *skb;
3731 while ((skb = skb_peek(&chan->srej_q)) &&
3732 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue head is not the next expected frame: gap still open. */
3735 if (bt_cb(skb)->tx_seq != tx_seq)
3738 skb = skb_dequeue(&chan->srej_q);
3739 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3740 err = l2cap_reassemble_sdu(chan, skb, control);
3743 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3747 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3748 tx_seq = __next_seq(chan, tx_seq);
/* Re-send the SREJ s-frame for tx_seq: walk the outstanding srej list,
 * re-transmit the matching entry's SREJ and move it to the list tail
 * (keeping the list in request order). */
3752 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3754 struct srej_list *l, *tmp;
3757 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3758 if (l->tx_seq == tx_seq) {
3763 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3764 control |= __set_reqseq(chan, l->tx_seq);
3765 l2cap_send_sframe(chan, control);
3767 list_add_tail(&l->list, &chan->srej_l);
/* Issue an SREJ for every sequence number between expected_tx_seq and the
 * received tx_seq (exclusive), recording each in the srej list with a
 * GFP_ATOMIC allocation, then step expected_tx_seq past tx_seq itself. */
3771 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3773 struct srej_list *new;
3776 while (tx_seq != chan->expected_tx_seq) {
3777 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3778 control |= __set_reqseq(chan, chan->expected_tx_seq);
3779 l2cap_send_sframe(chan, control);
3781 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3785 new->tx_seq = chan->expected_tx_seq;
3787 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3789 list_add_tail(&new->list, &chan->srej_l);
/* Account for the frame that triggered the SREJ run. */
3792 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path. Handles: F-bit clearing of WAIT_F, ack
 * bookkeeping via req_seq, window validation, in-sequence delivery, and
 * the selective-reject (SREJ) machinery for out-of-sequence frames —
 * queueing them, issuing SREJs for the gap, and exiting SREJ_SENT once the
 * list drains. Several branch bodies are elided in this view; the overall
 * flow is annotated from the visible statements only. */
3797 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3799 u16 tx_seq = __get_txseq(chan, rx_control);
3800 u16 req_seq = __get_reqseq(chan, rx_control);
3801 u8 sar = __get_ctrl_sar(chan, rx_control);
3802 int tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold: roughly a sixth of the tx window, plus one. */
3803 int num_to_ack = (chan->tx_win/6) + 1;
3806 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3807 tx_seq, rx_control);
/* F-bit answering our poll: stop the monitor timer, resume retransmit
 * timer if frames are still unacked. */
3809 if (__is_ctrl_final(chan, rx_control) &&
3810 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3811 __clear_monitor_timer(chan);
3812 if (chan->unacked_frames > 0)
3813 __set_retrans_timer(chan);
3814 clear_bit(CONN_WAIT_F, &chan->conn_state);
3817 chan->expected_ack_seq = req_seq;
3818 l2cap_drop_acked_frames(chan);
3820 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3822 /* invalid tx_seq */
3823 if (tx_seq_offset >= chan->tx_win) {
3824 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3828 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3831 if (tx_seq == chan->expected_tx_seq)
3834 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3835 struct srej_list *first;
3837 first = list_first_entry(&chan->srej_l,
3838 struct srej_list, list);
/* This frame fills the oldest outstanding gap: queue it and try to
 * drain any now-contiguous frames behind it. */
3839 if (tx_seq == first->tx_seq) {
3840 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3841 l2cap_check_srej_gap(chan, tx_seq);
3843 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT and resync buffer_seq. */
3846 if (list_empty(&chan->srej_l)) {
3847 chan->buffer_seq = chan->buffer_seq_srej;
3848 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3849 l2cap_send_ack(chan);
3850 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3853 struct srej_list *l;
3855 /* duplicated tx_seq */
3856 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3859 list_for_each_entry(l, &chan->srej_l, list) {
3860 if (l->tx_seq == tx_seq) {
3861 l2cap_resend_srejframe(chan, tx_seq);
3866 err = l2cap_send_srejframe(chan, tx_seq);
3868 l2cap_send_disconn_req(chan->conn, chan, -err);
3873 expected_tx_seq_offset = __seq_offset(chan,
3874 chan->expected_tx_seq, chan->buffer_seq);
3876 /* duplicated tx_seq */
3877 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ mode. */
3880 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3882 BT_DBG("chan %p, Enter SREJ", chan);
3884 INIT_LIST_HEAD(&chan->srej_l);
3885 chan->buffer_seq_srej = chan->buffer_seq;
3887 __skb_queue_head_init(&chan->srej_q);
3888 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3890 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3892 err = l2cap_send_srejframe(chan, tx_seq);
3894 l2cap_send_disconn_req(chan->conn, chan, -err);
3898 __clear_ack_timer(chan);
/* In-sequence path: consume the frame and advance expected_tx_seq. */
3903 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3905 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3906 bt_cb(skb)->tx_seq = tx_seq;
3907 bt_cb(skb)->sar = sar;
3908 __skb_queue_tail(&chan->srej_q, skb);
3912 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3913 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3916 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3920 if (__is_ctrl_final(chan, rx_control)) {
3921 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3922 l2cap_retransmit_frames(chan);
/* Ack every num_to_ack frames; otherwise (re)arm the ack timer. */
3926 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3927 if (chan->num_acked == num_to_ack - 1)
3928 l2cap_send_ack(chan);
3930 __set_ack_timer(chan);
/* Handle a Receiver-Ready s-frame: update the ack point, then react to the
 * P/F bits — a poll demands an immediate F-bit response (SREJ tail or
 * I/RR/RNR), a final bit may trigger REJ-driven retransmission, and a plain
 * RR resumes normal transmission. */
3939 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3941 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3942 __get_reqseq(chan, rx_control), rx_control);
3944 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3945 l2cap_drop_acked_frames(chan);
3947 if (__is_ctrl_poll(chan, rx_control)) {
3948 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3949 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3950 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3951 (chan->unacked_frames > 0))
3952 __set_retrans_timer(chan);
3954 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3955 l2cap_send_srejtail(chan);
3957 l2cap_send_i_or_rr_or_rnr(chan);
3960 } else if (__is_ctrl_final(chan, rx_control)) {
3961 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit without a pending REJ action: retransmit outstanding frames. */
3963 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3964 l2cap_retransmit_frames(chan);
3967 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3968 (chan->unacked_frames > 0))
3969 __set_retrans_timer(chan);
3971 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3972 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3973 l2cap_send_ack(chan);
3975 l2cap_ertm_send(chan);
/* Handle a Reject s-frame: the peer asks for retransmission from req_seq.
 * Acked frames up to that point are dropped, then frames are retransmitted
 * (guarded by REJ_ACT when the F-bit is set to avoid double retransmit). */
3979 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3981 u16 tx_seq = __get_reqseq(chan, rx_control);
3983 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3985 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3987 chan->expected_ack_seq = tx_seq;
3988 l2cap_drop_acked_frames(chan);
3990 if (__is_ctrl_final(chan, rx_control)) {
3991 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3992 l2cap_retransmit_frames(chan);
3994 l2cap_retransmit_frames(chan);
/* Remember we already acted on a REJ while awaiting the F-bit. */
3996 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3997 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective-Reject s-frame: retransmit exactly the requested
 * frame. P-bit variant also acks, sets the F-bit for the reply and resumes
 * sending; F-bit variant clears a matching saved SREJ action; the plain
 * variant records the request while awaiting our own F-bit. */
4000 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4002 u16 tx_seq = __get_reqseq(chan, rx_control);
4004 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4006 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4008 if (__is_ctrl_poll(chan, rx_control)) {
4009 chan->expected_ack_seq = tx_seq;
4010 l2cap_drop_acked_frames(chan);
4012 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4013 l2cap_retransmit_one_frame(chan, tx_seq);
4015 l2cap_ertm_send(chan);
4017 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4018 chan->srej_save_reqseq = tx_seq;
4019 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4021 } else if (__is_ctrl_final(chan, rx_control)) {
/* F-bit matching a saved SREJ: the retransmit already happened. */
4022 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4023 chan->srej_save_reqseq == tx_seq)
4024 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4026 l2cap_retransmit_one_frame(chan, tx_seq);
4028 l2cap_retransmit_one_frame(chan, tx_seq);
4029 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4030 chan->srej_save_reqseq = tx_seq;
4031 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver-Not-Ready s-frame: mark the peer busy, update the ack
 * point, and if polled, answer with the F-bit (RR/RNR, or the SREJ tail
 * when we are in SREJ recovery). */
4036 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4038 u16 tx_seq = __get_reqseq(chan, rx_control);
4040 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4042 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4043 chan->expected_ack_seq = tx_seq;
4044 l2cap_drop_acked_frames(chan);
4046 if (__is_ctrl_poll(chan, rx_control))
4047 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4049 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer is busy: no point keeping the retransmit timer running. */
4050 __clear_retrans_timer(chan);
4051 if (__is_ctrl_poll(chan, rx_control))
4052 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4056 if (__is_ctrl_poll(chan, rx_control)) {
4057 l2cap_send_srejtail(chan);
4059 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4060 l2cap_send_sframe(chan, rx_control);
/* Demultiplex a supervisory frame (RR/REJ/SREJ/RNR) to its handler, first
 * servicing an F-bit that answers an outstanding poll (clears WAIT_F and
 * swaps the monitor timer back for the retransmit timer if needed). */
4064 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4066 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4068 if (__is_ctrl_final(chan, rx_control) &&
4069 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4070 __clear_monitor_timer(chan);
4071 if (chan->unacked_frames > 0)
4072 __set_retrans_timer(chan);
4073 clear_bit(CONN_WAIT_F, &chan->conn_state);
4076 switch (__get_ctrl_super(chan, rx_control)) {
4077 case L2CAP_SUPER_RR:
4078 l2cap_data_channel_rrframe(chan, rx_control);
4081 case L2CAP_SUPER_REJ:
4082 l2cap_data_channel_rejframe(chan, rx_control);
4085 case L2CAP_SUPER_SREJ:
4086 l2cap_data_channel_srejframe(chan, rx_control);
4089 case L2CAP_SUPER_RNR:
4090 l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM receive entry point for one frame: pull the control field, verify
 * FCS, bound-check the payload against MPS, validate req_seq against the
 * unacked window, then route to the I-frame or S-frame handler. Most
 * failures disconnect the channel with ECONNRESET. */
4098 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4100 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4103 int len, next_tx_seq_offset, req_seq_offset;
4105 control = __get_control(chan, skb->data);
4106 skb_pull(skb, __ctrl_size(chan));
4110 * We can just drop the corrupted I-frame here.
4111 * Receiver will miss it and start proper recovery
4112 * procedures and ask retransmission.
4114 if (l2cap_check_fcs(chan, skb))
/* SDU length field only exists on SAR-start I-frames. */
4117 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4118 len -= L2CAP_SDULEN_SIZE;
4120 if (chan->fcs == L2CAP_FCS_CRC16)
4121 len -= L2CAP_FCS_SIZE;
4123 if (len > chan->mps) {
4124 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4128 req_seq = __get_reqseq(chan, control);
4130 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4132 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4133 chan->expected_ack_seq);
4135 /* check for invalid req-seq */
4136 if (req_seq_offset > next_tx_seq_offset) {
4137 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4141 if (!__is_sframe(chan, control)) {
/* I-frames must carry payload (len check elided in this view). */
4143 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4147 l2cap_data_channel_iframe(chan, control, skb);
/* S-frames must carry no payload. */
4151 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4155 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the channel identified by cid, dispatching on
 * the channel's mode: basic (direct recv with MTU check), ERTM (full
 * state machine), or streaming (no retransmission — missing frames just
 * discard the partial SDU in progress). */
4165 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4167 struct l2cap_chan *chan;
4168 struct sock *sk = NULL;
4173 chan = l2cap_get_chan_by_scid(conn, cid);
4175 BT_DBG("unknown cid 0x%4.4x", cid);
4181 BT_DBG("chan %p, len %d", chan, skb->len);
4183 if (chan->state != BT_CONNECTED)
4186 switch (chan->mode) {
4187 case L2CAP_MODE_BASIC:
4188 /* If socket recv buffers overflows we drop data here
4189 * which is *bad* because L2CAP has to be reliable.
4190 * But we don't have any other choice. L2CAP doesn't
4191 * provide flow control mechanism. */
4193 if (chan->imtu < skb->len)
4196 if (!chan->ops->recv(chan->data, skb))
4200 case L2CAP_MODE_ERTM:
4201 l2cap_ertm_data_rcv(sk, skb);
4205 case L2CAP_MODE_STREAMING:
4206 control = __get_control(chan, skb->data);
4207 skb_pull(skb, __ctrl_size(chan));
4210 if (l2cap_check_fcs(chan, skb))
4213 if (__is_sar_start(chan, control))
4214 len -= L2CAP_SDULEN_SIZE;
4216 if (chan->fcs == L2CAP_FCS_CRC16)
4217 len -= L2CAP_FCS_SIZE;
/* Streaming mode never carries s-frames; payload must fit in MPS. */
4219 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4222 tx_seq = __get_txseq(chan, control);
4224 if (chan->expected_tx_seq != tx_seq) {
4225 /* Frame(s) missing - must discard partial SDU */
4226 kfree_skb(chan->sdu);
4228 chan->sdu_last_frag = NULL;
4231 /* TODO: Notify userland of missing data */
4234 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4236 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4237 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4242 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (CID 0x0002) frame to a channel bound to the
 * given PSM on our local address, subject to state and MTU checks. */
4256 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4258 struct sock *sk = NULL;
4259 struct l2cap_chan *chan;
4261 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4269 BT_DBG("sk %p, len %d", sk, skb->len);
4271 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4274 if (chan->imtu < skb->len)
4277 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed-channel) frame to the channel bound to that cid
 * on our local address — same state/MTU gating as the connectionless path. */
4289 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4291 struct sock *sk = NULL;
4292 struct l2cap_chan *chan;
4294 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4302 BT_DBG("sk %p, len %d", sk, skb->len);
4304 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4307 if (chan->imtu < skb->len)
4310 if (!chan->ops->recv(chan->data, skb))
/* Top-level demux for a complete L2CAP frame: strip the basic header,
 * sanity-check the length, and route by CID — signalling, connectionless,
 * ATT, SMP, or a dynamically allocated data channel. */
4322 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4324 struct l2cap_hdr *lh = (void *) skb->data;
4328 skb_pull(skb, L2CAP_HDR_SIZE);
4329 cid = __le16_to_cpu(lh->cid);
4330 len = __le16_to_cpu(lh->len);
/* Header length must match the reassembled payload exactly. */
4332 if (len != skb->len) {
4337 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4340 case L2CAP_CID_LE_SIGNALING:
4341 case L2CAP_CID_SIGNALING:
4342 l2cap_sig_channel(conn, skb);
4345 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the target PSM first in the payload. */
4346 psm = get_unaligned_le16(skb->data);
4348 l2cap_conless_channel(conn, psm, skb);
4351 case L2CAP_CID_LE_DATA:
4352 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a failure tears the whole connection down. */
4356 if (smp_sig_channel(conn, skb))
4357 l2cap_conn_del(conn->hcon, EACCES);
4361 l2cap_data_channel(conn, cid, skb);
4366 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from bdaddr?
 * Scans listening channels; an exact local-address match (lm1) takes
 * precedence over wildcard BDADDR_ANY listeners (lm2). The returned link
 * mode mask may include HCI_LM_MASTER if a listener wants role switch. */
4368 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4370 int exact = 0, lm1 = 0, lm2 = 0;
4371 struct l2cap_chan *c;
4373 if (type != ACL_LINK)
4376 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4378 /* Find listening sockets and check their link_mode */
4379 read_lock(&chan_list_lock);
4380 list_for_each_entry(c, &chan_list, global_l) {
4381 struct sock *sk = c->sk;
4383 if (c->state != BT_LISTEN)
4386 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4387 lm1 |= HCI_LM_ACCEPT;
4388 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4389 lm1 |= HCI_LM_MASTER;
4391 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4392 lm2 |= HCI_LM_ACCEPT;
4393 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4394 lm2 |= HCI_LM_MASTER;
4397 read_unlock(&chan_list_lock);
4399 return exact ? lm1 : lm2;
/* HCI callback: ACL/LE link establishment finished. On success create the
 * L2CAP connection object and kick pending channels; on failure tear down
 * any existing L2CAP state with the mapped errno. */
4402 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4404 struct l2cap_conn *conn;
4406 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4408 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4412 conn = l2cap_conn_add(hcon, status);
4414 l2cap_conn_ready(conn);
4416 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: supply the HCI reason code to use when disconnecting this
 * link, taken from conn->disc_reason (defaults to remote-user-terminated
 * when there is no L2CAP state). */
4421 static int l2cap_disconn_ind(struct hci_conn *hcon)
4423 struct l2cap_conn *conn = hcon->l2cap_data;
4425 BT_DBG("hcon %p", hcon);
4427 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4428 return HCI_ERROR_REMOTE_USER_TERM;
4430 return conn->disc_reason;
/* HCI callback: the link is gone — tear down the L2CAP connection with the
 * errno mapped from the HCI reason. */
4433 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4435 BT_DBG("hcon %p reason %d", hcon, reason);
4437 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4440 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * on encryption loss, MEDIUM security gets a grace timer while HIGH
 * security closes immediately; on encryption gain, MEDIUM's pending
 * timer is cancelled. */
4445 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4447 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4450 if (encrypt == 0x00) {
4451 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4452 __clear_chan_timer(chan);
4453 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4454 } else if (chan->sec_level == BT_SECURITY_HIGH)
4455 l2cap_chan_close(chan, ECONNREFUSED);
4457 if (chan->sec_level == BT_SECURITY_MEDIUM)
4458 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption procedure completed with
 * 'status' and resulting 'encrypt' state. For LE links, hand off to SMP
 * key distribution. For each channel: LE data channels become ready on
 * success; BT_CONNECT channels send their deferred Connect Request;
 * BT_CONNECT2 channels either defer to userspace (authorization pending),
 * proceed to BT_CONFIG, or are security-blocked, and the corresponding
 * Connect Response is sent. Several branch bodies are elided here. */
4462 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4464 struct l2cap_conn *conn = hcon->l2cap_data;
4465 struct l2cap_chan *chan;
4470 BT_DBG("conn %p", conn);
4472 if (hcon->type == LE_LINK) {
4473 smp_distribute_keys(conn, 0);
4474 del_timer(&conn->security_timer);
4477 mutex_lock(&conn->chan_lock);
4479 list_for_each_entry(chan, &conn->chan_l, list) {
4480 struct sock *sk = chan->sk;
4484 BT_DBG("chan->scid %d", chan->scid);
4486 if (chan->scid == L2CAP_CID_LE_DATA) {
4487 if (!status && encrypt) {
/* LE data channel: inherit the link's security level and go ready. */
4488 chan->sec_level = hcon->sec_level;
4489 l2cap_chan_ready(sk);
4496 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4501 if (!status && (chan->state == BT_CONNECTED ||
4502 chan->state == BT_CONFIG)) {
4503 l2cap_check_encryption(chan, encrypt);
/* Security now satisfied: fire the Connect Request we were holding. */
4508 if (chan->state == BT_CONNECT) {
4510 struct l2cap_conn_req req;
4511 req.scid = cpu_to_le16(chan->scid);
4512 req.psm = chan->psm;
4514 chan->ident = l2cap_get_ident(conn);
4515 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4517 l2cap_send_cmd(conn, chan->ident,
4518 L2CAP_CONN_REQ, sizeof(req), &req);
4520 __clear_chan_timer(chan);
4521 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4523 } else if (chan->state == BT_CONNECT2) {
4524 struct l2cap_conn_rsp rsp;
/* Incoming connection waiting on security: decide the response. */
4528 if (bt_sk(sk)->defer_setup) {
4529 struct sock *parent = bt_sk(sk)->parent;
4530 res = L2CAP_CR_PEND;
4531 stat = L2CAP_CS_AUTHOR_PEND;
4533 parent->sk_data_ready(parent, 0);
4535 l2cap_state_change(chan, BT_CONFIG);
4536 res = L2CAP_CR_SUCCESS;
4537 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection as security-blocked. */
4540 l2cap_state_change(chan, BT_DISCONN);
4541 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4542 res = L2CAP_CR_SEC_BLOCK;
4543 stat = L2CAP_CS_NO_INFO;
4546 rsp.scid = cpu_to_le16(chan->dcid);
4547 rsp.dcid = cpu_to_le16(chan->scid);
4548 rsp.result = cpu_to_le16(res);
4549 rsp.status = cpu_to_le16(stat);
4550 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4557 mutex_unlock(&conn->chan_lock);
4562 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4564 struct l2cap_conn *conn = hcon->l2cap_data;
4567 conn = l2cap_conn_add(hcon, 0);
4572 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4574 if (!(flags & ACL_CONT)) {
4575 struct l2cap_hdr *hdr;
4576 struct l2cap_chan *chan;
4581 BT_ERR("Unexpected start frame (len %d)", skb->len);
4582 kfree_skb(conn->rx_skb);
4583 conn->rx_skb = NULL;
4585 l2cap_conn_unreliable(conn, ECOMM);
4588 /* Start fragment always begin with Basic L2CAP header */
4589 if (skb->len < L2CAP_HDR_SIZE) {
4590 BT_ERR("Frame is too short (len %d)", skb->len);
4591 l2cap_conn_unreliable(conn, ECOMM);
4595 hdr = (struct l2cap_hdr *) skb->data;
4596 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4597 cid = __le16_to_cpu(hdr->cid);
4599 if (len == skb->len) {
4600 /* Complete frame received */
4601 l2cap_recv_frame(conn, skb);
4605 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4607 if (skb->len > len) {
4608 BT_ERR("Frame is too long (len %d, expected len %d)",
4610 l2cap_conn_unreliable(conn, ECOMM);
4614 chan = l2cap_get_chan_by_scid(conn, cid);
4616 if (chan && chan->sk) {
4617 struct sock *sk = chan->sk;
4619 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4620 BT_ERR("Frame exceeding recv MTU (len %d, "
4624 l2cap_conn_unreliable(conn, ECOMM);
4630 /* Allocate skb for the complete frame (with header) */
4631 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4635 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4637 conn->rx_len = len - skb->len;
4639 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4641 if (!conn->rx_len) {
4642 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4643 l2cap_conn_unreliable(conn, ECOMM);
4647 if (skb->len > conn->rx_len) {
4648 BT_ERR("Fragment is too long (len %d, expected %d)",
4649 skb->len, conn->rx_len);
4650 kfree_skb(conn->rx_skb);
4651 conn->rx_skb = NULL;
4653 l2cap_conn_unreliable(conn, ECOMM);
4657 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4659 conn->rx_len -= skb->len;
4661 if (!conn->rx_len) {
4662 /* Complete frame received */
4663 l2cap_recv_frame(conn, conn->rx_skb);
4664 conn->rx_skb = NULL;
4673 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4675 struct l2cap_chan *c;
4677 read_lock_bh(&chan_list_lock);
4679 list_for_each_entry(c, &chan_list, global_l) {
4680 struct sock *sk = c->sk;
4682 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4683 batostr(&bt_sk(sk)->src),
4684 batostr(&bt_sk(sk)->dst),
4685 c->state, __le16_to_cpu(c->psm),
4686 c->scid, c->dcid, c->imtu, c->omtu,
4687 c->sec_level, c->mode);
4690 read_unlock_bh(&chan_list_lock);
4695 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4697 return single_open(file, l2cap_debugfs_show, inode->i_private);
4700 static const struct file_operations l2cap_debugfs_fops = {
4701 .open = l2cap_debugfs_open,
4703 .llseek = seq_lseek,
4704 .release = single_release,
4707 static struct dentry *l2cap_debugfs;
4709 static struct hci_proto l2cap_hci_proto = {
4711 .id = HCI_PROTO_L2CAP,
4712 .connect_ind = l2cap_connect_ind,
4713 .connect_cfm = l2cap_connect_cfm,
4714 .disconn_ind = l2cap_disconn_ind,
4715 .disconn_cfm = l2cap_disconn_cfm,
4716 .security_cfm = l2cap_security_cfm,
4717 .recv_acldata = l2cap_recv_acldata
4720 int __init l2cap_init(void)
4724 err = l2cap_init_sockets();
4728 err = hci_register_proto(&l2cap_hci_proto);
4730 BT_ERR("L2CAP protocol registration failed");
4731 bt_sock_unregister(BTPROTO_L2CAP);
4736 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4737 bt_debugfs, NULL, &l2cap_debugfs_fops);
4739 BT_ERR("Failed to create L2CAP debug file");
4745 l2cap_cleanup_sockets();
4749 void l2cap_exit(void)
4751 debugfs_remove(l2cap_debugfs);
4753 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4754 BT_ERR("L2CAP protocol unregistration failed");
4756 l2cap_cleanup_sockets();
4759 module_param(disable_ertm, bool, 0644);
4760 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");