2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
62 /* ---- L2CAP channels ---- */
/* Find a channel on this connection by destination CID.
 * NOTE(review): body truncated in this fragment. Per the locked wrapper
 * l2cap_get_chan_by_dcid() below, callers hold conn->chan_lock.
 */
64 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
69 	list_for_each_entry(c, &conn->chan_l, list) {
/* Find a channel on this connection by source CID.
 * NOTE(review): body truncated. Callers hold conn->chan_lock (see the
 * locked wrapper l2cap_get_chan_by_scid() below).
 */
76 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
81 	list_for_each_entry(c, &conn->chan_l, list) {
88 /* Find channel with given SCID.
89  * Returns locked channel. */
/* Locked lookup: takes conn->chan_lock around the unlocked helper.
 * NOTE(review): the channel-lock/hold step between lookup and unlock is
 * elided from this fragment.
 */
90 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
95 	mutex_lock(&conn->chan_lock);
96 	c = __l2cap_get_chan_by_scid(conn, cid);
99 	mutex_unlock(&conn->chan_lock);
104 /* Find channel with given DCID.
105  * Returns locked channel.
/* Locked lookup by destination CID; mirrors l2cap_get_chan_by_scid(). */
107 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
110 	struct l2cap_chan *c;
112 	mutex_lock(&conn->chan_lock);
113 	c = __l2cap_get_chan_by_dcid(conn, cid);
116 	mutex_unlock(&conn->chan_lock);
/* Find a channel by pending signalling command identifier (chan->ident).
 * Callers hold conn->chan_lock (see locked wrapper below).
 */
121 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
124 	struct l2cap_chan *c;
126 	list_for_each_entry(c, &conn->chan_l, list) {
127 		if (c->ident == ident)
/* Locked lookup by signalling identifier. */
133 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 	struct l2cap_chan *c;
138 	mutex_lock(&conn->chan_lock);
139 	c = __l2cap_get_chan_by_ident(conn, ident);
142 	mutex_unlock(&conn->chan_lock);
/* Search the global channel list for a channel bound to (psm, src addr).
 * Callers hold chan_list_lock (see l2cap_add_psm() below).
 */
147 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
149 	struct l2cap_chan *c;
151 	list_for_each_entry(c, &chan_list, global_l) {
152 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM. If psm is non-zero, fail when (psm, src) is
 * already taken; otherwise auto-allocate an odd dynamic PSM in the
 * 0x1001..0x10ff range. Serialized by chan_list_lock.
 * NOTE(review): error paths and return value are elided from this fragment.
 */
158 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
162 	write_lock(&chan_list_lock);
164 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
	/* Dynamic PSMs are odd values; step by 2 to keep the low bit set. */
177 		for (p = 0x1001; p < 0x1100; p += 2)
178 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
179 				chan->psm   = cpu_to_le16(p);
180 				chan->sport = cpu_to_le16(p);
187 	write_unlock(&chan_list_lock);
/* Assign a fixed SCID to a channel under chan_list_lock.
 * NOTE(review): the assignment itself is elided from this fragment.
 */
191 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
193 	write_lock(&chan_list_lock);
197 	write_unlock(&chan_list_lock);
/* Allocate the first free dynamic source CID on this connection by linear
 * scan over [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END).
 */
202 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
204 	u16 cid = L2CAP_CID_DYN_START;
206 	for (; cid < L2CAP_CID_DYN_END; cid++) {
207 		if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition the channel state and notify via the ops->state_change
 * callback. "__" prefix: unlocked variant (locked wrapper follows).
 */
214 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
216 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
217 	       state_to_string(state));
220 	chan->ops->state_change(chan, state);
/* Locked state change. NOTE(review): the sk lock/unlock surrounding the
 * unlocked call is elided from this fragment.
 */
223 static void l2cap_state_change(struct l2cap_chan *chan, int state)
225 	struct sock *sk = chan->sk;
228 	__l2cap_state_change(chan, state);
/* Record an error on the channel's socket (unlocked variant).
 * NOTE(review): the sk->sk_err assignment is elided from this fragment.
 */
232 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
234 	struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(). */
239 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
241 	struct sock *sk = chan->sk;
244 	__l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only if the monitor timer is not
 * already pending and a retransmission timeout is configured.
 */
248 static void __set_retrans_timer(struct l2cap_chan *chan)
250 	if (!delayed_work_pending(&chan->monitor_timer) &&
251 	    chan->retrans_timeout) {
252 		l2cap_set_timer(chan, &chan->retrans_timer,
253 				msecs_to_jiffies(chan->retrans_timeout));
/* Switch from the retransmission timer to the monitor timer: the two are
 * mutually exclusive, so the retrans timer is cleared first.
 */
257 static void __set_monitor_timer(struct l2cap_chan *chan)
259 	__clear_retrans_timer(chan);
260 	if (chan->monitor_timeout) {
261 		l2cap_set_timer(chan, &chan->monitor_timer,
262 				msecs_to_jiffies(chan->monitor_timeout));
/* Linear search of an skb queue for the frame carrying tx sequence number
 * 'seq' (stored in the per-skb control block).
 */
266 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
271 	skb_queue_walk(head, skb) {
272 		if (bt_cb(skb)->control.txseq == seq)
279 /* ---- L2CAP sequence number lists ---- */
281 /* For ERTM, ordered lists of sequence numbers must be tracked for
282 * SREJ requests that are received and for frames that are to be
283 * retransmitted. These seq_list functions implement a singly-linked
284 * list in an array, where membership in the list can also be checked
285 * in constant time. Items can also be added to the tail of the list
286  * and removed from the head in constant time, without further memory
 * allocation.
/* Initialize a seq_list: allocate a power-of-two u16 array and mark every
 * slot (and head/tail) as CLEAR. The mask maps a 14-bit sequence number
 * into the smaller array. Returns 0 on success — allocation-failure return
 * is elided from this fragment.
 */
290 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
292 	size_t alloc_size, i;
294 	/* Allocated size is a power of 2 to map sequence numbers
295 	 * (which may be up to 14 bits) in to a smaller array that is
296 	 * sized for the negotiated ERTM transmit windows.
298 	alloc_size = roundup_pow_of_two(size);
300 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
304 	seq_list->mask = alloc_size - 1;
305 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
306 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
307 	for (i = 0; i < alloc_size; i++)
308 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the seq_list's backing array. kfree(NULL) is a no-op, so this is
 * safe on a never-initialized list.
 */
313 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
315 	kfree(seq_list->list);
/* O(1) membership test: a slot holds CLEAR iff its sequence number is not
 * on the list (occupied slots hold the next element or the TAIL marker).
 */
318 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
321 	/* Constant-time check for list membership */
322 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove 'seq' from the list and return it. Three cases: empty list
 * (return CLEAR), head removal (constant time), or interior removal
 * (linear walk to find the predecessor, then unlink). Tail is patched when
 * the removed element was last.
 */
325 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
327 	u16 mask = seq_list->mask;
329 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
330 		/* In case someone tries to pop the head of an empty list */
331 		return L2CAP_SEQ_LIST_CLEAR;
332 	} else if (seq_list->head == seq) {
333 		/* Head can be removed in constant time */
334 		seq_list->head = seq_list->list[seq & mask];
335 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
	/* If the new head is the TAIL sentinel, the list is now empty. */
337 		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
338 			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
339 			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
342 		/* Walk the list to find the sequence number */
343 		u16 prev = seq_list->head;
344 		while (seq_list->list[prev & mask] != seq) {
345 			prev = seq_list->list[prev & mask];
	/* Reached the end without finding seq: not on the list. */
346 			if (prev == L2CAP_SEQ_LIST_TAIL)
347 				return L2CAP_SEQ_LIST_CLEAR;
350 		/* Unlink the number from the list and clear it */
351 		seq_list->list[prev & mask] = seq_list->list[seq & mask];
352 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
353 		if (seq_list->tail == seq)
354 			seq_list->tail = prev;
/* Pop the head of the list in constant time (delegates to remove, which
 * handles the empty-list case by returning CLEAR).
 */
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
361 	/* Remove the head in constant time */
362 	return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset the list to empty: skip the work if already empty, otherwise wipe
 * every slot and the head/tail markers.
 */
365 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
369 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
372 	for (i = 0; i <= seq_list->mask; i++)
373 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
375 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
376 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append 'seq' at the tail in constant time. Duplicates are ignored (slot
 * already occupied). An empty list makes seq the new head; otherwise the
 * old tail's slot is pointed at seq. The new tail slot holds the TAIL
 * sentinel, which doubles as the membership marker.
 */
379 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
381 	u16 mask = seq_list->mask;
383 	/* All appends happen in constant time */
385 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
388 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
389 		seq_list->head = seq;
391 		seq_list->list[seq_list->tail & mask] = seq;
393 	seq_list->tail = seq;
394 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan_timer expiry. Closes the channel with a
 * reason derived from its state (ECONNREFUSED for connected/config or a
 * timed-out non-SDP connect; other cases elided from this fragment), then
 * drops the reference taken when the timer was armed.
 */
397 static void l2cap_chan_timeout(struct work_struct *work)
399 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
401 	struct l2cap_conn *conn = chan->conn;
404 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
406 	mutex_lock(&conn->chan_lock);
407 	l2cap_chan_lock(chan);
409 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
410 		reason = ECONNREFUSED;
411 	else if (chan->state == BT_CONNECT &&
412 		 chan->sec_level != BT_SECURITY_SDP)
413 		reason = ECONNREFUSED;
417 	l2cap_chan_close(chan, reason);
419 	l2cap_chan_unlock(chan);
	/* ops->close is called outside the channel lock but inside chan_lock. */
421 	chan->ops->close(chan);
422 	mutex_unlock(&conn->chan_lock);
	/* Balance the hold taken when the timer was scheduled. */
424 	l2cap_chan_put(chan);
/* Allocate and initialize a new l2cap_chan: zeroed, linked into the global
 * channel list, timer work initialized, state BT_OPEN, refcount 1.
 * Returns the channel (NULL-check on allocation is elided from this
 * fragment).
 */
427 struct l2cap_chan *l2cap_chan_create(void)
429 	struct l2cap_chan *chan;
431 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
435 	mutex_init(&chan->lock);
437 	write_lock(&chan_list_lock);
438 	list_add(&chan->global_l, &chan_list);
439 	write_unlock(&chan_list_lock);
441 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
443 	chan->state = BT_OPEN;
445 	kref_init(&chan->kref);
447 	/* This flag is cleared in l2cap_chan_ready() */
448 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
450 	BT_DBG("chan %p", chan);
/* kref release callback: unlink the channel from the global list. The
 * final kfree is elided from this fragment.
 */
455 static void l2cap_chan_destroy(struct kref *kref)
457 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
459 	BT_DBG("chan %p", chan);
461 	write_lock(&chan_list_lock);
462 	list_del(&chan->global_l);
463 	write_unlock(&chan_list_lock);
/* Take a reference on the channel (kref_get — elided from this fragment). */
468 void l2cap_chan_hold(struct l2cap_chan *c)
470 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount))
/* Drop a reference; l2cap_chan_destroy() runs when it hits zero. */
475 void l2cap_chan_put(struct l2cap_chan *c)
477 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount))
479 	kref_put(&c->kref, l2cap_chan_destroy);
/* Install spec-default channel parameters: CRC16 FCS, default max
 * transmit/TX-window values, low security, and force-active ACL policy.
 */
482 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
484 	chan->fcs  = L2CAP_FCS_CRC16;
485 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
486 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
487 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
488 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
489 	chan->sec_level = BT_SECURITY_LOW;
491 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection: assign CIDs/MTU by channel type, seed
 * default QoS parameters, take chan and hcon references, and link into the
 * connection's channel list. Callers hold conn->chan_lock (see the locked
 * wrapper l2cap_chan_add() below).
 */
494 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
496 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
497 	       __le16_to_cpu(chan->psm), chan->dcid);
499 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
503 	switch (chan->chan_type) {
504 	case L2CAP_CHAN_CONN_ORIENTED:
	/* LE: ATT uses the fixed ATT CID; other LE channels get dynamic CIDs. */
505 		if (conn->hcon->type == LE_LINK) {
507 			chan->omtu = L2CAP_DEFAULT_MTU;
508 			if (chan->dcid == L2CAP_CID_ATT)
509 				chan->scid = L2CAP_CID_ATT;
511 				chan->scid = l2cap_alloc_cid(conn);
513 			/* Alloc CID for connection-oriented socket */
514 			chan->scid = l2cap_alloc_cid(conn);
515 			chan->omtu = L2CAP_DEFAULT_MTU;
519 	case L2CAP_CHAN_CONN_LESS:
520 		/* Connectionless socket */
521 		chan->scid = L2CAP_CID_CONN_LESS;
522 		chan->dcid = L2CAP_CID_CONN_LESS;
523 		chan->omtu = L2CAP_DEFAULT_MTU;
526 	case L2CAP_CHAN_CONN_FIX_A2MP:
527 		chan->scid = L2CAP_CID_A2MP;
528 		chan->dcid = L2CAP_CID_A2MP;
529 		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
530 		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
534 		/* Raw socket can send/recv signalling messages only */
535 		chan->scid = L2CAP_CID_SIGNALING;
536 		chan->dcid = L2CAP_CID_SIGNALING;
537 		chan->omtu = L2CAP_DEFAULT_MTU;
	/* Best-effort extended-flow-spec defaults. */
540 	chan->local_id		= L2CAP_BESTEFFORT_ID;
541 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
542 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
543 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
544 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
545 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
547 	l2cap_chan_hold(chan);
549 	hci_conn_hold(conn->hcon);
551 	list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: add the channel under conn->chan_lock. */
554 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
556 	mutex_lock(&conn->chan_lock);
557 	__l2cap_chan_add(conn, chan);
558 	mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop the channel timer, unlink it,
 * drop the references taken in __l2cap_chan_add() (the hcon ref is kept
 * for A2MP fixed channels), detach any AMP logical link, notify the owner
 * via ops->teardown, and purge mode-specific queues/timers (ERTM and
 * streaming). Several branch headers are elided from this fragment.
 */
561 void l2cap_chan_del(struct l2cap_chan *chan, int err)
563 	struct l2cap_conn *conn = chan->conn;
565 	__clear_chan_timer(chan);
567 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
570 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
571 		/* Delete from channel list */
572 		list_del(&chan->list);
574 		l2cap_chan_put(chan);
578 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
579 			hci_conn_drop(conn->hcon);
581 		if (mgr && mgr->bredr_chan == chan)
582 			mgr->bredr_chan = NULL;
585 	if (chan->hs_hchan) {
586 		struct hci_chan *hs_hchan = chan->hs_hchan;
588 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
589 		amp_disconnect_logical_link(hs_hchan);
592 	chan->ops->teardown(chan, err);
594 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
598 	case L2CAP_MODE_BASIC:
601 	case L2CAP_MODE_ERTM:
602 		__clear_retrans_timer(chan);
603 		__clear_monitor_timer(chan);
604 		__clear_ack_timer(chan);
606 		skb_queue_purge(&chan->srej_q);
608 		l2cap_seq_list_free(&chan->srej_list);
609 		l2cap_seq_list_free(&chan->retrans_list);
	/* fallthrough to streaming cleanup — TODO confirm against full source */
613 	case L2CAP_MODE_STREAMING:
614 		skb_queue_purge(&chan->tx_q);
/* Close a channel according to its current state: connected ACL channels
 * send a disconnect request (with a guard timer), BT_CONNECT2 ACL channels
 * answer the pending connect request with SEC_BLOCK/BAD_PSM, everything
 * else is torn down directly. The state-case labels are elided from this
 * fragment.
 */
621 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
623 	struct l2cap_conn *conn = chan->conn;
624 	struct sock *sk = chan->sk;
626 	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
629 	switch (chan->state) {
631 		chan->ops->teardown(chan, 0);
636 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
637 		    conn->hcon->type == ACL_LINK) {
638 			__set_chan_timer(chan, sk->sk_sndtimeo);
639 			l2cap_send_disconn_req(chan, reason);
641 			l2cap_chan_del(chan, reason);
645 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
646 		    conn->hcon->type == ACL_LINK) {
647 			struct l2cap_conn_rsp rsp;
	/* Deferred setup means the user never accepted: report SEC_BLOCK. */
650 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
651 				result = L2CAP_CR_SEC_BLOCK;
653 				result = L2CAP_CR_BAD_PSM;
654 			l2cap_state_change(chan, BT_DISCONN);
656 			rsp.scid   = cpu_to_le16(chan->dcid);
657 			rsp.dcid   = cpu_to_le16(chan->scid);
658 			rsp.result = cpu_to_le16(result);
659 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
660 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
664 		l2cap_chan_del(chan, reason);
669 		l2cap_chan_del(chan, reason);
673 		chan->ops->teardown(chan, 0);
/* Map channel type/PSM/security level to an HCI authentication
 * requirement: raw channels use dedicated bonding, SDP never bonds (and is
 * demoted from LOW to SDP security), everything else uses general bonding,
 * with MITM variants for BT_SECURITY_HIGH.
 */
678 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
680 	if (chan->chan_type == L2CAP_CHAN_RAW) {
681 		switch (chan->sec_level) {
682 		case BT_SECURITY_HIGH:
683 			return HCI_AT_DEDICATED_BONDING_MITM;
684 		case BT_SECURITY_MEDIUM:
685 			return HCI_AT_DEDICATED_BONDING;
687 			return HCI_AT_NO_BONDING;
689 	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
690 		if (chan->sec_level == BT_SECURITY_LOW)
691 			chan->sec_level = BT_SECURITY_SDP;
693 		if (chan->sec_level == BT_SECURITY_HIGH)
694 			return HCI_AT_NO_BONDING_MITM;
696 			return HCI_AT_NO_BONDING;
698 		switch (chan->sec_level) {
699 		case BT_SECURITY_HIGH:
700 			return HCI_AT_GENERAL_BONDING_MITM;
701 		case BT_SECURITY_MEDIUM:
702 			return HCI_AT_GENERAL_BONDING;
704 			return HCI_AT_NO_BONDING;
709 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level with the
 * derived authentication requirement.
 */
710 int l2cap_chan_check_security(struct l2cap_chan *chan)
712 	struct l2cap_conn *conn = chan->conn;
715 	auth_type = l2cap_get_auth_type(chan);
717 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range under conn->lock.
 */
720 static u8 l2cap_get_ident(struct l2cap_conn *conn)
724 	/* Get next available identificator.
725 	 *    1 - 128 are used by kernel.
726 	 *  129 - 199 are reserved.
727 	 *  200 - 254 are used by utilities like l2ping, etc.
730 	spin_lock(&conn->lock);
	/* Wrap-around handling (reset to 1) is elided from this fragment. */
732 	if (++conn->tx_ident > 128)
737 	spin_unlock(&conn->lock);
/* Build a signalling PDU and queue it on the connection's HCI channel at
 * maximum priority. Uses a non-flushable ACL start when the controller
 * supports it.
 */
742 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
745 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
748 	BT_DBG("code 0x%2.2x", code);
753 	if (lmp_no_flush_capable(conn->hcon->hdev))
754 		flags = ACL_START_NO_FLUSH;
758 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
759 	skb->priority = HCI_PRIO_MAX;
761 	hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move_state other than
 * STABLE or WAIT_PREPARE).
 */
764 static bool __chan_is_moving(struct l2cap_chan *chan)
766 	return chan->move_state != L2CAP_MOVE_STABLE &&
767 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit one frame on the channel. If a high-speed (AMP) link is
 * attached and no move is in progress, send on it; otherwise send on the
 * BR/EDR ACL link, marking the packet non-flushable when the channel is
 * not flushable and the controller supports it.
 */
770 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
772 	struct hci_conn *hcon = chan->conn->hcon;
775 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
778 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
780 		hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
787 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
788 	    lmp_no_flush_capable(hcon->hdev))
789 		flags = ACL_START_NO_FLUSH;
793 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
794 	hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced (ERTM) control field into l2cap_ctrl: common
 * reqseq/final bits, then either S-frame fields (poll/super) or I-frame
 * fields (sar/txseq) depending on the frame-type bit.
 */
797 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
799 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
800 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
802 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
805 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
806 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
813 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
814 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field; same structure as the enhanced
 * decoder but with the wider extended-window bit layout.
 */
821 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
823 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
824 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
826 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
829 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
830 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
837 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
838 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull the control field off the front of an skb into its control block,
 * choosing the extended or enhanced format from FLAG_EXT_CTRL.
 */
845 static inline void __unpack_control(struct l2cap_chan *chan,
848 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
849 		__unpack_extended_control(get_unaligned_le32(skb->data),
850 					  &bt_cb(skb)->control);
851 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
853 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
854 					  &bt_cb(skb)->control);
855 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode l2cap_ctrl into a 32-bit extended control word (inverse of
 * __unpack_extended_control).
 */
859 static u32 __pack_extended_control(struct l2cap_ctrl *control)
863 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
864 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
866 	if (control->sframe) {
867 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
868 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
869 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
871 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
872 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode l2cap_ctrl into a 16-bit enhanced control word (inverse of
 * __unpack_enhanced_control).
 */
878 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
882 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
883 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
885 	if (control->sframe) {
886 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
887 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
888 		packed |= L2CAP_CTRL_FRAME_TYPE;
890 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
891 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an already-built PDU, directly after
 * the basic L2CAP header.
 */
897 static inline void __pack_control(struct l2cap_chan *chan,
898 				  struct l2cap_ctrl *control,
901 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
902 		put_unaligned_le32(__pack_extended_control(control),
903 				   skb->data + L2CAP_HDR_SIZE);
905 		put_unaligned_le16(__pack_enhanced_control(control),
906 				   skb->data + L2CAP_HDR_SIZE);
/* ERTM header size: extended when FLAG_EXT_CTRL is set, enhanced otherwise. */
910 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
912 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
913 		return L2CAP_EXT_HDR_SIZE;
915 	return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU: basic L2CAP header (length excludes the header),
 * the packed control field in enhanced or extended form, and an optional
 * trailing CRC16 FCS over the frame so far. Returns the skb at max
 * priority, or ERR_PTR(-ENOMEM) on allocation failure.
 */
918 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
922 	struct l2cap_hdr *lh;
923 	int hlen = __ertm_hdr_size(chan);
925 	if (chan->fcs == L2CAP_FCS_CRC16)
926 		hlen += L2CAP_FCS_SIZE;
928 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
931 		return ERR_PTR(-ENOMEM);
933 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
934 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
935 	lh->cid = cpu_to_le16(chan->dcid);
937 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
938 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
940 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
942 	if (chan->fcs == L2CAP_FCS_CRC16) {
943 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
944 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
947 	skb->priority = HCI_PRIO_MAX;
/* Send a supervisory frame. Bails out for non-S-frames and while an AMP
 * move is in progress; manages the F-bit, RNR-sent state, and (for non-
 * SREJ frames) records the acked sequence and cancels the ack timer before
 * building and transmitting the PDU. Some guard lines are elided from this
 * fragment.
 */
951 static void l2cap_send_sframe(struct l2cap_chan *chan,
952 			      struct l2cap_ctrl *control)
957 	BT_DBG("chan %p, control %p", chan, control);
959 	if (!control->sframe)
962 	if (__chan_is_moving(chan))
	/* Consume a pending F-bit grant — condition tail elided. */
965 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
969 	if (control->super == L2CAP_SUPER_RR)
970 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
971 	else if (control->super == L2CAP_SUPER_RNR)
972 		set_bit(CONN_RNR_SENT, &chan->conn_state);
974 	if (control->super != L2CAP_SUPER_SREJ) {
975 		chan->last_acked_seq = control->reqseq;
976 		__clear_ack_timer(chan);
979 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
980 	       control->final, control->poll, control->super);
982 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
983 		control_field = __pack_extended_control(control);
985 		control_field = __pack_enhanced_control(control);
987 	skb = l2cap_create_sframe_pdu(chan, control_field);
989 		l2cap_do_send(chan, skb);
/* Send an RR (receiver ready) or RNR (receiver not ready) S-frame,
 * choosing RNR when the local side is busy, acknowledging up to
 * buffer_seq, with the requested poll bit.
 */
992 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
994 	struct l2cap_ctrl control;
996 	BT_DBG("chan %p, poll %d", chan, poll);
998 	memset(&control, 0, sizeof(control));
1000 	control.poll = poll;
1002 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1003 		control.super = L2CAP_SUPER_RNR;
1005 		control.super = L2CAP_SUPER_RR;
1007 	control.reqseq = chan->buffer_seq;
1008 	l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
1011 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1013 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: high-speed must
 * be enabled, the remote must advertise the A2MP fixed channel, at least
 * one non-BR/EDR AMP controller must be up, and the channel policy must
 * prefer AMP.
 */
1016 static bool __amp_capable(struct l2cap_chan *chan)
1018 	struct l2cap_conn *conn = chan->conn;
1019 	struct hci_dev *hdev;
1020 	bool amp_available = false;
1022 	if (!conn->hs_enabled)
1025 	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1028 	read_lock(&hci_dev_list_lock);
1029 	list_for_each_entry(hdev, &hci_dev_list, list) {
1030 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1031 		    test_bit(HCI_UP, &hdev->flags)) {
1032 			amp_available = true;
1036 	read_unlock(&hci_dev_list_lock);
1038 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1039 		return amp_available;
/* Validate extended flow spec parameters.
 * NOTE(review): the entire body is elided from this fragment.
 */
1044 static bool l2cap_check_efs(struct l2cap_chan *chan)
1046 	/* Check EFS parameters */
/* Send an L2CAP Connection Request for this channel: fill scid/psm, grab a
 * fresh signalling ident, mark the connect as pending, and transmit.
 */
1050 void l2cap_send_conn_req(struct l2cap_chan *chan)
1052 	struct l2cap_conn *conn = chan->conn;
1053 	struct l2cap_conn_req req;
1055 	req.scid = cpu_to_le16(chan->scid);
1056 	req.psm  = chan->psm;
1058 	chan->ident = l2cap_get_ident(conn);
1060 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1062 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send a Create Channel Request targeting the given AMP controller id
 * (the AMP analogue of l2cap_send_conn_req()).
 */
1065 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1067 	struct l2cap_create_chan_req req;
1068 	req.scid = cpu_to_le16(chan->scid);
1069 	req.psm  = chan->psm;
1070 	req.amp_id = amp_id;
1072 	chan->ident = l2cap_get_ident(chan->conn);
1074 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all ERTM timers, reset the
 * retry counter, mark previously-sent frames for (single) retransmission,
 * clear SREJ/retransmit state, and park TX/RX state machines for the move.
 * No-op for non-ERTM channels.
 */
1078 static void l2cap_move_setup(struct l2cap_chan *chan)
1080 	struct sk_buff *skb;
1082 	BT_DBG("chan %p", chan);
1084 	if (chan->mode != L2CAP_MODE_ERTM)
1087 	__clear_retrans_timer(chan);
1088 	__clear_monitor_timer(chan);
1089 	__clear_ack_timer(chan);
1091 	chan->retry_count = 0;
1092 	skb_queue_walk(&chan->tx_q, skb) {
	/* Frames already sent once get their retry count reset to 1. */
1093 		if (bt_cb(skb)->control.retries)
1094 			bt_cb(skb)->control.retries = 1;
1099 	chan->expected_tx_seq = chan->buffer_seq;
1101 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1102 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1103 	l2cap_seq_list_clear(&chan->retrans_list);
1104 	l2cap_seq_list_clear(&chan->srej_list);
1105 	skb_queue_purge(&chan->srej_q);
1107 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1108 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1110 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return move state/role to stable, then (ERTM only)
 * resynchronize — the initiator polls the peer and waits for F=1, the
 * responder waits for P=1.
 */
1113 static void l2cap_move_done(struct l2cap_chan *chan)
1115 	u8 move_role = chan->move_role;
1116 	BT_DBG("chan %p", chan);
1118 	chan->move_state = L2CAP_MOVE_STABLE;
1119 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1121 	if (chan->mode != L2CAP_MODE_ERTM)
1124 	switch (move_role) {
1125 	case L2CAP_MOVE_ROLE_INITIATOR:
1126 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1127 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1129 	case L2CAP_MOVE_ROLE_RESPONDER:
1130 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel fully connected: wipe all configuration flags
 * (including CONF_NOT_COMPLETE set at creation), stop the channel timer,
 * enter BT_CONNECTED, and notify the owner.
 */
1135 static void l2cap_chan_ready(struct l2cap_chan *chan)
1137 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1138 	chan->conf_state = 0;
1139 	__clear_chan_timer(chan);
1141 	chan->state = BT_CONNECTED;
1143 	chan->ops->ready(chan);
/* Kick off the connection: discover AMP controllers first when the channel
 * is AMP-capable, otherwise go straight to a BR/EDR connect request.
 */
1146 static void l2cap_start_connection(struct l2cap_chan *chan)
1148 	if (__amp_capable(chan)) {
1149 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1150 		a2mp_discover_amp(chan);
1152 		l2cap_send_conn_req(chan);
/* Progress a channel toward connected. LE links are ready immediately.
 * On BR/EDR: if the feature-mask exchange is done and security passes with
 * no connect pending, start the connection; if the exchange hasn't been
 * started yet, send an Information Request (feature mask) with a timeout.
 */
1156 static void l2cap_do_start(struct l2cap_chan *chan)
1158 	struct l2cap_conn *conn = chan->conn;
1160 	if (conn->hcon->type == LE_LINK) {
1161 		l2cap_chan_ready(chan);
1165 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1166 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1169 		if (l2cap_chan_check_security(chan) &&
1170 		    __l2cap_no_conn_pending(chan)) {
1171 			l2cap_start_connection(chan);
1174 		struct l2cap_info_req req;
1175 		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1177 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1178 		conn->info_ident = l2cap_get_ident(conn);
1180 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1182 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Non-zero when both local and remote feature masks support the requested
 * mode (ERTM or streaming). NOTE(review): the guard that conditionally
 * enables ERTM/streaming in the local mask is elided from this fragment.
 */
1187 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1189 	u32 local_feat_mask = l2cap_feat_mask;
1191 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1194 	case L2CAP_MODE_ERTM:
1195 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1196 	case L2CAP_MODE_STREAMING:
1197 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate disconnection: stop ERTM timers on a connected ERTM channel;
 * A2MP fixed channels skip the wire request and just change state;
 * otherwise send a Disconnect Request, then move to BT_DISCONN and record
 * the error on the socket.
 */
1203 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1205 	struct sock *sk = chan->sk;
1206 	struct l2cap_conn *conn = chan->conn;
1207 	struct l2cap_disconn_req req;
1212 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1213 		__clear_retrans_timer(chan);
1214 		__clear_monitor_timer(chan);
1215 		__clear_ack_timer(chan);
1218 	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1219 		l2cap_state_change(chan, BT_DISCONN);
1223 	req.dcid = cpu_to_le16(chan->dcid);
1224 	req.scid = cpu_to_le16(chan->scid);
1225 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1229 	__l2cap_state_change(chan, BT_DISCONN);
1230 	__l2cap_chan_set_err(chan, err);
1234 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on the connection and push each
 * one forward: BT_CONNECT channels are (re)started once security passes —
 * or closed if their mode is unsupported and the device is state-2 — while
 * BT_CONNECT2 channels get their pending Connection Response (success,
 * authorization-pending for deferred setup, or authentication-pending)
 * followed by the first Configure Request.
 */
1235 static void l2cap_conn_start(struct l2cap_conn *conn)
1237 	struct l2cap_chan *chan, *tmp;
1239 	BT_DBG("conn %p", conn);
1241 	mutex_lock(&conn->chan_lock);
1243 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1244 		struct sock *sk = chan->sk;
1246 		l2cap_chan_lock(chan);
1248 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1249 			l2cap_chan_unlock(chan);
1253 		if (chan->state == BT_CONNECT) {
1254 			if (!l2cap_chan_check_security(chan) ||
1255 			    !__l2cap_no_conn_pending(chan)) {
1256 				l2cap_chan_unlock(chan);
1260 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1261 			    && test_bit(CONF_STATE2_DEVICE,
1262 					&chan->conf_state)) {
1263 				l2cap_chan_close(chan, ECONNRESET);
1264 				l2cap_chan_unlock(chan);
1268 			l2cap_start_connection(chan);
1270 		} else if (chan->state == BT_CONNECT2) {
1271 			struct l2cap_conn_rsp rsp;
1273 			rsp.scid = cpu_to_le16(chan->dcid);
1274 			rsp.dcid = cpu_to_le16(chan->scid);
1276 			if (l2cap_chan_check_security(chan)) {
1278 				if (test_bit(BT_SK_DEFER_SETUP,
1279 					     &bt_sk(sk)->flags)) {
1280 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1281 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1282 					chan->ops->defer(chan);
1285 					__l2cap_state_change(chan, BT_CONFIG);
1286 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1287 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1291 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1292 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1295 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
	/* Only send the first config request once, and only on success. */
1298 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1299 			    rsp.result != L2CAP_CR_SUCCESS) {
1300 				l2cap_chan_unlock(chan);
1304 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1305 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1306 				       l2cap_build_conf_req(chan, buf), buf);
1307 			chan->num_conf_req++;
1310 		l2cap_chan_unlock(chan);
1313 	mutex_unlock(&conn->chan_lock);
1316 /* Find socket with cid and source/destination bdaddr.
1317  * Returns closest match, locked.
/* Global lookup by SCID with address matching. An exact (src, dst) match
 * returns immediately; otherwise the best wildcard (BDADDR_ANY) match seen
 * so far is remembered in c1 and returned after the scan (tail elided from
 * this fragment).
 */
1319 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1323 	struct l2cap_chan *c, *c1 = NULL;
1325 	read_lock(&chan_list_lock);
1327 	list_for_each_entry(c, &chan_list, global_l) {
1328 		struct sock *sk = c->sk;
1330 		if (state && c->state != state)
1333 		if (c->scid == cid) {
1334 			int src_match, dst_match;
1335 			int src_any, dst_any;
1338 			src_match = !bacmp(&bt_sk(sk)->src, src);
1339 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1340 			if (src_match && dst_match) {
1341 				read_unlock(&chan_list_lock);
1346 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1347 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1348 			if ((src_match && dst_any) || (src_any && dst_match) ||
1349 			    (src_any && dst_any))
1354 	read_unlock(&chan_list_lock);
/* LE link came up: if a server is listening on the ATT fixed channel (and
 * no client ATT channel already exists on this connection, which takes
 * precedence), spawn a child channel bound to the connection's addresses
 * and attach it. Error paths are elided from this fragment.
 */
1359 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1361 	struct sock *parent;
1362 	struct l2cap_chan *chan, *pchan;
1366 	/* Check if we have socket listening on cid */
1367 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1368 					  conn->src, conn->dst);
1372 	/* Client ATT sockets should override the server one */
1373 	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1380 	chan = pchan->ops->new_connection(pchan);
1384 	chan->dcid = L2CAP_CID_ATT;
1386 	bacpy(&bt_sk(chan->sk)->src, conn->src);
1387 	bacpy(&bt_sk(chan->sk)->dst, conn->dst);
1389 	__l2cap_chan_add(conn, chan);
1392 	release_sock(parent);
/* Link-level connection is ready. For outgoing LE pairing without a
 * socket, request security directly. Then for each channel: A2MP fixed
 * channels are skipped; LE channels become ready once SMP security is
 * satisfied; connectionless/raw channels go straight to BT_CONNECTED; and
 * channels in BT_CONNECT continue via l2cap_do_start().
 */
1395 static void l2cap_conn_ready(struct l2cap_conn *conn)
1397 	struct l2cap_chan *chan;
1398 	struct hci_conn *hcon = conn->hcon;
1400 	BT_DBG("conn %p", conn);
1402 	/* For outgoing pairing which doesn't necessarily have an
1403 	 * associated socket (e.g. mgmt_pair_device).
1405 	if (hcon->out && hcon->type == LE_LINK)
1406 		smp_conn_security(hcon, hcon->pending_sec_level);
1408 	mutex_lock(&conn->chan_lock);
1410 	if (hcon->type == LE_LINK)
1411 		l2cap_le_conn_ready(conn);
1413 	list_for_each_entry(chan, &conn->chan_l, list) {
1415 		l2cap_chan_lock(chan);
1417 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1418 			l2cap_chan_unlock(chan);
1422 		if (hcon->type == LE_LINK) {
1423 			if (smp_conn_security(hcon, chan->sec_level))
1424 				l2cap_chan_ready(chan);
1426 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1427 			struct sock *sk = chan->sk;
1428 			__clear_chan_timer(chan);
1430 			__l2cap_state_change(chan, BT_CONNECTED);
1431 			sk->sk_state_change(sk);
1434 		} else if (chan->state == BT_CONNECT) {
1435 			l2cap_do_start(chan);
1438 		l2cap_chan_unlock(chan);
1441 	mutex_unlock(&conn->chan_lock);
1444 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate 'err' to every channel that requested forced reliability. */
1445 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1447 	struct l2cap_chan *chan;
1449 	BT_DBG("conn %p", conn);
1451 	mutex_lock(&conn->chan_lock);
1453 	list_for_each_entry(chan, &conn->chan_l, list) {
1454 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1455 			l2cap_chan_set_err(chan, err);
1458 	mutex_unlock(&conn->chan_lock);
/* Information request timed out: give up on the feature-mask exchange
 * and let pending channels proceed without it.
 */
1461 static void l2cap_info_timeout(struct work_struct *work)
1463 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1466 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1467 conn->info_ident = 0;
1469 l2cap_conn_start(conn);
1474 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1475 * callback is called during registration. The ->remove callback is called
1476 * during unregistration.
1477 * An l2cap_user object can either be explicitly unregistered or when the
1478 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1479 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1480 * External modules must own a reference to the l2cap_conn object if they intend
1481 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1482 * any time if they don't.
/* Register an external l2cap_user on @conn; calls user->probe() under
 * the hci_dev lock.  (Excerpt elided — return paths are missing.)
 */
1485 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1487 struct hci_dev *hdev = conn->hcon->hdev;
1490 /* We need to check whether l2cap_conn is registered. If it is not, we
1491 * must not register the l2cap_user. l2cap_conn_del() unregisters
1492 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1493 * relies on the parent hci_conn object to be locked. This itself relies
1494 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean the user is already registered. */
1499 if (user->list.next || user->list.prev) {
1504 /* conn->hchan is NULL after l2cap_conn_del() was called */
1510 ret = user->probe(conn, user);
1514 list_add(&user->list, &conn->users);
1518 hci_dev_unlock(hdev);
1521 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user and invoke its
 * ->remove() callback, all under the hci_dev lock.
 */
1523 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1525 struct hci_dev *hdev = conn->hcon->hdev;
/* NULLed list pointers mean "not registered" — nothing to do. */
1529 if (!user->list.next || !user->list.prev)
1532 list_del(&user->list);
/* Clear the pointers so a repeated unregister is detected above. */
1533 user->list.next = NULL;
1534 user->list.prev = NULL;
1535 user->remove(conn, user);
1538 hci_dev_unlock(hdev);
1540 EXPORT_SYMBOL(l2cap_unregister_user);
/* Drain conn->users, invoking each user's ->remove() callback.  Same
 * pointer-clearing protocol as l2cap_unregister_user().
 */
1542 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1544 struct l2cap_user *user;
1546 while (!list_empty(&conn->users)) {
1547 user = list_first_entry(&conn->users, struct l2cap_user, list);
1548 list_del(&user->list);
1549 user->list.next = NULL;
1550 user->list.prev = NULL;
1551 user->remove(conn, user);
/* Tear down an L2CAP connection: notify users, delete every channel
 * with @err, cancel timers and drop the conn reference.
 * (Excerpt elided — some rows are missing.)
 */
1555 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1557 struct l2cap_conn *conn = hcon->l2cap_data;
1558 struct l2cap_chan *chan, *l;
1563 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1565 kfree_skb(conn->rx_skb);
1567 l2cap_unregister_all_users(conn);
1569 mutex_lock(&conn->chan_lock);
/* Hold each channel so ->close() can run after l2cap_chan_del(). */
1572 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1573 l2cap_chan_hold(chan);
1574 l2cap_chan_lock(chan);
1576 l2cap_chan_del(chan, err);
1578 l2cap_chan_unlock(chan);
1580 chan->ops->close(chan);
1581 l2cap_chan_put(chan);
1584 mutex_unlock(&conn->chan_lock);
1586 hci_chan_del(conn->hchan);
1588 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1589 cancel_delayed_work_sync(&conn->info_timer);
1591 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1592 cancel_delayed_work_sync(&conn->security_timer);
1593 smp_chan_destroy(conn);
1596 hcon->l2cap_data = NULL;
/* Drop the reference taken when the conn was created. */
1598 l2cap_conn_put(conn);
/* SMP security procedure timed out on an LE link: destroy the SMP
 * context and tear the whole connection down with ETIMEDOUT.
 */
1601 static void security_timeout(struct work_struct *work)
1603 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1604 security_timer.work);
1606 BT_DBG("conn %p", conn);
1608 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1609 smp_chan_destroy(conn);
1610 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialise the l2cap_conn for @hcon (MTU from the HCI
 * type, lists, locks, timers).  (Excerpt elided — NULL checks and the
 * final return are among the missing rows.)
 */
1614 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1616 struct l2cap_conn *conn = hcon->l2cap_data;
1617 struct hci_chan *hchan;
1622 hchan = hci_chan_create(hcon);
1626 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: release the hci_chan created above. */
1628 hci_chan_del(hchan);
1632 kref_init(&conn->ref);
1633 hcon->l2cap_data = conn;
/* Hold the hci_conn for the lifetime of this l2cap_conn. */
1635 hci_conn_get(conn->hcon);
1636 conn->hchan = hchan;
1638 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1640 switch (hcon->type) {
1642 if (hcon->hdev->le_mtu) {
1643 conn->mtu = hcon->hdev->le_mtu;
1648 conn->mtu = hcon->hdev->acl_mtu;
1652 conn->src = &hcon->hdev->bdaddr;
1653 conn->dst = &hcon->dst;
1655 conn->feat_mask = 0;
1657 if (hcon->type == ACL_LINK)
1658 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1659 &hcon->hdev->dev_flags);
1661 spin_lock_init(&conn->lock);
1662 mutex_init(&conn->chan_lock);
1664 INIT_LIST_HEAD(&conn->chan_l);
1665 INIT_LIST_HEAD(&conn->users);
/* LE links use the SMP security timer, BR/EDR the info-req timer. */
1667 if (hcon->type == LE_LINK)
1668 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1670 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1672 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release: drop the hci_conn reference taken in l2cap_conn_add().
 * (Excerpt elided — the kfree of conn is among the missing rows.)
 */
1677 static void l2cap_conn_free(struct kref *ref)
1679 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1681 hci_conn_put(conn->hcon);
/* Take a reference on @conn. */
1685 void l2cap_conn_get(struct l2cap_conn *conn)
1687 kref_get(&conn->ref);
1689 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
1691 void l2cap_conn_put(struct l2cap_conn *conn)
1693 kref_put(&conn->ref, l2cap_conn_free);
1695 EXPORT_SYMBOL(l2cap_conn_put);
1697 /* ---- Socket interface ---- */
1699 /* Find socket with psm and source / destination bdaddr.
1700 * Returns closest match.
/* Exact src+dst match wins immediately; otherwise remember the best
 * wildcard (BDADDR_ANY) candidate.  (Excerpt elided — the assignment
 * to c1 and final return are among the missing rows.)
 */
1702 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1706 struct l2cap_chan *c, *c1 = NULL;
1708 read_lock(&chan_list_lock);
1710 list_for_each_entry(c, &chan_list, global_l) {
1711 struct sock *sk = c->sk;
1713 if (state && c->state != state)
1716 if (c->psm == psm) {
1717 int src_match, dst_match;
1718 int src_any, dst_any;
1721 src_match = !bacmp(&bt_sk(sk)->src, src);
1722 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1723 if (src_match && dst_match) {
1724 read_unlock(&chan_list_lock);
1729 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1730 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1731 if ((src_match && dst_any) || (src_any && dst_match) ||
1732 (src_any && dst_any))
1737 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan to @dst: validate PSM/CID
 * and mode, create the HCI link (ACL or LE), attach the channel to the
 * resulting l2cap_conn and start the connect procedure.
 * (Excerpt elided — error labels and several returns are missing.)
 */
1742 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1743 bdaddr_t *dst, u8 dst_type)
1745 struct sock *sk = chan->sk;
1746 bdaddr_t *src = &bt_sk(sk)->src;
1747 struct l2cap_conn *conn;
1748 struct hci_conn *hcon;
1749 struct hci_dev *hdev;
1753 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1754 dst_type, __le16_to_cpu(psm));
1756 hdev = hci_get_route(dst, src);
1758 return -EHOSTUNREACH;
1762 l2cap_chan_lock(chan);
1764 /* PSM must be odd and lsb of upper byte must be 0 */
1765 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1766 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1771 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1776 switch (chan->mode) {
1777 case L2CAP_MODE_BASIC:
1779 case L2CAP_MODE_ERTM:
1780 case L2CAP_MODE_STREAMING:
1789 switch (chan->state) {
1793 /* Already connecting */
1798 /* Already connected */
1812 /* Set destination address and psm */
1814 bacpy(&bt_sk(sk)->dst, dst);
1820 auth_type = l2cap_get_auth_type(chan);
1822 if (bdaddr_type_is_le(dst_type))
1823 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1824 chan->sec_level, auth_type);
1826 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1827 chan->sec_level, auth_type);
1830 err = PTR_ERR(hcon);
1834 conn = l2cap_conn_add(hcon);
1836 hci_conn_drop(hcon);
/* Refuse a CID already in use on this connection. */
1841 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1842 hci_conn_drop(hcon);
1847 /* Update source addr of the socket */
1848 bacpy(src, conn->src);
/* l2cap_chan_add() takes conn->chan_lock, so drop the chan lock
 * around it to preserve lock ordering.
 */
1850 l2cap_chan_unlock(chan);
1851 l2cap_chan_add(conn, chan);
1852 l2cap_chan_lock(chan);
1854 /* l2cap_chan_add takes its own ref so we can drop this one */
1855 hci_conn_drop(hcon);
1857 l2cap_state_change(chan, BT_CONNECT);
1858 __set_chan_timer(chan, sk->sk_sndtimeo);
1860 if (hcon->state == BT_CONNECTED) {
1861 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1862 __clear_chan_timer(chan);
1863 if (l2cap_chan_check_security(chan))
1864 l2cap_state_change(chan, BT_CONNECTED);
1866 l2cap_do_start(chan);
1872 l2cap_chan_unlock(chan);
1873 hci_dev_unlock(hdev);
/* Block (interruptibly) until all outstanding ERTM frames are acked or
 * the channel loses its connection.  (Excerpt elided — timeout setup
 * and loop exits are among the missing rows.)
 */
1878 int __l2cap_wait_ack(struct sock *sk)
1880 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1881 DECLARE_WAITQUEUE(wait, current);
1885 add_wait_queue(sk_sleep(sk), &wait);
1886 set_current_state(TASK_INTERRUPTIBLE);
1887 while (chan->unacked_frames > 0 && chan->conn) {
1891 if (signal_pending(current)) {
1892 err = sock_intr_errno(timeo);
1897 timeo = schedule_timeout(timeo);
/* Re-arm the state: schedule_timeout() left us TASK_RUNNING. */
1899 set_current_state(TASK_INTERRUPTIBLE);
1901 err = sock_error(sk);
1905 set_current_state(TASK_RUNNING);
1906 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer fired: feed L2CAP_EV_MONITOR_TO into the tx state
 * machine.  (Excerpt elided — the early-exit condition guarded by the
 * first unlock/put pair is missing.)
 */
1910 static void l2cap_monitor_timeout(struct work_struct *work)
1912 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1913 monitor_timer.work);
1915 BT_DBG("chan %p", chan);
1917 l2cap_chan_lock(chan);
1920 l2cap_chan_unlock(chan);
1921 l2cap_chan_put(chan);
1925 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1927 l2cap_chan_unlock(chan);
/* Drop the reference held by the armed timer. */
1928 l2cap_chan_put(chan);
/* ERTM retransmission timer fired: feed L2CAP_EV_RETRANS_TO into the
 * tx state machine.  (Excerpt elided — mirrors l2cap_monitor_timeout.)
 */
1931 static void l2cap_retrans_timeout(struct work_struct *work)
1933 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1934 retrans_timer.work);
1936 BT_DBG("chan %p", chan);
1938 l2cap_chan_lock(chan);
1941 l2cap_chan_unlock(chan);
1942 l2cap_chan_put(chan);
1946 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1947 l2cap_chan_unlock(chan);
/* Drop the reference held by the armed timer. */
1948 l2cap_chan_put(chan);
/* Streaming mode: stamp sequence numbers (and FCS if negotiated) on
 * each queued PDU and transmit immediately — no acks, no retransmit.
 */
1951 static void l2cap_streaming_send(struct l2cap_chan *chan,
1952 struct sk_buff_head *skbs)
1954 struct sk_buff *skb;
1955 struct l2cap_ctrl *control;
1957 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
1959 if (__chan_is_moving(chan))
1962 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1964 while (!skb_queue_empty(&chan->tx_q)) {
1966 skb = skb_dequeue(&chan->tx_q);
1968 bt_cb(skb)->control.retries = 1;
1969 control = &bt_cb(skb)->control;
1971 control->reqseq = 0;
1972 control->txseq = chan->next_tx_seq;
1974 __pack_control(chan, control, skb);
1976 if (chan->fcs == L2CAP_FCS_CRC16) {
1977 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1978 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1981 l2cap_do_send(chan, skb);
1983 BT_DBG("Sent txseq %u", control->txseq);
1985 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1986 chan->frames_sent++;
/* ERTM: transmit queued I-frames while the remote window has room and
 * the tx state machine is in XMIT.  Frames stay on tx_q for possible
 * retransmission; clones are sent.  (Excerpt elided — return rows and
 * the `sent` counter updates are among the missing lines.)
 */
1990 static int l2cap_ertm_send(struct l2cap_chan *chan)
1992 struct sk_buff *skb, *tx_skb;
1993 struct l2cap_ctrl *control;
1996 BT_DBG("chan %p", chan);
1998 if (chan->state != BT_CONNECTED)
2001 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2004 if (__chan_is_moving(chan))
2007 while (chan->tx_send_head &&
2008 chan->unacked_frames < chan->remote_tx_win &&
2009 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2011 skb = chan->tx_send_head;
2013 bt_cb(skb)->control.retries = 1;
2014 control = &bt_cb(skb)->control;
/* Piggy-back the F-bit if one is pending. */
2016 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2019 control->reqseq = chan->buffer_seq;
2020 chan->last_acked_seq = chan->buffer_seq;
2021 control->txseq = chan->next_tx_seq;
2023 __pack_control(chan, control, skb);
2025 if (chan->fcs == L2CAP_FCS_CRC16) {
2026 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2027 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2030 /* Clone after data has been modified. Data is assumed to be
2031 read-only (for locking purposes) on cloned sk_buffs.
2033 tx_skb = skb_clone(skb, GFP_KERNEL);
2038 __set_retrans_timer(chan);
2040 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2041 chan->unacked_frames++;
2042 chan->frames_sent++;
2045 if (skb_queue_is_last(&chan->tx_q, skb))
2046 chan->tx_send_head = NULL;
2048 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2050 l2cap_do_send(chan, tx_skb);
2051 BT_DBG("Sent txseq %u", control->txseq);
2054 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2055 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list,
 * refreshing reqseq/F-bit and FCS, enforcing the max_tx retry limit.
 * (Excerpt elided — some control-flow rows are missing.)
 */
2060 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2062 struct l2cap_ctrl control;
2063 struct sk_buff *skb;
2064 struct sk_buff *tx_skb;
2067 BT_DBG("chan %p", chan);
2069 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2072 if (__chan_is_moving(chan))
2075 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2076 seq = l2cap_seq_list_pop(&chan->retrans_list);
2078 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2080 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2085 bt_cb(skb)->control.retries++;
2086 control = bt_cb(skb)->control;
/* max_tx == 0 means unlimited retries; otherwise disconnect on
 * exceeding the negotiated limit.
 */
2088 if (chan->max_tx != 0 &&
2089 bt_cb(skb)->control.retries > chan->max_tx) {
2090 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2091 l2cap_send_disconn_req(chan, ECONNRESET);
2092 l2cap_seq_list_clear(&chan->retrans_list);
2096 control.reqseq = chan->buffer_seq;
2097 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2102 if (skb_cloned(skb)) {
2103 /* Cloned sk_buffs are read-only, so we need a
2106 tx_skb = skb_copy(skb, GFP_KERNEL);
2108 tx_skb = skb_clone(skb, GFP_KERNEL);
2112 l2cap_seq_list_clear(&chan->retrans_list);
2116 /* Update skb contents */
2117 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2118 put_unaligned_le32(__pack_extended_control(&control),
2119 tx_skb->data + L2CAP_HDR_SIZE);
2121 put_unaligned_le16(__pack_enhanced_control(&control),
2122 tx_skb->data + L2CAP_HDR_SIZE);
2125 if (chan->fcs == L2CAP_FCS_CRC16) {
2126 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2127 put_unaligned_le16(fcs, skb_put(tx_skb,
2131 l2cap_do_send(chan, tx_skb);
2133 BT_DBG("Resent txseq %d", control.txseq);
2135 chan->last_acked_seq = chan->buffer_seq;
/* Queue a single sequence number (control->reqseq) for retransmission
 * and flush the retransmit list.
 */
2139 static void l2cap_retransmit(struct l2cap_chan *chan,
2140 struct l2cap_ctrl *control)
2142 BT_DBG("chan %p, control %p", chan, control);
2144 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2145 l2cap_ertm_resend(chan);
/* Rebuild the retransmit list with every unacked frame starting at
 * control->reqseq, then resend them all.  (Excerpt elided.)
 */
2148 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2149 struct l2cap_ctrl *control)
2151 struct sk_buff *skb;
2153 BT_DBG("chan %p, control %p", chan, control);
2156 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2158 l2cap_seq_list_clear(&chan->retrans_list);
2160 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2163 if (chan->unacked_frames) {
/* Find the first frame at reqseq (or the current send head). */
2164 skb_queue_walk(&chan->tx_q, skb) {
2165 if (bt_cb(skb)->control.txseq == control->reqseq ||
2166 skb == chan->tx_send_head)
/* Queue everything from there up to (not including) the head. */
2170 skb_queue_walk_from(&chan->tx_q, skb) {
2171 if (skb == chan->tx_send_head)
2174 l2cap_seq_list_append(&chan->retrans_list,
2175 bt_cb(skb)->control.txseq);
2178 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, piggyback
 * acks on pending I-frames when possible, send an explicit RR when the
 * receive window is 3/4 consumed, else (re)arm the ack timer.
 * (Excerpt elided — some rows are missing.)
 */
2182 static void l2cap_send_ack(struct l2cap_chan *chan)
2184 struct l2cap_ctrl control;
2185 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2186 chan->last_acked_seq);
2189 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2190 chan, chan->last_acked_seq, chan->buffer_seq);
2192 memset(&control, 0, sizeof(control));
2195 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2196 chan->rx_state == L2CAP_RX_STATE_RECV) {
2197 __clear_ack_timer(chan);
2198 control.super = L2CAP_SUPER_RNR;
2199 control.reqseq = chan->buffer_seq;
2200 l2cap_send_sframe(chan, &control);
2202 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2203 l2cap_ertm_send(chan);
2204 /* If any i-frames were sent, they included an ack */
2205 if (chan->buffer_seq == chan->last_acked_seq)
2209 /* Ack now if the window is 3/4ths full.
2210 * Calculate without mul or div
2212 threshold = chan->ack_win;
/* threshold * 3 == threshold + (threshold << 1). */
2213 threshold += threshold << 1;
2216 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2219 if (frames_to_ack >= threshold) {
2220 __clear_ack_timer(chan);
2221 control.super = L2CAP_SUPER_RR;
2222 control.reqseq = chan->buffer_seq;
2223 l2cap_send_sframe(chan, &control);
2228 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb, allocating chained
 * fragment skbs (no L2CAP header) when @len exceeds @count.
 * (Excerpt elided — return rows and len/count bookkeeping missing.)
 */
2232 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2233 struct msghdr *msg, int len,
2234 int count, struct sk_buff *skb)
2236 struct l2cap_conn *conn = chan->conn;
2237 struct sk_buff **frag;
2240 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2246 /* Continuation fragments (no L2CAP header) */
2247 frag = &skb_shinfo(skb)->frag_list;
2249 struct sk_buff *tmp;
/* Each fragment is bounded by the connection MTU. */
2251 count = min_t(unsigned int, conn->mtu, len);
2253 tmp = chan->ops->alloc_skb(chan, count,
2254 msg->msg_flags & MSG_DONTWAIT);
2256 return PTR_ERR(tmp);
2260 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2263 (*frag)->priority = skb->priority;
/* Account fragment bytes on the head skb. */
2268 skb->len += (*frag)->len;
2269 skb->data_len += (*frag)->len;
2271 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload
 * copied from the user iovec.  Returns the skb or an ERR_PTR.
 */
2277 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2278 struct msghdr *msg, size_t len,
2281 struct l2cap_conn *conn = chan->conn;
2282 struct sk_buff *skb;
2283 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2284 struct l2cap_hdr *lh;
2286 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2288 count = min_t(unsigned int, (conn->mtu - hlen), len);
2290 skb = chan->ops->alloc_skb(chan, count + hlen,
2291 msg->msg_flags & MSG_DONTWAIT);
2295 skb->priority = priority;
2297 /* Create L2CAP header */
2298 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2299 lh->cid = cpu_to_le16(chan->dcid);
2300 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2301 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2303 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2304 if (unlikely(err < 0)) {
2306 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload from
 * the user iovec.  Returns the skb or an ERR_PTR.
 */
2311 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2312 struct msghdr *msg, size_t len,
2315 struct l2cap_conn *conn = chan->conn;
2316 struct sk_buff *skb;
2318 struct l2cap_hdr *lh;
2320 BT_DBG("chan %p len %zu", chan, len);
2322 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2324 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2325 msg->msg_flags & MSG_DONTWAIT);
2329 skb->priority = priority;
2331 /* Create L2CAP header */
2332 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2333 lh->cid = cpu_to_le16(chan->dcid);
2334 lh->len = cpu_to_le16(len);
2336 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2337 if (unlikely(err < 0)) {
2339 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (populated at transmit time), optional SDU-length field, and
 * room reserved for the FCS.  Returns the skb or an ERR_PTR.
 * (Excerpt elided — the sdulen condition rows are missing.)
 */
2344 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2345 struct msghdr *msg, size_t len,
2348 struct l2cap_conn *conn = chan->conn;
2349 struct sk_buff *skb;
2350 int err, count, hlen;
2351 struct l2cap_hdr *lh;
2353 BT_DBG("chan %p len %zu", chan, len);
2356 return ERR_PTR(-ENOTCONN);
2358 hlen = __ertm_hdr_size(chan);
2361 hlen += L2CAP_SDULEN_SIZE;
2363 if (chan->fcs == L2CAP_FCS_CRC16)
2364 hlen += L2CAP_FCS_SIZE;
2366 count = min_t(unsigned int, (conn->mtu - hlen), len);
2368 skb = chan->ops->alloc_skb(chan, count + hlen,
2369 msg->msg_flags & MSG_DONTWAIT);
2373 /* Create L2CAP header */
2374 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2375 lh->cid = cpu_to_le16(chan->dcid);
2376 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2378 /* Control header is populated later */
2379 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2380 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2382 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2385 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2387 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2388 if (unlikely(err < 0)) {
2390 return ERR_PTR(err);
2393 bt_cb(skb)->control.fcs = chan->fcs;
2394 bt_cb(skb)->control.retries = 0;
/* Segment an SDU into I-frame PDUs sized to fit one HCI fragment each,
 * tagging frames START/CONTINUE/END (or UNSEGMENTED) and queueing them
 * on @seg_queue.  (Excerpt elided — loop framing rows are missing.)
 */
2398 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2399 struct sk_buff_head *seg_queue,
2400 struct msghdr *msg, size_t len)
2402 struct sk_buff *skb;
2407 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2409 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2410 * so fragmented skbs are not used. The HCI layer's handling
2411 * of fragmented skbs is not compatible with ERTM's queueing.
2414 /* PDU size is derived from the HCI MTU */
2415 pdu_len = chan->conn->mtu;
2417 /* Constrain PDU size for BR/EDR connections */
2419 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2421 /* Adjust for largest possible L2CAP overhead. */
2423 pdu_len -= L2CAP_FCS_SIZE;
2425 pdu_len -= __ertm_hdr_size(chan);
2427 /* Remote device may have requested smaller PDUs */
2428 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2430 if (len <= pdu_len) {
2431 sar = L2CAP_SAR_UNSEGMENTED;
2435 sar = L2CAP_SAR_START;
/* The START frame also carries the 2-byte SDU length. */
2437 pdu_len -= L2CAP_SDULEN_SIZE;
2441 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2444 __skb_queue_purge(seg_queue);
2445 return PTR_ERR(skb);
2448 bt_cb(skb)->control.sar = sar;
2449 __skb_queue_tail(seg_queue, skb);
2454 pdu_len += L2CAP_SDULEN_SIZE;
2457 if (len <= pdu_len) {
2458 sar = L2CAP_SAR_END;
2461 sar = L2CAP_SAR_CONTINUE;
/* Entry point for sending user data on @chan: dispatch on channel type
 * and mode (connectionless, basic, ERTM, streaming), enforcing the
 * outgoing MTU.  (Excerpt elided — error/return rows are missing.)
 */
2468 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2471 struct sk_buff *skb;
2473 struct sk_buff_head seg_queue;
2475 /* Connectionless channel */
2476 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2477 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2479 return PTR_ERR(skb);
2481 l2cap_do_send(chan, skb);
2485 switch (chan->mode) {
2486 case L2CAP_MODE_BASIC:
2487 /* Check outgoing MTU */
2488 if (len > chan->omtu)
2491 /* Create a basic PDU */
2492 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2494 return PTR_ERR(skb);
2496 l2cap_do_send(chan, skb);
2500 case L2CAP_MODE_ERTM:
2501 case L2CAP_MODE_STREAMING:
2502 /* Check outgoing MTU */
2503 if (len > chan->omtu) {
2508 __skb_queue_head_init(&seg_queue);
2510 /* Do segmentation before calling in to the state machine,
2511 * since it's possible to block while waiting for memory
2514 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2516 /* The channel could have been closed while segmenting,
2517 * check that it is still connected.
2519 if (chan->state != BT_CONNECTED) {
2520 __skb_queue_purge(&seg_queue);
2527 if (chan->mode == L2CAP_MODE_ERTM)
2528 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2530 l2cap_streaming_send(chan, &seg_queue);
2534 /* If the skbs were not queued for sending, they'll still be in
2535 * seg_queue and need to be purged.
2537 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode. */
2541 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every missing sequence number between the
 * expected tx seq and @txseq, recording each on the srej_list.
 */
2548 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2550 struct l2cap_ctrl control;
2553 BT_DBG("chan %p, txseq %u", chan, txseq);
2555 memset(&control, 0, sizeof(control));
2557 control.super = L2CAP_SUPER_SREJ;
2559 for (seq = chan->expected_tx_seq; seq != txseq;
2560 seq = __next_seq(chan, seq)) {
/* Skip frames already buffered out-of-order in srej_q. */
2561 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2562 control.reqseq = seq;
2563 l2cap_send_sframe(chan, &control);
2564 l2cap_seq_list_append(&chan->srej_list, seq);
2568 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number, if the srej list is non-empty.
 */
2571 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2573 struct l2cap_ctrl control;
2575 BT_DBG("chan %p", chan);
2577 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2580 memset(&control, 0, sizeof(control));
2582 control.super = L2CAP_SUPER_SREJ;
2583 control.reqseq = chan->srej_list.tail;
2584 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding sequence number except @txseq,
 * cycling each entry back onto the list (one full pass only).
 */
2587 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2589 struct l2cap_ctrl control;
2593 BT_DBG("chan %p, txseq %u", chan, txseq);
2595 memset(&control, 0, sizeof(control));
2597 control.super = L2CAP_SUPER_SREJ;
2599 /* Capture initial list head to allow only one pass through the list. */
2600 initial_head = chan->srej_list.head;
2603 seq = l2cap_seq_list_pop(&chan->srej_list);
2604 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2607 control.reqseq = seq;
2608 l2cap_send_sframe(chan, &control);
/* Re-append: the frame is still outstanding. */
2609 l2cap_seq_list_append(&chan->srej_list, seq);
2610 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every tx-queued frame up
 * to (not including) @reqseq and stop the retransmit timer when no
 * frames remain unacked.
 */
2613 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2615 struct sk_buff *acked_skb;
2618 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or a duplicate ack — nothing to do. */
2620 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2623 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2624 chan->expected_ack_seq, chan->unacked_frames);
2626 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2627 ackseq = __next_seq(chan, ackseq)) {
2629 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2631 skb_unlink(acked_skb, &chan->tx_q);
2632 kfree_skb(acked_skb);
2633 chan->unacked_frames--;
2637 chan->expected_ack_seq = reqseq;
2639 if (chan->unacked_frames == 0)
2640 __clear_retrans_timer(chan);
2642 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: reset the expected sequence,
 * drop buffered out-of-order frames and return to plain RECV.
 */
2645 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2647 BT_DBG("chan %p", chan);
2649 chan->expected_tx_seq = chan->buffer_seq;
2650 l2cap_seq_list_clear(&chan->srej_list);
2651 skb_queue_purge(&chan->srej_q);
2652 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine, XMIT state: handle data requests, local-busy
 * transitions, acks and poll/retransmission-timeout events.  Poll and
 * retrans-timeout events move the machine to WAIT_F.
 * (Excerpt elided — break statements are among the missing rows.)
 */
2655 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2656 struct l2cap_ctrl *control,
2657 struct sk_buff_head *skbs, u8 event)
2659 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2663 case L2CAP_EV_DATA_REQUEST:
2664 if (chan->tx_send_head == NULL)
2665 chan->tx_send_head = skb_peek(skbs);
2667 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2668 l2cap_ertm_send(chan);
2670 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2671 BT_DBG("Enter LOCAL_BUSY");
2672 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2674 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2675 /* The SREJ_SENT state must be aborted if we are to
2676 * enter the LOCAL_BUSY state.
2678 l2cap_abort_rx_srej_sent(chan);
2681 l2cap_send_ack(chan);
2684 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2685 BT_DBG("Exit LOCAL_BUSY");
2686 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2688 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2689 struct l2cap_ctrl local_control;
2691 memset(&local_control, 0, sizeof(local_control));
/* RR with P=1 tells the peer we are ready again. */
2692 local_control.sframe = 1;
2693 local_control.super = L2CAP_SUPER_RR;
2694 local_control.poll = 1;
2695 local_control.reqseq = chan->buffer_seq;
2696 l2cap_send_sframe(chan, &local_control);
2698 chan->retry_count = 1;
2699 __set_monitor_timer(chan);
2700 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2703 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2704 l2cap_process_reqseq(chan, control->reqseq);
2706 case L2CAP_EV_EXPLICIT_POLL:
2707 l2cap_send_rr_or_rnr(chan, 1);
2708 chan->retry_count = 1;
2709 __set_monitor_timer(chan);
2710 __clear_ack_timer(chan);
2711 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2713 case L2CAP_EV_RETRANS_TO:
2714 l2cap_send_rr_or_rnr(chan, 1);
2715 chan->retry_count = 1;
2716 __set_monitor_timer(chan);
2717 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2719 case L2CAP_EV_RECV_FBIT:
2720 /* Nothing to process */
/* ERTM tx state machine, WAIT_F state: queue (but do not send) new
 * data, handle local-busy transitions and acks, return to XMIT when
 * the final (F) bit arrives, and retry or disconnect on monitor
 * timeout.  (Excerpt elided — break statements are among the missing
 * rows.)
 *
 * Fix: the debug format string was "0x2.2%x", which prints a literal
 * "0x2.2" followed by unpadded hex; the intended specifier is
 * "0x%2.2x", matching every other BT_DBG in this file.
 */
2727 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2728 struct l2cap_ctrl *control,
2729 struct sk_buff_head *skbs, u8 event)
2731 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2735 case L2CAP_EV_DATA_REQUEST:
2736 if (chan->tx_send_head == NULL)
2737 chan->tx_send_head = skb_peek(skbs);
2738 /* Queue data, but don't send. */
2739 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2741 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2742 BT_DBG("Enter LOCAL_BUSY");
2743 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2745 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2746 /* The SREJ_SENT state must be aborted if we are to
2747 * enter the LOCAL_BUSY state.
2749 l2cap_abort_rx_srej_sent(chan);
2752 l2cap_send_ack(chan);
2755 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2756 BT_DBG("Exit LOCAL_BUSY");
2757 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2759 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2760 struct l2cap_ctrl local_control;
2761 memset(&local_control, 0, sizeof(local_control));
2762 local_control.sframe = 1;
2763 local_control.super = L2CAP_SUPER_RR;
2764 local_control.poll = 1;
2765 local_control.reqseq = chan->buffer_seq;
2766 l2cap_send_sframe(chan, &local_control);
2768 chan->retry_count = 1;
2769 __set_monitor_timer(chan);
2770 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2773 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2774 l2cap_process_reqseq(chan, control->reqseq);
2778 case L2CAP_EV_RECV_FBIT:
2779 if (control && control->final) {
2780 __clear_monitor_timer(chan);
2781 if (chan->unacked_frames > 0)
2782 __set_retrans_timer(chan);
2783 chan->retry_count = 0;
2784 chan->tx_state = L2CAP_TX_STATE_XMIT;
2785 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2788 case L2CAP_EV_EXPLICIT_POLL:
2791 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means unlimited retries. */
2792 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2793 l2cap_send_rr_or_rnr(chan, 1);
2794 __set_monitor_timer(chan);
2795 chan->retry_count++;
2797 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch an event into the ERTM tx state machine according to the
 * current tx_state.
 */
2805 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2806 struct sk_buff_head *skbs, u8 event)
2808 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2809 chan, control, skbs, event, chan->tx_state);
2811 switch (chan->tx_state) {
2812 case L2CAP_TX_STATE_XMIT:
2813 l2cap_tx_state_xmit(chan, control, skbs, event);
2815 case L2CAP_TX_STATE_WAIT_F:
2816 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received reqseq/F-bit to the tx state machine. */
2824 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2825 struct l2cap_ctrl *control)
2827 BT_DBG("chan %p, control %p", chan, control);
2828 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only the F-bit of a received frame to the tx state machine. */
2831 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2832 struct l2cap_ctrl *control)
2834 BT_DBG("chan %p, control %p", chan, control);
2835 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2838 /* Copy frame to all raw sockets on that connection */
2839 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2841 struct sk_buff *nskb;
2842 struct l2cap_chan *chan;
2844 BT_DBG("conn %p", conn);
2846 mutex_lock(&conn->chan_lock);
2848 list_for_each_entry(chan, &conn->chan_l, list) {
2849 struct sock *sk = chan->sk;
2850 if (chan->chan_type != L2CAP_CHAN_RAW)
2853 /* Don't send frame to the socket it came from */
/* Hand each raw channel its own clone of the frame. */
2856 nskb = skb_clone(skb, GFP_KERNEL);
/* recv() consumed failure means we must free the clone ourselves. */
2860 if (chan->ops->recv(chan, nskb))
2864 mutex_unlock(&conn->chan_lock);
2867 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header on the signalling CID
 * (LE or BR/EDR), command header, then @dlen bytes of @data, chained
 * into MTU-sized fragments if needed.  (Excerpt elided — NULL checks
 * and the fail path are among the missing rows.)
 */
2868 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2869 u8 ident, u16 dlen, void *data)
2871 struct sk_buff *skb, **frag;
2872 struct l2cap_cmd_hdr *cmd;
2873 struct l2cap_hdr *lh;
2876 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2877 conn, code, ident, dlen);
2879 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2882 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2883 count = min_t(unsigned int, conn->mtu, len);
2885 skb = bt_skb_alloc(count, GFP_KERNEL);
2889 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2890 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2892 if (conn->hcon->type == LE_LINK)
2893 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2895 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2897 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2900 cmd->len = cpu_to_le16(dlen);
2903 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2904 memcpy(skb_put(skb, count), data, count);
2910 /* Continuation fragments (no L2CAP header) */
2911 frag = &skb_shinfo(skb)->frag_list;
2913 count = min_t(unsigned int, conn->mtu, len);
2915 *frag = bt_skb_alloc(count, GFP_KERNEL);
2919 memcpy(skb_put(*frag, count), data, count);
2924 frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning its type and value
 * (byte, le16, le32, or a pointer for variable-length options).
 * (Excerpt elided — switch framing and ptr advance are missing rows.)
 */
2934 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2937 struct l2cap_conf_opt *opt = *ptr;
2940 len = L2CAP_CONF_OPT_SIZE + opt->len;
2948 *val = *((u8 *) opt->val);
2952 *val = get_unaligned_le16(opt->val);
2956 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer to the raw bytes. */
2960 *val = (unsigned long) opt->val;
2964 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr, encoding the
 * value by @len (byte, le16, le32, or raw copy), and advance *ptr.
 */
2968 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2970 struct l2cap_conf_opt *opt = *ptr;
2972 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2979 *((u8 *) opt->val) = val;
2983 put_unaligned_le16(val, opt->val);
2987 put_unaligned_le32(val, opt->val);
/* Other lengths: @val is a pointer to the raw option payload. */
2991 memcpy(opt->val, (void *) val, len);
2995 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option describing this
 * channel's local QoS parameters; fields depend on the channel mode.
 */
2998 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3000 struct l2cap_conf_efs efs;
3002 switch (chan->mode) {
3003 case L2CAP_MODE_ERTM:
/* ERTM advertises the locally configured service parameters */
3004 efs.id = chan->local_id;
3005 efs.stype = chan->local_stype;
3006 efs.msdu = cpu_to_le16(chan->local_msdu);
3007 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3008 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3009 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3012 case L2CAP_MODE_STREAMING:
/* Streaming mode always uses best-effort service type */
3014 efs.stype = L2CAP_SERV_BESTEFFORT;
3015 efs.msdu = cpu_to_le16(chan->local_msdu);
3016 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3025 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3026 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if any received frames are
 * still unacknowledged, send an RR/RNR to ack them. Drops the channel
 * reference taken when the timer was armed.
 */
3029 static void l2cap_ack_timeout(struct work_struct *work)
3031 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3035 BT_DBG("chan %p", chan);
3037 l2cap_chan_lock(chan);
/* Frames received since the last ack we sent */
3039 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3040 chan->last_acked_seq);
3043 l2cap_send_rr_or_rnr(chan, 0);
3045 l2cap_chan_unlock(chan);
3046 l2cap_chan_put(chan);
/* Initialize per-channel ERTM/streaming state: sequence counters, queues,
 * AMP move state, timers and the SREJ/retransmit sequence lists. For
 * non-ERTM modes only the common state is reset. Returns 0 or a negative
 * errno if sequence-list allocation fails.
 */
3049 int l2cap_ertm_init(struct l2cap_chan *chan)
3053 chan->next_tx_seq = 0;
3054 chan->expected_tx_seq = 0;
3055 chan->expected_ack_seq = 0;
3056 chan->unacked_frames = 0;
3057 chan->buffer_seq = 0;
3058 chan->frames_sent = 0;
3059 chan->last_acked_seq = 0;
3061 chan->sdu_last_frag = NULL;
3064 skb_queue_head_init(&chan->tx_q);
/* Channel starts on BR/EDR; AMP moves update these later */
3066 chan->local_amp_id = AMP_ID_BREDR;
3067 chan->move_id = AMP_ID_BREDR;
3068 chan->move_state = L2CAP_MOVE_STABLE;
3069 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* The remaining setup is ERTM-only */
3071 if (chan->mode != L2CAP_MODE_ERTM)
3074 chan->rx_state = L2CAP_RX_STATE_RECV;
3075 chan->tx_state = L2CAP_TX_STATE_XMIT;
3077 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3078 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3079 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3081 skb_queue_head_init(&chan->srej_q);
3083 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* On failure of the second list, free the first to avoid a leak */
3087 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3089 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep the requested ERTM/streaming mode if
 * the remote's feature mask supports it, otherwise fall back to basic mode.
 */
3094 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3097 case L2CAP_MODE_STREAMING:
3098 case L2CAP_MODE_ERTM:
3099 if (l2cap_mode_supported(mode, remote_feat_mask))
3103 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed (AMP) enabled and
 * advertised by the remote feature mask. */
3107 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3109 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed (AMP) enabled
 * and advertised by the remote feature mask. */
3112 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3114 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts. On an AMP
 * link they are derived from the controller's best-effort flush timeout;
 * otherwise the spec defaults are used.
 */
3117 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3118 struct l2cap_conf_rfc *rfc)
3120 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3121 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3123 /* Class 1 devices must have ERTM timeouts
3124 * exceeding the Link Supervision Timeout. The
3125 * default Link Supervision Timeout for AMP
3126 * controllers is 10 seconds.
3128 * Class 1 devices use 0xffffffff for their
3129 * best-effort flush timeout, so the clamping logic
3130 * will result in a timeout that meets the above
3131 * requirement. ERTM timeouts are 16-bit values, so
3132 * the maximum timeout is 65.535 seconds.
3135 /* Convert timeout to milliseconds and round */
3136 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3138 /* This is the recommended formula for class 2 devices
3139 * that start ERTM timers when packets are sent to the
3142 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit range the RFC option can carry */
3144 if (ertm_to > 0xffff)
3147 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3148 rfc->monitor_timeout = rfc->retrans_timeout;
/* BR/EDR: use the spec default timeouts */
3150 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3151 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the transmit window: if the requested window exceeds the default
 * and extended windows are supported, switch to the extended control field;
 * otherwise clamp to the standard maximum. ack_win tracks tx_win.
 */
3155 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3157 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3158 __l2cap_ews_supported(chan->conn)) {
3159 /* use extended control field */
3160 set_bit(FLAG_EXT_CTRL, &chan->flags);
3161 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3163 chan->tx_win = min_t(u16, chan->tx_win,
3164 L2CAP_DEFAULT_TX_WINDOW);
3165 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3167 chan->ack_win = chan->tx_win;
/* Build our outgoing Configuration Request into `data` based on the
 * channel's mode (basic / ERTM / streaming), adding MTU, RFC, EFS, EWS and
 * FCS options as applicable. Returns the total request length.
 */
3170 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3172 struct l2cap_conf_req *req = data;
3173 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3174 void *ptr = req->data;
3177 BT_DBG("chan %p", chan);
/* Mode selection happens only on the very first config exchange */
3179 if (chan->num_conf_req || chan->num_conf_rsp)
3182 switch (chan->mode) {
3183 case L2CAP_MODE_STREAMING:
3184 case L2CAP_MODE_ERTM:
/* State-2 devices keep their configured mode */
3185 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3188 if (__l2cap_efs_supported(chan->conn))
3189 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote supports */
3193 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it differs from the default */
3198 if (chan->imtu != L2CAP_DEFAULT_MTU)
3199 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3201 switch (chan->mode) {
3202 case L2CAP_MODE_BASIC:
/* If remote knows neither ERTM nor streaming, RFC is pointless */
3203 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3204 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3207 rfc.mode = L2CAP_MODE_BASIC;
3209 rfc.max_transmit = 0;
3210 rfc.retrans_timeout = 0;
3211 rfc.monitor_timeout = 0;
3212 rfc.max_pdu_size = 0;
3214 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3215 (unsigned long) &rfc);
3218 case L2CAP_MODE_ERTM:
3219 rfc.mode = L2CAP_MODE_ERTM;
3220 rfc.max_transmit = chan->max_tx;
3222 __l2cap_set_ertm_timeouts(chan, &rfc);
/* Cap PDU size so a full frame (with ext hdr/SDU len/FCS) fits the MTU */
3224 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3225 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3227 rfc.max_pdu_size = cpu_to_le16(size);
3229 l2cap_txwin_setup(chan);
3231 rfc.txwin_size = min_t(u16, chan->tx_win,
3232 L2CAP_DEFAULT_TX_WINDOW);
3234 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3235 (unsigned long) &rfc);
3237 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3238 l2cap_add_opt_efs(&ptr, chan);
/* Extended window size is sent as a separate EWS option */
3240 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3241 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to disable FCS when allowed and preferred */
3244 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3245 if (chan->fcs == L2CAP_FCS_NONE ||
3246 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3247 chan->fcs = L2CAP_FCS_NONE;
3248 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3253 case L2CAP_MODE_STREAMING:
3254 l2cap_txwin_setup(chan);
3255 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming has no retransmissions, so these are all zero */
3257 rfc.max_transmit = 0;
3258 rfc.retrans_timeout = 0;
3259 rfc.monitor_timeout = 0;
3261 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3262 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3264 rfc.max_pdu_size = cpu_to_le16(size);
3266 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3267 (unsigned long) &rfc);
3269 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3270 l2cap_add_opt_efs(&ptr, chan);
3272 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3273 if (chan->fcs == L2CAP_FCS_NONE ||
3274 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3275 chan->fcs = L2CAP_FCS_NONE;
3276 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3282 req->dcid = cpu_to_le16(chan->dcid);
3283 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req) and
 * build our Configuration Response into `data`: walk the option list,
 * record/validate MTU, flush timeout, RFC, FCS, EFS and EWS, negotiate the
 * mode, then emit the options we accept or counter-propose. Returns the
 * response length, or a negative errno to refuse the connection.
 */
3288 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3290 struct l2cap_conf_rsp *rsp = data;
3291 void *ptr = rsp->data;
3292 void *req = chan->conf_req;
3293 int len = chan->conf_len;
3294 int type, hint, olen;
3296 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3297 struct l2cap_conf_efs efs;
3299 u16 mtu = L2CAP_DEFAULT_MTU;
3300 u16 result = L2CAP_CONF_SUCCESS;
3303 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent */
3305 while (len >= L2CAP_CONF_OPT_SIZE) {
3306 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory ones must be understood */
3308 hint = type & L2CAP_CONF_HINT;
3309 type &= L2CAP_CONF_MASK;
3312 case L2CAP_CONF_MTU:
3316 case L2CAP_CONF_FLUSH_TO:
3317 chan->flush_to = val;
3320 case L2CAP_CONF_QOS:
3323 case L2CAP_CONF_RFC:
3324 if (olen == sizeof(rfc))
3325 memcpy(&rfc, (void *) val, olen);
3328 case L2CAP_CONF_FCS:
3329 if (val == L2CAP_FCS_NONE)
3330 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3333 case L2CAP_CONF_EFS:
3335 if (olen == sizeof(efs))
3336 memcpy(&efs, (void *) val, olen);
3339 case L2CAP_CONF_EWS:
/* EWS requires high-speed support on our side */
3340 if (!chan->conn->hs_enabled)
3341 return -ECONNREFUSED;
3343 set_bit(FLAG_EXT_CTRL, &chan->flags);
3344 set_bit(CONF_EWS_RECV, &chan->conf_state);
3345 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3346 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as unknown */
3353 result = L2CAP_CONF_UNKNOWN;
3354 *((u8 *) ptr++) = type;
/* Mode negotiation only on the first req/rsp round */
3359 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3362 switch (chan->mode) {
3363 case L2CAP_MODE_STREAMING:
3364 case L2CAP_MODE_ERTM:
3365 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3366 chan->mode = l2cap_select_mode(rfc.mode,
3367 chan->conn->feat_mask);
3372 if (__l2cap_efs_supported(chan->conn))
3373 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3375 return -ECONNREFUSED;
3378 if (chan->mode != rfc.mode)
3379 return -ECONNREFUSED;
/* Peer proposed a different mode: counter once, then refuse */
3385 if (chan->mode != rfc.mode) {
3386 result = L2CAP_CONF_UNACCEPT;
3387 rfc.mode = chan->mode;
3389 if (chan->num_conf_rsp == 1)
3390 return -ECONNREFUSED;
3392 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3393 (unsigned long) &rfc);
3396 if (result == L2CAP_CONF_SUCCESS) {
3397 /* Configure output options and let the other side know
3398 * which ones we don't like. */
3400 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3401 result = L2CAP_CONF_UNACCEPT;
3404 set_bit(CONF_MTU_DONE, &chan->conf_state);
3406 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type mismatch: counter with ours, refuse on repeat */
3409 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3410 efs.stype != L2CAP_SERV_NOTRAFIC &&
3411 efs.stype != chan->local_stype) {
3413 result = L2CAP_CONF_UNACCEPT;
3415 if (chan->num_conf_req >= 1)
3416 return -ECONNREFUSED;
3418 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3420 (unsigned long) &efs);
3422 /* Send PENDING Conf Rsp */
3423 result = L2CAP_CONF_PENDING;
3424 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3429 case L2CAP_MODE_BASIC:
3430 chan->fcs = L2CAP_FCS_NONE;
3431 set_bit(CONF_MODE_DONE, &chan->conf_state);
3434 case L2CAP_MODE_ERTM:
/* EWS overrides the RFC window if it was received */
3435 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3436 chan->remote_tx_win = rfc.txwin_size;
3438 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3440 chan->remote_max_tx = rfc.max_transmit;
/* Clamp peer MPS so a full frame still fits our link MTU */
3442 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3443 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3444 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3445 rfc.max_pdu_size = cpu_to_le16(size);
3446 chan->remote_mps = size;
3448 __l2cap_set_ertm_timeouts(chan, &rfc);
3450 set_bit(CONF_MODE_DONE, &chan->conf_state);
3452 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3453 sizeof(rfc), (unsigned long) &rfc);
3455 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
/* Record the peer's accepted EFS parameters */
3456 chan->remote_id = efs.id;
3457 chan->remote_stype = efs.stype;
3458 chan->remote_msdu = le16_to_cpu(efs.msdu);
3459 chan->remote_flush_to =
3460 le32_to_cpu(efs.flush_to);
3461 chan->remote_acc_lat =
3462 le32_to_cpu(efs.acc_lat);
3463 chan->remote_sdu_itime =
3464 le32_to_cpu(efs.sdu_itime);
3465 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3467 (unsigned long) &efs);
3471 case L2CAP_MODE_STREAMING:
3472 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3473 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3474 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3475 rfc.max_pdu_size = cpu_to_le16(size);
3476 chan->remote_mps = size;
3478 set_bit(CONF_MODE_DONE, &chan->conf_state);
3480 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3481 (unsigned long) &rfc);
/* Any other mode: reject with an RFC reflecting our mode */
3486 result = L2CAP_CONF_UNACCEPT;
3488 memset(&rfc, 0, sizeof(rfc));
3489 rfc.mode = chan->mode;
3492 if (result == L2CAP_CONF_SUCCESS)
3493 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3495 rsp->scid = cpu_to_le16(chan->dcid);
3496 rsp->result = cpu_to_le16(result);
3497 rsp->flags = __constant_cpu_to_le16(0);
/* Parse the peer's Configuration Response and build a follow-up
 * Configuration Request into `data` echoing the (possibly adjusted)
 * options. On success/pending, latch the negotiated ERTM/streaming
 * parameters into the channel. Returns the new request length or a
 * negative errno to refuse.
 */
3502 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3503 void *data, u16 *result)
3505 struct l2cap_conf_req *req = data;
3506 void *ptr = req->data;
3509 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3510 struct l2cap_conf_efs efs;
3512 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3514 while (len >= L2CAP_CONF_OPT_SIZE) {
3515 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3518 case L2CAP_CONF_MTU:
/* Peer countered with a too-small MTU: mark unaccepted */
3519 if (val < L2CAP_DEFAULT_MIN_MTU) {
3520 *result = L2CAP_CONF_UNACCEPT;
3521 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3524 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3527 case L2CAP_CONF_FLUSH_TO:
3528 chan->flush_to = val;
3529 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3533 case L2CAP_CONF_RFC:
3534 if (olen == sizeof(rfc))
3535 memcpy(&rfc, (void *)val, olen);
/* State-2 devices never change their configured mode */
3537 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3538 rfc.mode != chan->mode)
3539 return -ECONNREFUSED;
3543 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3544 sizeof(rfc), (unsigned long) &rfc);
3547 case L2CAP_CONF_EWS:
/* Ack window can only shrink toward the peer's value */
3548 chan->ack_win = min_t(u16, val, chan->ack_win);
3549 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3553 case L2CAP_CONF_EFS:
3554 if (olen == sizeof(efs))
3555 memcpy(&efs, (void *)val, olen);
3557 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3558 efs.stype != L2CAP_SERV_NOTRAFIC &&
3559 efs.stype != chan->local_stype)
3560 return -ECONNREFUSED;
3562 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3563 (unsigned long) &efs);
3566 case L2CAP_CONF_FCS:
3567 if (*result == L2CAP_CONF_PENDING)
3568 if (val == L2CAP_FCS_NONE)
3569 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be renegotiated to anything else */
3575 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3576 return -ECONNREFUSED;
3578 chan->mode = rfc.mode;
3580 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3582 case L2CAP_MODE_ERTM:
/* Latch negotiated ERTM timeouts, MPS and window */
3583 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3584 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3585 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3586 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3587 chan->ack_win = min_t(u16, chan->ack_win,
3590 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3591 chan->local_msdu = le16_to_cpu(efs.msdu);
3592 chan->local_sdu_itime =
3593 le32_to_cpu(efs.sdu_itime);
3594 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3595 chan->local_flush_to =
3596 le32_to_cpu(efs.flush_to);
3600 case L2CAP_MODE_STREAMING:
3601 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3605 req->dcid = cpu_to_le16(chan->dcid);
3606 req->flags = __constant_cpu_to_le16(0);
/* Build a minimal Configuration Response header (scid/result/flags) into
 * `data`; returns the response length for l2cap_send_cmd(). */
3611 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3612 u16 result, u16 flags)
3614 struct l2cap_conf_rsp *rsp = data;
3615 void *ptr = rsp->data;
3617 BT_DBG("chan %p", chan);
3619 rsp->scid = cpu_to_le16(chan->dcid);
3620 rsp->result = cpu_to_le16(result);
3621 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connection Response (success) for a channel whose
 * acceptance was postponed, then kick off configuration by sending our
 * first Configuration Request if one has not been sent yet.
 */
3626 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3628 struct l2cap_conn_rsp rsp;
3629 struct l2cap_conn *conn = chan->conn;
3633 rsp.scid = cpu_to_le16(chan->dcid);
3634 rsp.dcid = cpu_to_le16(chan->scid);
3635 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3636 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP-created channels answer with CREATE_CHAN_RSP instead */
3639 rsp_code = L2CAP_CREATE_CHAN_RSP;
3641 rsp_code = L2CAP_CONN_RSP;
3643 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3645 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the config request once per channel */
3647 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3650 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3651 l2cap_build_conf_req(chan, buf), buf);
3652 chan->num_conf_req++;
/* Extract the final RFC (and EWS) parameters from a successful
 * Configuration Response and latch them into the channel; applies sane
 * defaults when the peer omitted those options.
 */
3655 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3659 /* Use sane default values in case a misbehaving remote device
3660 * did not send an RFC or extended window size option.
3662 u16 txwin_ext = chan->ack_win;
3663 struct l2cap_conf_rfc rfc = {
3665 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3666 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3667 .max_pdu_size = cpu_to_le16(chan->imtu),
3668 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3671 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming channels carry RFC parameters */
3673 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3676 while (len >= L2CAP_CONF_OPT_SIZE) {
3677 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3680 case L2CAP_CONF_RFC:
3681 if (olen == sizeof(rfc))
3682 memcpy(&rfc, (void *)val, olen);
3684 case L2CAP_CONF_EWS:
3691 case L2CAP_MODE_ERTM:
3692 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3693 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3694 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control uses the EWS window, otherwise the RFC one */
3695 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3696 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3698 chan->ack_win = min_t(u16, chan->ack_win,
3701 case L2CAP_MODE_STREAMING:
3702 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject. If it answers our outstanding Information
 * Request, abort the feature-mask exchange and start connecting channels
 * anyway (treat features as done).
 */
3706 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3707 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3710 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated reject payloads */
3712 if (cmd_len < sizeof(*rej))
3715 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
/* Match the reject to our pending info request by ident */
3718 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3719 cmd->ident == conn->info_ident) {
3720 cancel_delayed_work(&conn->info_timer);
3722 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3723 conn->info_ident = 0;
3725 l2cap_conn_start(conn);
/* Handle an incoming Connection (or Create Channel) Request: find the
 * listening channel for the PSM, enforce link security, create the new
 * child channel, decide the immediate result (success / pending / reject)
 * and send the response. May also trigger the feature-mask info exchange
 * and our first Configuration Request. Returns the new channel or NULL.
 */
3731 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3732 struct l2cap_cmd_hdr *cmd,
3733 u8 *data, u8 rsp_code, u8 amp_id)
3735 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3736 struct l2cap_conn_rsp rsp;
3737 struct l2cap_chan *chan = NULL, *pchan;
3738 struct sock *parent, *sk = NULL;
3739 int result, status = L2CAP_CS_NO_INFO;
3741 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3742 __le16 psm = req->psm;
3744 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3746 /* Check if we have socket listening on psm */
3747 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3749 result = L2CAP_CR_BAD_PSM;
3755 mutex_lock(&conn->chan_lock);
3758 /* Check if the ACL is secure enough (if not SDP) */
3759 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3760 !hci_conn_check_link_mode(conn->hcon)) {
3761 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3762 result = L2CAP_CR_SEC_BLOCK;
3766 result = L2CAP_CR_NO_MEM;
3768 /* Check if we already have channel with that dcid */
3769 if (__l2cap_get_chan_by_dcid(conn, scid))
3772 chan = pchan->ops->new_connection(pchan);
3778 /* For certain devices (ex: HID mouse), support for authentication,
3779 * pairing and bonding is optional. For such devices, in order to avoid
3780 * keeping the ACL alive for too long after L2CAP disconnection, reset
3781 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3783 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3785 bacpy(&bt_sk(sk)->src, conn->src);
3786 bacpy(&bt_sk(sk)->dst, conn->dst);
3789 chan->local_amp_id = amp_id;
3791 __l2cap_chan_add(conn, chan);
3795 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for the (possibly deferred) response */
3797 chan->ident = cmd->ident;
3799 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3800 if (l2cap_chan_check_security(chan)) {
/* Userspace asked to defer acceptance: report authorization pending */
3801 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3802 __l2cap_state_change(chan, BT_CONNECT2);
3803 result = L2CAP_CR_PEND;
3804 status = L2CAP_CS_AUTHOR_PEND;
3805 chan->ops->defer(chan);
3807 /* Force pending result for AMP controllers.
3808 * The connection will succeed after the
3809 * physical link is up.
3811 if (amp_id == AMP_ID_BREDR) {
3812 __l2cap_state_change(chan, BT_CONFIG);
3813 result = L2CAP_CR_SUCCESS;
3815 __l2cap_state_change(chan, BT_CONNECT2);
3816 result = L2CAP_CR_PEND;
3818 status = L2CAP_CS_NO_INFO;
/* Security procedure still running: pending/authentication */
3821 __l2cap_state_change(chan, BT_CONNECT2);
3822 result = L2CAP_CR_PEND;
3823 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not known yet: always answer pending */
3826 __l2cap_state_change(chan, BT_CONNECT2);
3827 result = L2CAP_CR_PEND;
3828 status = L2CAP_CS_NO_INFO;
3832 release_sock(parent);
3833 mutex_unlock(&conn->chan_lock);
3836 rsp.scid = cpu_to_le16(scid);
3837 rsp.dcid = cpu_to_le16(dcid);
3838 rsp.result = cpu_to_le16(result);
3839 rsp.status = cpu_to_le16(status);
3840 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off the feature-mask info exchange if not started yet */
3842 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3843 struct l2cap_info_req info;
3844 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3846 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3847 conn->info_ident = l2cap_get_ident(conn);
3849 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3851 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3852 sizeof(info), &info);
/* Connection accepted immediately: start configuration */
3855 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3856 result == L2CAP_CR_SUCCESS) {
3858 set_bit(CONF_REQ_SENT, &chan->conf_state);
3859 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3860 l2cap_build_conf_req(chan, buf), buf);
3861 chan->num_conf_req++;
/* Entry point for an incoming Connection Request PDU: validate the length,
 * notify the management interface of the connection, then hand off to
 * l2cap_connect() with the standard response code.
 */
3867 static int l2cap_connect_req(struct l2cap_conn *conn,
3868 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3870 struct hci_dev *hdev = conn->hcon->hdev;
3871 struct hci_conn *hcon = conn->hcon;
3873 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report device as connected to mgmt only once per ACL */
3877 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3878 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3879 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3880 hcon->dst_type, 0, NULL, 0,
3882 hci_dev_unlock(hdev);
3884 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection (or Create Channel) Response: locate the channel by
 * scid (or by ident if the peer has not assigned one yet) and act on the
 * result — success starts configuration, pending is recorded, any error
 * tears the channel down.
 */
3888 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3889 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3892 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3893 u16 scid, dcid, result, status;
3894 struct l2cap_chan *chan;
3898 if (cmd_len < sizeof(*rsp))
3901 scid = __le16_to_cpu(rsp->scid);
3902 dcid = __le16_to_cpu(rsp->dcid);
3903 result = __le16_to_cpu(rsp->result);
3904 status = __le16_to_cpu(rsp->status);
3906 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3907 dcid, scid, result, status);
3909 mutex_lock(&conn->chan_lock);
/* Prefer lookup by our source CID; fall back to command ident */
3912 chan = __l2cap_get_chan_by_scid(conn, scid);
3918 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3927 l2cap_chan_lock(chan);
3930 case L2CAP_CR_SUCCESS:
3931 l2cap_state_change(chan, BT_CONFIG);
3934 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Only ever send the first config request once */
3936 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3939 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3940 l2cap_build_conf_req(chan, req), req);
3941 chan->num_conf_req++;
3945 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any non-success, non-pending result refuses the connection */
3949 l2cap_chan_del(chan, ECONNREFUSED);
3953 l2cap_chan_unlock(chan);
3956 mutex_unlock(&conn->chan_lock);
/* Apply the FCS default after configuration: none for basic mode, CRC16
 * for ERTM/streaming unless the peer asked to disable it. */
3961 static inline void set_default_fcs(struct l2cap_chan *chan)
3963 /* FCS is enabled only in ERTM or streaming mode, if one or both
3966 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3967 chan->fcs = L2CAP_FCS_NONE;
3968 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3969 chan->fcs = L2CAP_FCS_CRC16;
/* Send the final (successful) Configuration Response for an EFS channel,
 * clearing the local pending flag and marking output configuration done.
 */
3972 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3973 u8 ident, u16 flags)
3975 struct l2cap_conn *conn = chan->conn;
3977 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3980 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3981 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3983 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3984 l2cap_build_conf_rsp(chan, data,
3985 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configuration Request: accumulate (possibly
 * fragmented) option data in chan->conf_req, and once complete, parse it,
 * send our response, and — when both directions are configured — finish
 * ERTM init and mark the channel ready.
 */
3988 static inline int l2cap_config_req(struct l2cap_conn *conn,
3989 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3992 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3995 struct l2cap_chan *chan;
3998 if (cmd_len < sizeof(*req))
4001 dcid = __le16_to_cpu(req->dcid);
4002 flags = __le16_to_cpu(req->flags);
4004 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4006 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only valid while connecting/configuring; reject otherwise */
4010 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4011 struct l2cap_cmd_rej_cid rej;
4013 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4014 rej.scid = cpu_to_le16(chan->scid);
4015 rej.dcid = cpu_to_le16(chan->dcid);
4017 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
4022 /* Reject if config buffer is too small. */
4023 len = cmd_len - sizeof(*req);
4024 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4025 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4026 l2cap_build_conf_rsp(chan, rsp,
4027 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request */
4032 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4033 chan->conf_len += len;
4035 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4036 /* Incomplete config. Send empty response. */
4037 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4038 l2cap_build_conf_rsp(chan, rsp,
4039 L2CAP_CONF_SUCCESS, flags), rsp);
4043 /* Complete config. */
4044 len = l2cap_parse_conf_req(chan, rsp);
4046 l2cap_send_disconn_req(chan, ECONNRESET);
4050 chan->ident = cmd->ident;
4051 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4052 chan->num_conf_rsp++;
4054 /* Reset config buffer. */
4057 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both sides configured: finalize FCS/ERTM and go ready */
4060 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4061 set_default_fcs(chan);
4063 if (chan->mode == L2CAP_MODE_ERTM ||
4064 chan->mode == L2CAP_MODE_STREAMING)
4065 err = l2cap_ertm_init(chan);
4068 l2cap_send_disconn_req(chan, -err);
4070 l2cap_chan_ready(chan);
/* We still owe the peer our own config request */
4075 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4077 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4078 l2cap_build_conf_req(chan, buf), buf);
4079 chan->num_conf_req++;
4082 /* Got Conf Rsp PENDING from remote side and assume we sent
4083 Conf Rsp PENDING in the code above */
4084 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4085 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4087 /* check compatibility */
4089 /* Send rsp for BR/EDR channel */
4091 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4093 chan->ident = cmd->ident;
4097 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response: on success latch the final
 * RFC values; on pending reparse and possibly answer (or create an AMP
 * logical link); on unaccept retry with adjusted options a bounded number
 * of times; otherwise disconnect. When input+output are both done, finish
 * ERTM init and mark the channel ready.
 */
4101 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4102 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4105 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4106 u16 scid, flags, result;
4107 struct l2cap_chan *chan;
4108 int len = cmd_len - sizeof(*rsp);
4111 if (cmd_len < sizeof(*rsp))
4114 scid = __le16_to_cpu(rsp->scid);
4115 flags = __le16_to_cpu(rsp->flags);
4116 result = __le16_to_cpu(rsp->result);
4118 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4121 chan = l2cap_get_chan_by_scid(conn, scid);
4126 case L2CAP_CONF_SUCCESS:
4127 l2cap_conf_rfc_get(chan, rsp->data, len);
4128 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4131 case L2CAP_CONF_PENDING:
4132 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4134 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4137 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
/* Parse failure while pending: tear down */
4140 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR: answer now; AMP: wait for the logical link */
4144 if (!chan->hs_hcon) {
4145 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4148 if (l2cap_check_efs(chan)) {
4149 amp_create_logical_link(chan);
4150 chan->ident = cmd->ident;
4156 case L2CAP_CONF_UNACCEPT:
/* Retry with a new request, bounded by L2CAP_CONF_MAX_CONF_RSP */
4157 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Counter-options must fit in our request buffer */
4160 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4161 l2cap_send_disconn_req(chan, ECONNRESET);
4165 /* throw out any old stored conf requests */
4166 result = L2CAP_CONF_SUCCESS;
4167 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4170 l2cap_send_disconn_req(chan, ECONNRESET);
4174 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4175 L2CAP_CONF_REQ, len, req);
4176 chan->num_conf_req++;
4177 if (result != L2CAP_CONF_SUCCESS)
/* Rejected (or retries exhausted): error out and disconnect */
4183 l2cap_chan_set_err(chan, ECONNRESET);
4185 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4186 l2cap_send_disconn_req(chan, ECONNRESET);
/* More response fragments still to come */
4190 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4193 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4195 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4196 set_default_fcs(chan);
4198 if (chan->mode == L2CAP_MODE_ERTM ||
4199 chan->mode == L2CAP_MODE_STREAMING)
4200 err = l2cap_ertm_init(chan);
4203 l2cap_send_disconn_req(chan, -err);
4205 l2cap_chan_ready(chan);
4209 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, shut the socket down, and delete the channel. The channel is
 * held across unlock so ops->close() runs on a valid object.
 */
4213 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4214 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4217 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4218 struct l2cap_disconn_rsp rsp;
4220 struct l2cap_chan *chan;
4223 if (cmd_len != sizeof(*req))
4226 scid = __le16_to_cpu(req->scid);
4227 dcid = __le16_to_cpu(req->dcid);
4229 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4231 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our local (source) CID */
4233 chan = __l2cap_get_chan_by_scid(conn, dcid);
4235 mutex_unlock(&conn->chan_lock);
4239 l2cap_chan_lock(chan);
4243 rsp.dcid = cpu_to_le16(chan->scid);
4244 rsp.scid = cpu_to_le16(chan->dcid);
4245 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4248 sk->sk_shutdown = SHUTDOWN_MASK;
/* Hold a ref so close() after unlock sees a live channel */
4251 l2cap_chan_hold(chan);
4252 l2cap_chan_del(chan, ECONNRESET);
4254 l2cap_chan_unlock(chan);
4256 chan->ops->close(chan);
4257 l2cap_chan_put(chan);
4259 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response to our earlier request: delete the
 * channel cleanly (no error), closing it under its own reference.
 */
4264 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4265 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4268 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4270 struct l2cap_chan *chan;
4272 if (cmd_len != sizeof(*rsp))
4275 scid = __le16_to_cpu(rsp->scid);
4276 dcid = __le16_to_cpu(rsp->dcid);
4278 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4280 mutex_lock(&conn->chan_lock);
4282 chan = __l2cap_get_chan_by_scid(conn, scid);
4284 mutex_unlock(&conn->chan_lock);
4288 l2cap_chan_lock(chan);
/* Hold a ref so close() after unlock sees a live channel */
4290 l2cap_chan_hold(chan);
4291 l2cap_chan_del(chan, 0);
4293 l2cap_chan_unlock(chan);
4295 chan->ops->close(chan);
4296 l2cap_chan_put(chan);
4298 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request: answer with our feature mask, the fixed
 * channel map (A2MP bit set only with high-speed enabled), or NOTSUPP for
 * anything else.
 */
4303 static inline int l2cap_information_req(struct l2cap_conn *conn,
4304 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4307 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4310 if (cmd_len != sizeof(*req))
4313 type = __le16_to_cpu(req->type);
4315 BT_DBG("type 0x%4.4x", type);
4317 if (type == L2CAP_IT_FEAT_MASK) {
4319 u32 feat_mask = l2cap_feat_mask;
4320 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4321 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4322 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
/* We always support ERTM and streaming modes */
4324 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow/window only when high-speed (AMP) is enabled */
4326 if (conn->hs_enabled)
4327 feat_mask |= L2CAP_FEAT_EXT_FLOW
4328 | L2CAP_FEAT_EXT_WINDOW;
4330 put_unaligned_le32(feat_mask, rsp->data);
4331 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4333 } else if (type == L2CAP_IT_FIXED_CHAN) {
4335 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only with AMP enabled */
4337 if (conn->hs_enabled)
4338 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4340 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4342 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4343 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4344 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4345 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Unknown info type: reply not supported */
4348 struct l2cap_info_rsp rsp;
4349 rsp.type = cpu_to_le16(type);
4350 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4351 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an Information Response: record the remote feature mask (then
 * chase the fixed-channel info if advertised) or the fixed-channel mask,
 * and once the exchange is done start pending channel connections.
 */
4358 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4359 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4362 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4365 if (cmd_len < sizeof(*rsp))
4368 type = __le16_to_cpu(rsp->type);
4369 result = __le16_to_cpu(rsp->result);
4371 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4373 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4374 if (cmd->ident != conn->info_ident ||
4375 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4378 cancel_delayed_work(&conn->info_timer);
/* Remote refused: proceed without the feature info */
4380 if (result != L2CAP_IR_SUCCESS) {
4381 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4382 conn->info_ident = 0;
4384 l2cap_conn_start(conn);
4390 case L2CAP_IT_FEAT_MASK:
4391 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Follow up with a fixed-channel query if the peer supports it */
4393 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4394 struct l2cap_info_req req;
4395 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4397 conn->info_ident = l2cap_get_ident(conn);
4399 l2cap_send_cmd(conn, conn->info_ident,
4400 L2CAP_INFO_REQ, sizeof(req), &req);
4402 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4403 conn->info_ident = 0;
4405 l2cap_conn_start(conn);
4409 case L2CAP_IT_FIXED_CHAN:
4410 conn->fixed_chan_mask = rsp->data[0];
4411 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4412 conn->info_ident = 0;
4414 l2cap_conn_start(conn);
/* Handle an L2CAP Create Channel Request (AMP).
 * amp_id == AMP_ID_BREDR falls back to a normal BR/EDR connect; otherwise
 * the AMP controller id is validated (device exists, is HCI_AMP, is up)
 * and the new channel is wired to the AMP manager / high-speed hci_conn.
 * On a bad AMP id a CREATE_CHAN_RSP with L2CAP_CR_BAD_AMP is returned.
 * NOTE(review): embedded line-number gaps show dropped lines (error
 * returns, closing braces, the error label); block is incomplete here.
 */
4421 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4422 struct l2cap_cmd_hdr *cmd,
4423 u16 cmd_len, void *data)
4425 struct l2cap_create_chan_req *req = data;
4426 struct l2cap_create_chan_rsp rsp;
4427 struct l2cap_chan *chan;
4428 struct hci_dev *hdev;
/* Request has a fixed size; anything else is malformed. */
4431 if (cmd_len != sizeof(*req))
/* High-speed (AMP) support must be enabled on this connection. */
4434 if (!conn->hs_enabled)
4437 psm = le16_to_cpu(req->psm);
4438 scid = le16_to_cpu(req->scid);
4440 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4442 /* For controller id 0 make BR/EDR connection */
4443 if (req->amp_id == AMP_ID_BREDR) {
4444 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4449 /* Validate AMP controller id */
4450 hdev = hci_dev_get(req->amp_id);
4454 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4459 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4462 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4463 struct hci_conn *hs_hcon;
/* Look up the high-speed (AMP_LINK) connection to the same peer. */
4465 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4471 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4473 mgr->bredr_chan = chan;
4474 chan->hs_hcon = hs_hcon;
/* No FCS on AMP-bound channels; MTU comes from the AMP block size. */
4475 chan->fcs = L2CAP_FCS_NONE;
4476 conn->mtu = hdev->block_mtu;
/* Error path: reject with BAD_AMP. */
4485 rsp.scid = cpu_to_le16(scid);
4486 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4487 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4489 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Helpers that emit the five Move Channel signaling PDUs
 * (MOVE_CHAN_REQ / RSP / CFM / CFM with icid only / CFM_RSP).
 * NOTE(review): embedded line-number gaps show dropped lines (opening
 * braces, local declarations, trailing args, closing braces).
 */
/* Send a Move Channel Request for this channel to dest_amp_id and start
 * the move timer. The new ident is remembered in chan->ident so the
 * response can be matched.
 */
4495 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4497 struct l2cap_move_chan_req req;
4500 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4502 ident = l2cap_get_ident(chan->conn);
4503 chan->ident = ident;
4505 req.icid = cpu_to_le16(chan->scid);
4506 req.dest_amp_id = dest_amp_id;
4508 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4511 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with the given result, reusing the
 * ident of the request being answered. Note rsp uses dcid here,
 * while the cfm helpers below use scid.
 */
4514 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4516 struct l2cap_move_chan_rsp rsp;
4518 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4520 rsp.icid = cpu_to_le16(chan->dcid);
4521 rsp.result = cpu_to_le16(result);
4523 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm under a fresh ident and arm the move timer
 * while waiting for the confirm response.
 */
4527 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4529 struct l2cap_move_chan_cfm cfm;
4531 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4533 chan->ident = l2cap_get_ident(chan->conn);
4535 cfm.icid = cpu_to_le16(chan->scid);
4536 cfm.result = cpu_to_le16(result);
4538 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4541 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm when only the icid is known
 * (no channel object could be located).
 */
4544 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4546 struct l2cap_move_chan_cfm cfm;
4548 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4550 cfm.icid = cpu_to_le16(icid);
4551 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4553 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirm Response echoing the given icid. */
4557 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4560 struct l2cap_move_chan_cfm_rsp rsp;
4562 BT_DBG("icid 0x%4.4x", icid);
4564 rsp.icid = cpu_to_le16(icid);
4565 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link.
 * The actual link release is still a placeholder in this code.
 */
4568 static void __release_logical_link(struct l2cap_chan *chan)
4570 chan->hs_hchan = NULL;
4571 chan->hs_hcon = NULL;
4573 /* Placeholder - release the logical link */
/* Logical link setup failed. For a channel that never reached
 * BT_CONNECTED this aborts the create (disconnect); for a connected
 * channel it unwinds an in-progress move depending on our move role.
 * NOTE(review): line-number gaps show dropped lines (break statements,
 * closing braces); block is incomplete in this extraction.
 */
4576 static void l2cap_logical_fail(struct l2cap_chan *chan)
4578 /* Logical link setup failed */
4579 if (chan->state != BT_CONNECTED) {
4580 /* Create channel failure, disconnect */
4581 l2cap_send_disconn_req(chan, ECONNRESET);
4585 switch (chan->move_role) {
4586 case L2CAP_MOVE_ROLE_RESPONDER:
/* As responder: finish the move locally and tell the initiator
 * the move is not supported.
 */
4587 l2cap_move_done(chan);
4588 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4590 case L2CAP_MOVE_ROLE_INITIATOR:
4591 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4592 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4593 /* Remote has only sent pending or
4594 * success responses, clean up
4596 l2cap_move_done(chan);
4599 /* Other amp move states imply that the move
4600 * has already aborted
4602 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Logical link came up for a channel being *created* on AMP:
 * attach the hci_chan, answer the pending EFS config, and if config
 * is already complete initialize ERTM and mark the channel ready.
 * NOTE(review): line-number gaps show dropped lines here.
 */
4607 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4608 struct hci_chan *hchan)
4610 struct l2cap_conf_rsp rsp;
4612 chan->hs_hchan = hchan;
4613 chan->hs_hcon->l2cap_data = chan->conn;
4615 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4617 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4620 set_default_fcs(chan);
4622 err = l2cap_ertm_init(chan);
/* On ERTM init failure tear the channel down; otherwise it is live. */
4624 l2cap_send_disconn_req(chan, -err);
4626 l2cap_chan_ready(chan);
/* Logical link came up for a channel being *moved* to AMP:
 * advance the move state machine according to role/busy state and send
 * the appropriate confirm or response. Unexpected states release the
 * logical link and return the channel to MOVE_STABLE.
 */
4630 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4631 struct hci_chan *hchan)
4633 chan->hs_hcon = hchan->conn;
4634 chan->hs_hcon->l2cap_data = chan->conn;
4636 BT_DBG("move_state %d", chan->move_state);
4638 switch (chan->move_state) {
4639 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4640 /* Move confirm will be sent after a success
4641 * response is received
4643 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4645 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4646 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4647 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4648 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4649 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4650 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4651 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4652 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4653 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4657 /* Move was not in expected state, free the channel */
4658 __release_logical_link(chan);
4660 chan->move_state = L2CAP_MOVE_STABLE;
4664 /* Call with chan locked */
/* Logical-link confirmation entry point: on failure, unwind via
 * l2cap_logical_fail() and release the link; on success, dispatch to the
 * create-finish or move-finish path based on channel state.
 * NOTE(review): line-number gaps show the failure test and else-branch
 * structure were dropped from this extraction.
 */
4665 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4668 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4671 l2cap_logical_fail(chan);
4672 __release_logical_link(chan);
4676 if (chan->state != BT_CONNECTED) {
4677 /* Ignore logical link if channel is on BR/EDR */
4678 if (chan->local_amp_id != AMP_ID_BREDR)
4679 l2cap_logical_finish_create(chan, hchan);
4681 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel. From BR/EDR (only when policy prefers AMP)
 * this starts physical link setup; otherwise it moves back toward
 * BR/EDR immediately with a MOVE_CHAN_REQ to controller 0.
 */
4685 void l2cap_move_start(struct l2cap_chan *chan)
4687 BT_DBG("chan %p", chan);
4689 if (chan->local_amp_id == AMP_ID_BREDR) {
4690 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4692 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4693 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4694 /* Placeholder - start physical link setup */
4696 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4697 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4699 l2cap_move_setup(chan);
4700 l2cap_send_move_chan_req(chan, 0);
/* Complete channel creation after the physical link outcome is known.
 * Outgoing (BT_CONNECT): on success retry the create on the chosen AMP,
 * otherwise fall back to a plain BR/EDR connect request.
 * Incoming: answer the pending CREATE_CHAN with success or NO_MEM, and on
 * success move to BT_CONFIG and send the first configure request.
 * NOTE(review): line-number gaps show dropped lines (returns, braces,
 * the buf declaration); block is incomplete in this extraction.
 */
4704 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4705 u8 local_amp_id, u8 remote_amp_id)
4707 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4708 local_amp_id, remote_amp_id);
/* AMP channels never use an FCS. */
4710 chan->fcs = L2CAP_FCS_NONE;
4712 /* Outgoing channel on AMP */
4713 if (chan->state == BT_CONNECT) {
4714 if (result == L2CAP_CR_SUCCESS) {
4715 chan->local_amp_id = local_amp_id;
4716 l2cap_send_create_chan_req(chan, remote_amp_id);
4718 /* Revert to BR/EDR connect */
4719 l2cap_send_conn_req(chan);
4725 /* Incoming channel on AMP */
4726 if (__l2cap_no_conn_pending(chan)) {
4727 struct l2cap_conn_rsp rsp;
4729 rsp.scid = cpu_to_le16(chan->dcid);
4730 rsp.dcid = cpu_to_le16(chan->scid);
4732 if (result == L2CAP_CR_SUCCESS) {
4733 /* Send successful response */
4734 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4735 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4737 /* Send negative response */
4738 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4739 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4742 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4745 if (result == L2CAP_CR_SUCCESS) {
/* Creation accepted: enter configuration phase immediately. */
4746 __l2cap_state_change(chan, BT_CONFIG);
4747 set_bit(CONF_REQ_SENT, &chan->conf_state);
4748 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4750 l2cap_build_conf_req(chan, buf), buf);
4751 chan->num_conf_req++;
/* Initiator side: physical link is ready, so set up the move and send
 * the MOVE_CHAN_REQ toward the remote AMP controller.
 * NOTE(review): line-number gaps show dropped lines in these helpers.
 */
4756 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4759 l2cap_move_setup(chan);
4760 chan->move_id = local_amp_id;
4761 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4763 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: respond to a move based on logical-link readiness.
 * If the link is already BT_CONNECTED, confirm success right away;
 * otherwise wait for the logical-link confirmation. If no link is
 * available, refuse the move.
 */
4766 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4768 struct hci_chan *hchan = NULL;
4770 /* Placeholder - get hci_chan for logical link */
4773 if (hchan->state == BT_CONNECTED) {
4774 /* Logical link is ready to go */
4775 chan->hs_hcon = hchan->conn;
4776 chan->hs_hcon->l2cap_data = chan->conn;
4777 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4778 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4780 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4782 /* Wait for logical link to be ready */
4783 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4786 /* Logical link not available */
4787 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a move: as responder send a negative response (BAD_ID for
 * -EINVAL, otherwise NOT_ALLOWED), reset role/state to stable and
 * resume ERTM transmission.
 */
4791 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4793 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4795 if (result == -EINVAL)
4796 rsp_result = L2CAP_MR_BAD_ID;
4798 rsp_result = L2CAP_MR_NOT_ALLOWED;
4800 l2cap_send_move_chan_rsp(chan, rsp_result);
4803 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4804 chan->move_state = L2CAP_MOVE_STABLE;
4806 /* Restart data transmission */
4807 l2cap_ertm_send(chan);
4810 /* Invoke with locked chan */
/* Physical-link confirmation: dispatch to create (channel not yet
 * connected), cancel (non-success result), or the role-specific move
 * path. Dead/closed channels just unlock and bail.
 * NOTE(review): line-number gaps show dropped lines (returns, breaks,
 * the else/switch structure) in this extraction.
 */
4811 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4813 u8 local_amp_id = chan->local_amp_id;
4814 u8 remote_amp_id = chan->remote_amp_id;
4816 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4817 chan, result, local_amp_id, remote_amp_id);
4819 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4820 l2cap_chan_unlock(chan);
4824 if (chan->state != BT_CONNECTED) {
4825 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4826 } else if (result != L2CAP_MR_SUCCESS) {
4827 l2cap_do_move_cancel(chan, result);
4829 switch (chan->move_role) {
4830 case L2CAP_MOVE_ROLE_INITIATOR:
4831 l2cap_do_move_initiate(chan, local_amp_id,
4834 case L2CAP_MOVE_ROLE_RESPONDER:
4835 l2cap_do_move_respond(chan, result);
/* Unknown role: cancel the move defensively. */
4838 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.
 * Validates size, hs_enabled, channel existence and eligibility (dynamic
 * CID, policy, ERTM/streaming mode), rejects same-ID moves, checks the
 * destination AMP controller, detects move collisions (larger bd_addr
 * wins), then becomes move responder and answers with the computed
 * result (PEND while waiting on logical/physical link setup).
 * NOTE(review): line-number gaps show dropped lines (returns, hci_dev_put,
 * labels, braces); block is incomplete in this extraction.
 */
4844 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4845 struct l2cap_cmd_hdr *cmd,
4846 u16 cmd_len, void *data)
4848 struct l2cap_move_chan_req *req = data;
4849 struct l2cap_move_chan_rsp rsp;
4850 struct l2cap_chan *chan;
4852 u16 result = L2CAP_MR_NOT_ALLOWED;
4854 if (cmd_len != sizeof(*req))
4857 icid = le16_to_cpu(req->icid);
4859 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4861 if (!conn->hs_enabled)
4864 chan = l2cap_get_chan_by_dcid(conn, icid);
/* No such channel: reply NOT_ALLOWED with the icid we were given. */
4866 rsp.icid = cpu_to_le16(icid);
4867 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4868 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4873 chan->ident = cmd->ident;
/* Only dynamic, move-capable (ERTM/streaming) channels whose policy
 * permits AMP may be moved.
 */
4875 if (chan->scid < L2CAP_CID_DYN_START ||
4876 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4877 (chan->mode != L2CAP_MODE_ERTM &&
4878 chan->mode != L2CAP_MODE_STREAMING)) {
4879 result = L2CAP_MR_NOT_ALLOWED;
4880 goto send_move_response;
4883 if (chan->local_amp_id == req->dest_amp_id) {
4884 result = L2CAP_MR_SAME_ID;
4885 goto send_move_response;
4888 if (req->dest_amp_id != AMP_ID_BREDR) {
4889 struct hci_dev *hdev;
4890 hdev = hci_dev_get(req->dest_amp_id);
4891 if (!hdev || hdev->dev_type != HCI_AMP ||
4892 !test_bit(HCI_UP, &hdev->flags)) {
4896 result = L2CAP_MR_BAD_ID;
4897 goto send_move_response;
4902 /* Detect a move collision. Only send a collision response
4903 * if this side has "lost", otherwise proceed with the move.
4904 * The winner has the larger bd_addr.
4906 if ((__chan_is_moving(chan) ||
4907 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4908 bacmp(conn->src, conn->dst) > 0) {
4909 result = L2CAP_MR_COLLISION;
4910 goto send_move_response;
4913 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4914 l2cap_move_setup(chan);
4915 chan->move_id = req->dest_amp_id;
4918 if (req->dest_amp_id == AMP_ID_BREDR) {
4919 /* Moving to BR/EDR */
4920 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4921 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4922 result = L2CAP_MR_PEND;
4924 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4925 result = L2CAP_MR_SUCCESS;
4928 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4929 /* Placeholder - uncomment when amp functions are available */
4930 /*amp_accept_physical(chan, req->dest_amp_id);*/
4931 result = L2CAP_MR_PEND;
4935 l2cap_send_move_chan_rsp(chan, result);
4937 l2cap_chan_unlock(chan);
/* Continue a move after a SUCCESS or PEND Move Channel Response.
 * Locates the channel by scid (UNCONFIRMED cfm is sent for the raw icid
 * if lookup fails), re-arms the timer on PEND, then advances the move
 * state machine — possibly confirming immediately if the logical link
 * is already up. Any unexpected state aborts the move (UNCONFIRMED).
 * NOTE(review): line-number gaps show dropped lines (breaks, returns,
 * braces); block is incomplete in this extraction.
 */
4942 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4944 struct l2cap_chan *chan;
4945 struct hci_chan *hchan = NULL;
4947 chan = l2cap_get_chan_by_scid(conn, icid);
4949 l2cap_send_move_chan_cfm_icid(conn, icid);
4953 __clear_chan_timer(chan);
/* PEND keeps the (extended) response timer running. */
4954 if (result == L2CAP_MR_PEND)
4955 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4957 switch (chan->move_state) {
4958 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4959 /* Move confirm will be sent when logical link
4962 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4964 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4965 if (result == L2CAP_MR_PEND) {
4967 } else if (test_bit(CONN_LOCAL_BUSY,
4968 &chan->conn_state)) {
4969 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4971 /* Logical link is up or moving to BR/EDR,
4974 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4975 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4978 case L2CAP_MOVE_WAIT_RSP:
4980 if (result == L2CAP_MR_SUCCESS) {
4981 /* Remote is ready, send confirm immediately
4982 * after logical link is ready
4984 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4986 /* Both logical link and move success
4987 * are required to confirm
4989 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4992 /* Placeholder - get hci_chan for logical link */
4994 /* Logical link not available */
4995 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4999 /* If the logical link is not yet connected, do not
5000 * send confirmation.
5002 if (hchan->state != BT_CONNECTED)
5005 /* Logical link is already ready to go */
5007 chan->hs_hcon = hchan->conn;
5008 chan->hs_hcon->l2cap_data = chan->conn;
5010 if (result == L2CAP_MR_SUCCESS) {
5011 /* Can confirm now */
5012 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5014 /* Now only need move success
5017 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5020 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5023 /* Any other amp move state means the move failed. */
5024 chan->move_id = chan->local_amp_id;
5025 l2cap_move_done(chan);
5026 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5029 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response. The channel is looked up by
 * the request ident (falling back to a cfm-by-icid if not found). On
 * COLLISION the initiator flips to responder; otherwise the move is
 * cancelled and UNCONFIRMED is sent.
 * NOTE(review): line-number gaps show dropped lines (returns, braces).
 */
5032 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5035 struct l2cap_chan *chan;
5037 chan = l2cap_get_chan_by_ident(conn, ident);
5039 /* Could not locate channel, icid is best guess */
5040 l2cap_send_move_chan_cfm_icid(conn, icid);
5044 __clear_chan_timer(chan);
5046 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5047 if (result == L2CAP_MR_COLLISION) {
5048 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5050 /* Cleanup - cancel move */
5051 chan->move_id = chan->local_amp_id;
5052 l2cap_move_done(chan);
5056 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5058 l2cap_chan_unlock(chan);
/* Handle a Move Channel Response: success/pending continue the move,
 * any other result takes the failure path above.
 */
5061 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5062 struct l2cap_cmd_hdr *cmd,
5063 u16 cmd_len, void *data)
5065 struct l2cap_move_chan_rsp *rsp = data;
5068 if (cmd_len != sizeof(*rsp))
5071 icid = le16_to_cpu(rsp->icid);
5072 result = le16_to_cpu(rsp->result);
5074 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5076 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5077 l2cap_move_continue(conn, icid, result);
5079 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirm. A CFM_RSP is always sent back, even
 * if the icid is unknown (spec requirement). On CONFIRMED the channel
 * commits to move_id (releasing the logical link if it landed back on
 * BR/EDR); otherwise the move is rolled back. Either way the move
 * finishes via l2cap_move_done().
 * NOTE(review): line-number gaps show dropped lines (returns, braces).
 */
5084 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5085 struct l2cap_cmd_hdr *cmd,
5086 u16 cmd_len, void *data)
5088 struct l2cap_move_chan_cfm *cfm = data;
5089 struct l2cap_chan *chan;
5092 if (cmd_len != sizeof(*cfm))
5095 icid = le16_to_cpu(cfm->icid);
5096 result = le16_to_cpu(cfm->result);
5098 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5100 chan = l2cap_get_chan_by_dcid(conn, icid);
5102 /* Spec requires a response even if the icid was not found */
5103 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5107 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5108 if (result == L2CAP_MC_CONFIRMED) {
5109 chan->local_amp_id = chan->move_id;
5110 if (chan->local_amp_id == AMP_ID_BREDR)
5111 __release_logical_link(chan);
/* Not confirmed: revert move_id to the current controller. */
5113 chan->move_id = chan->local_amp_id;
5116 l2cap_move_done(chan);
5119 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5121 l2cap_chan_unlock(chan);
/* Handle a Move Channel Confirm Response: if we were waiting for it,
 * commit the channel to move_id, release the (now unused) logical
 * link when back on BR/EDR, and finish the move.
 */
5126 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5127 struct l2cap_cmd_hdr *cmd,
5128 u16 cmd_len, void *data)
5130 struct l2cap_move_chan_cfm_rsp *rsp = data;
5131 struct l2cap_chan *chan;
5134 if (cmd_len != sizeof(*rsp))
5137 icid = le16_to_cpu(rsp->icid);
5139 BT_DBG("icid 0x%4.4x", icid);
5141 chan = l2cap_get_chan_by_scid(conn, icid);
5145 __clear_chan_timer(chan);
5147 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5148 chan->local_amp_id = chan->move_id;
5150 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5151 __release_logical_link(chan);
5153 l2cap_move_done(chan);
5156 l2cap_chan_unlock(chan);
/* Validate LE connection parameters per the allowed ranges:
 * interval min/max in [6, 3200] with min <= max, supervision timeout
 * multiplier in [10, 3200], max interval strictly below 8x the timeout,
 * and latency <= 499 and within the derived maximum.
 * NOTE(review): line-number gaps show the return statements were
 * dropped from this extraction.
 */
5161 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5166 if (min > max || min < 6 || max > 3200)
5169 if (to_multiplier < 10 || to_multiplier > 3200)
5172 if (max >= to_multiplier * 8)
5175 max_latency = (to_multiplier * 8 / max) - 1;
5176 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only).
 * Parameters are range-checked; the response carries ACCEPTED or
 * REJECTED, and on acceptance the controller is asked to update the
 * connection via hci_le_conn_update().
 */
5182 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5183 struct l2cap_cmd_hdr *cmd,
5186 struct hci_conn *hcon = conn->hcon;
5187 struct l2cap_conn_param_update_req *req;
5188 struct l2cap_conn_param_update_rsp rsp;
5189 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may act on this request. */
5192 if (!(hcon->link_mode & HCI_LM_MASTER))
5195 cmd_len = __le16_to_cpu(cmd->len);
5196 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5199 req = (struct l2cap_conn_param_update_req *) data;
5200 min = __le16_to_cpu(req->min);
5201 max = __le16_to_cpu(req->max);
5202 latency = __le16_to_cpu(req->latency);
5203 to_multiplier = __le16_to_cpu(req->to_multiplier);
5205 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5206 min, max, latency, to_multiplier);
5208 memset(&rsp, 0, sizeof(rsp));
5210 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5212 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5214 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5216 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5220 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR L2CAP signaling command to its handler.
 * Echo requests are answered inline; unknown opcodes are logged.
 * NOTE(review): line-number gaps show dropped lines (break statements,
 * the error return) in this extraction.
 */
5225 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5226 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5231 switch (cmd->code) {
5232 case L2CAP_COMMAND_REJ:
5233 l2cap_command_rej(conn, cmd, cmd_len, data);
5236 case L2CAP_CONN_REQ:
5237 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5240 case L2CAP_CONN_RSP:
5241 case L2CAP_CREATE_CHAN_RSP:
5242 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5245 case L2CAP_CONF_REQ:
5246 err = l2cap_config_req(conn, cmd, cmd_len, data);
5249 case L2CAP_CONF_RSP:
5250 l2cap_config_rsp(conn, cmd, cmd_len, data);
5253 case L2CAP_DISCONN_REQ:
5254 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5257 case L2CAP_DISCONN_RSP:
5258 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5261 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back to the sender. */
5262 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5265 case L2CAP_ECHO_RSP:
5268 case L2CAP_INFO_REQ:
5269 err = l2cap_information_req(conn, cmd, cmd_len, data);
5272 case L2CAP_INFO_RSP:
5273 l2cap_information_rsp(conn, cmd, cmd_len, data);
5276 case L2CAP_CREATE_CHAN_REQ:
5277 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5280 case L2CAP_MOVE_CHAN_REQ:
5281 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5284 case L2CAP_MOVE_CHAN_RSP:
5285 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5288 case L2CAP_MOVE_CHAN_CFM:
5289 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5292 case L2CAP_MOVE_CHAN_CFM_RSP:
5293 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5297 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command; only the connection-parameter
 * update request is actively handled here.
 */
5305 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5306 struct l2cap_cmd_hdr *cmd, u8 *data)
5308 switch (cmd->code) {
5309 case L2CAP_COMMAND_REJ:
5312 case L2CAP_CONN_PARAM_UPDATE_REQ:
5313 return l2cap_conn_param_update_req(conn, cmd, data);
5315 case L2CAP_CONN_PARAM_UPDATE_RSP:
5319 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Map a handler errno to the little-endian L2CAP Command Reject reason.
 * NOTE(review): the switch header and case labels were dropped from
 * this extraction; only the return expressions remain visible.
 */
5324 static __le16 l2cap_err_to_reason(int err)
5328 return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
5330 return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED);
5334 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
/* Process one PDU on the LE signaling channel: sanity-check link type
 * and header, verify the declared length and ident, dispatch to
 * l2cap_le_sig_cmd(), and send a Command Reject on handler error.
 */
5338 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5339 struct sk_buff *skb)
5341 struct hci_conn *hcon = conn->hcon;
5342 struct l2cap_cmd_hdr *cmd;
5346 if (hcon->type != LE_LINK)
5349 if (skb->len < L2CAP_CMD_HDR_SIZE)
5352 cmd = (void *) skb->data;
5353 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5355 len = le16_to_cpu(cmd->len);
5357 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* The header length must match the remaining payload exactly, and
 * ident 0 is reserved/invalid.
 */
5359 if (len != skb->len || !cmd->ident) {
5360 BT_DBG("corrupted command");
5364 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5366 struct l2cap_cmd_rej_unk rej;
5368 BT_ERR("Wrong link type (%d)", err);
5370 rej.reason = l2cap_err_to_reason(err);
5371 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process the BR/EDR signaling channel: the skb may contain multiple
 * concatenated commands, each parsed and dispatched in turn; handler
 * errors produce a Command Reject carrying the mapped reason.
 * NOTE(review): line-number gaps show dropped lines (len declaration,
 * loop advance, braces) in this extraction.
 */
5379 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5380 struct sk_buff *skb)
5382 struct hci_conn *hcon = conn->hcon;
5383 u8 *data = skb->data;
5385 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
5388 l2cap_raw_recv(conn, skb);
5390 if (hcon->type != ACL_LINK)
5393 while (len >= L2CAP_CMD_HDR_SIZE) {
5395 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5396 data += L2CAP_CMD_HDR_SIZE;
5397 len -= L2CAP_CMD_HDR_SIZE;
5399 cmd_len = le16_to_cpu(cmd.len);
5401 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Declared length must fit in the remaining buffer; ident 0 invalid. */
5404 if (cmd_len > len || !cmd.ident) {
5405 BT_DBG("corrupted command");
5409 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5411 struct l2cap_cmd_rej_unk rej;
5413 BT_ERR("Wrong link type (%d)", err);
5415 rej.reason = l2cap_err_to_reason(err);
5416 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS on a received frame when the channel uses
 * L2CAP_FCS_CRC16: trim the 2-byte FCS off the tail, read it from just
 * past the trimmed length, and recompute the CRC over header + payload
 * (the header sits hdr_size bytes before skb->data).
 * NOTE(review): return statements were dropped from this extraction.
 */
5428 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5430 u16 our_fcs, rcv_fcs;
5433 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5434 hdr_size = L2CAP_EXT_HDR_SIZE;
5436 hdr_size = L2CAP_ENH_HDR_SIZE;
5438 if (chan->fcs == L2CAP_FCS_CRC16) {
5439 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
/* After the trim, skb->data + skb->len points at the stripped FCS. */
5440 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5441 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5443 if (our_fcs != rcv_fcs)
/* After local-busy changes, tell the peer our state: send RNR if still
 * busy, otherwise resume ERTM transmission and make sure the F-bit gets
 * delivered (in an I-frame if possible, else a final RR).
 */
5449 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5451 struct l2cap_ctrl control;
5453 BT_DBG("chan %p", chan);
5455 memset(&control, 0, sizeof(control));
5458 control.reqseq = chan->buffer_seq;
5459 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5461 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5462 control.super = L2CAP_SUPER_RNR;
5463 l2cap_send_sframe(chan, &control);
5466 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5467 chan->unacked_frames > 0)
5468 __set_retrans_timer(chan);
5470 /* Send pending iframes */
5471 l2cap_ertm_send(chan);
5473 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5474 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5475 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5478 control.super = L2CAP_SUPER_RR;
5479 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list (creating the list if needed),
 * track the list tail via *last_frag, and keep skb's length/truesize
 * accounting consistent with the added fragment.
 */
5483 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5484 struct sk_buff **last_frag)
5486 /* skb->len reflects data in skb as well as all fragments
5487 * skb->data_len reflects only data in fragments
5489 if (!skb_has_frag_list(skb))
5490 skb_shinfo(skb)->frag_list = new_frag;
5492 new_frag->next = NULL;
5494 (*last_frag)->next = new_frag;
5495 *last_frag = new_frag;
5497 skb->len += new_frag->len;
5498 skb->data_len += new_frag->len;
5499 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM segments based on the SAR bits:
 * UNSEGMENTED delivers immediately; START records the total SDU length
 * (first two bytes of payload) and begins accumulating; CONTINUE/END
 * append fragments, delivering to chan->ops->recv when complete. The
 * error path frees the partial SDU and resets the reassembly state.
 * NOTE(review): many lines (case END, length checks, error labels) were
 * dropped from this extraction; block is incomplete.
 */
5502 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5503 struct l2cap_ctrl *control)
5507 switch (control->sar) {
5508 case L2CAP_SAR_UNSEGMENTED:
5512 err = chan->ops->recv(chan, skb);
5515 case L2CAP_SAR_START:
5519 chan->sdu_len = get_unaligned_le16(skb->data);
5520 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU is a protocol violation. */
5522 if (chan->sdu_len > chan->imtu) {
5527 if (skb->len >= chan->sdu_len)
5531 chan->sdu_last_frag = skb;
5537 case L2CAP_SAR_CONTINUE:
5541 append_skb_frag(chan->sdu, skb,
5542 &chan->sdu_last_frag);
5545 if (chan->sdu->len >= chan->sdu_len)
5555 append_skb_frag(chan->sdu, skb,
5556 &chan->sdu_last_frag);
5559 if (chan->sdu->len != chan->sdu_len)
5562 err = chan->ops->recv(chan, chan->sdu);
5565 /* Reassembly complete */
5567 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU. */
5575 kfree_skb(chan->sdu);
5577 chan->sdu_last_frag = NULL;
/* Resegmentation after a move — placeholder in this code. */
5584 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition into the ERTM tx state machine
 * (only meaningful in ERTM mode).
 */
5590 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5594 if (chan->mode != L2CAP_MODE_ERTM)
5597 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5598 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver frames in sequence order to
 * l2cap_reassemble_sdu() until a gap (or local busy) stops us; when the
 * queue empties, return to the normal RECV state and send an ack.
 * NOTE(review): line-number gaps show dropped lines (break on miss,
 * error handling, return) in this extraction.
 */
5601 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5604 /* Pass sequential frames to l2cap_reassemble_sdu()
5605 * until a gap is encountered.
5608 BT_DBG("chan %p", chan);
5610 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5611 struct sk_buff *skb;
5612 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5613 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5615 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5620 skb_unlink(skb, &chan->srej_q);
5621 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5622 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5627 if (skb_queue_empty(&chan->srej_q)) {
5628 chan->rx_state = L2CAP_RX_STATE_RECV;
5629 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame. An SREJ for the next (unsent) tx seq
 * is invalid and disconnects; a seq past the retry limit also
 * disconnects. Otherwise the requested frame is retransmitted, with
 * poll/final-bit bookkeeping so duplicate SREJs (SREJ_ACT) are not
 * retransmitted twice.
 * NOTE(review): line-number gaps show dropped lines (returns, braces)
 * in this extraction.
 */
5635 static void l2cap_handle_srej(struct l2cap_chan *chan,
5636 struct l2cap_ctrl *control)
5638 struct sk_buff *skb;
5640 BT_DBG("chan %p, control %p", chan, control);
5642 if (control->reqseq == chan->next_tx_seq) {
5643 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5644 l2cap_send_disconn_req(chan, ECONNRESET);
5648 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5651 BT_DBG("Seq %d not available for retransmission",
5656 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5657 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5658 l2cap_send_disconn_req(chan, ECONNRESET);
5662 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5664 if (control->poll) {
/* P-bit set: must answer with F-bit, then retransmit and resume. */
5665 l2cap_pass_to_tx(chan, control);
5667 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5668 l2cap_retransmit(chan, control);
5669 l2cap_ertm_send(chan);
5671 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5672 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5673 chan->srej_save_reqseq = control->reqseq;
5676 l2cap_pass_to_tx_fbit(chan, control);
5678 if (control->final) {
/* F-bit set: retransmit only if this isn't the SREJ we already
 * acted on while waiting for the F-bit.
 */
5679 if (chan->srej_save_reqseq != control->reqseq ||
5680 !test_and_clear_bit(CONN_SREJ_ACT,
5682 l2cap_retransmit(chan, control);
5684 l2cap_retransmit(chan, control);
5685 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5686 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5687 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame. Invalid reqseq (equal to next_tx_seq)
 * or exceeding the retry limit disconnects; otherwise everything from
 * reqseq onward is retransmitted, with REJ_ACT preventing a duplicate
 * retransmit when the F-bit arrives for a REJ already acted on.
 * NOTE(review): line-number gaps show dropped lines (returns, braces)
 * in this extraction.
 */
5693 static void l2cap_handle_rej(struct l2cap_chan *chan,
5694 struct l2cap_ctrl *control)
5696 struct sk_buff *skb;
5698 BT_DBG("chan %p, control %p", chan, control);
5700 if (control->reqseq == chan->next_tx_seq) {
5701 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5702 l2cap_send_disconn_req(chan, ECONNRESET);
5706 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5708 if (chan->max_tx && skb &&
5709 bt_cb(skb)->control.retries >= chan->max_tx) {
5710 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5711 l2cap_send_disconn_req(chan, ECONNRESET);
5715 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5717 l2cap_pass_to_tx(chan, control);
5719 if (control->final) {
5720 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5721 l2cap_retransmit_all(chan, control);
5723 l2cap_retransmit_all(chan, control);
5724 l2cap_ertm_send(chan);
5725 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5726 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's txseq relative to the rx window:
 * EXPECTED, EXPECTED_SREJ, DUPLICATE(_SREJ), UNEXPECTED(_SREJ),
 * INVALID, or INVALID_IGNORE. In SREJ_SENT state the srej list/queue
 * decide; otherwise window arithmetic against last_acked_seq does.
 * The "double poll" wrap-around case is ignorable only when the tx
 * window is at most half the sequence space (see comment below).
 * NOTE(review): line-number gaps show dropped lines (returns/braces)
 * in this extraction.
 */
5730 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5732 BT_DBG("chan %p, txseq %d", chan, txseq);
5734 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5735 chan->expected_tx_seq);
5737 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5738 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5740 /* See notes below regarding "double poll" and
5743 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5744 BT_DBG("Invalid/Ignore - after SREJ");
5745 return L2CAP_TXSEQ_INVALID_IGNORE;
5747 BT_DBG("Invalid - in window after SREJ sent");
5748 return L2CAP_TXSEQ_INVALID;
5752 if (chan->srej_list.head == txseq) {
5753 BT_DBG("Expected SREJ");
5754 return L2CAP_TXSEQ_EXPECTED_SREJ;
5757 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5758 BT_DBG("Duplicate SREJ - txseq already stored");
5759 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5762 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5763 BT_DBG("Unexpected SREJ - not requested");
5764 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5768 if (chan->expected_tx_seq == txseq) {
5769 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5771 BT_DBG("Invalid - txseq outside tx window");
5772 return L2CAP_TXSEQ_INVALID;
5775 return L2CAP_TXSEQ_EXPECTED;
5779 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5780 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5781 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5782 return L2CAP_TXSEQ_DUPLICATE;
5785 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5786 /* A source of invalid packets is a "double poll" condition,
5787 * where delays cause us to send multiple poll packets. If
5788 * the remote stack receives and processes both polls,
5789 * sequence numbers can wrap around in such a way that a
5790 * resent frame has a sequence number that looks like new data
5791 * with a sequence gap. This would trigger an erroneous SREJ
5794 * Fortunately, this is impossible with a tx window that's
5795 * less than half of the maximum sequence number, which allows
5796 * invalid frames to be safely ignored.
5798 * With tx window sizes greater than half of the tx window
5799 * maximum, the frame is invalid and cannot be ignored. This
5800 * causes a disconnect.
5803 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5804 BT_DBG("Invalid/Ignore - txseq outside tx window");
5805 return L2CAP_TXSEQ_INVALID_IGNORE;
5807 BT_DBG("Invalid - txseq outside tx window");
5808 return L2CAP_TXSEQ_INVALID;
5811 BT_DBG("Unexpected - txseq indicates missing frames");
5812 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive-side state machine handler for the default RECV state.
 * Dispatches on the RX event (I-frame or one of the S-frame types) and,
 * for I-frames, on l2cap_classify_txseq()'s verdict for control->txseq.
 * On an invalid txseq or reqseq the channel is torn down with
 * l2cap_send_disconn_req(ECONNRESET).  An skb that was not queued for
 * later reassembly (skb_in_use stays false) is freed before returning.
 * NOTE(review): this extract elides some lines (breaks, brace closures,
 * skb_in_use assignments) present in the full file.
 */
5816 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5817 struct l2cap_ctrl *control,
5818 struct sk_buff *skb, u8 event)
5821 bool skb_in_use = false;
5823 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5827 case L2CAP_EV_RECV_IFRAME:
5828 switch (l2cap_classify_txseq(chan, control->txseq)) {
5829 case L2CAP_TXSEQ_EXPECTED:
		/* In-order frame: ack state in the TX path first. */
5830 l2cap_pass_to_tx(chan, control);
5832 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Local busy: drop the frame; it will be recovered
			 * via retransmission once local busy clears. */
5833 BT_DBG("Busy, discarding expected seq %d",
5838 chan->expected_tx_seq = __next_seq(chan,
5841 chan->buffer_seq = chan->expected_tx_seq;
5844 err = l2cap_reassemble_sdu(chan, skb, control);
5848 if (control->final) {
5849 if (!test_and_clear_bit(CONN_REJ_ACT,
5850 &chan->conn_state)) {
				/* F-bit set and no REJ outstanding:
				 * retransmit unacked frames. */
5852 l2cap_retransmit_all(chan, control);
5853 l2cap_ertm_send(chan);
5857 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5858 l2cap_send_ack(chan);
5860 case L2CAP_TXSEQ_UNEXPECTED:
5861 l2cap_pass_to_tx(chan, control);
5863 /* Can't issue SREJ frames in the local busy state.
5864 * Drop this frame, it will be seen as missing
5865 * when local busy is exited.
5867 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5868 BT_DBG("Busy, discarding unexpected seq %d",
5873 /* There was a gap in the sequence, so an SREJ
5874 * must be sent for each missing frame. The
5875 * current frame is stored for later use.
5877 skb_queue_tail(&chan->srej_q, skb);
5879 BT_DBG("Queued %p (queue len %d)", skb,
5880 skb_queue_len(&chan->srej_q));
5882 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5883 l2cap_seq_list_clear(&chan->srej_list);
5884 l2cap_send_srej(chan, control->txseq);
		/* Enter SREJ_SENT until the gap is filled. */
5886 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5888 case L2CAP_TXSEQ_DUPLICATE:
		/* Duplicate data: still process the piggy-backed ack. */
5889 l2cap_pass_to_tx(chan, control);
5891 case L2CAP_TXSEQ_INVALID_IGNORE:
5893 case L2CAP_TXSEQ_INVALID:
5895 l2cap_send_disconn_req(chan, ECONNRESET);
5899 case L2CAP_EV_RECV_RR:
5900 l2cap_pass_to_tx(chan, control);
5901 if (control->final) {
5902 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5904 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
		    /* Don't retransmit while an AMP channel move is
		     * in progress. */
5905 !__chan_is_moving(chan)) {
5907 l2cap_retransmit_all(chan, control);
5910 l2cap_ertm_send(chan);
5911 } else if (control->poll) {
		/* P-bit set: answer the poll with F-bit set. */
5912 l2cap_send_i_or_rr_or_rnr(chan);
5914 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5915 &chan->conn_state) &&
5916 chan->unacked_frames)
5917 __set_retrans_timer(chan);
5919 l2cap_ertm_send(chan);
5922 case L2CAP_EV_RECV_RNR:
	/* Peer signalled Receiver Not Ready: stop retransmitting. */
5923 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5924 l2cap_pass_to_tx(chan, control);
5925 if (control && control->poll) {
5926 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5927 l2cap_send_rr_or_rnr(chan, 0);
5929 __clear_retrans_timer(chan);
5930 l2cap_seq_list_clear(&chan->retrans_list);
5932 case L2CAP_EV_RECV_REJ:
5933 l2cap_handle_rej(chan, control);
5935 case L2CAP_EV_RECV_SREJ:
5936 l2cap_handle_srej(chan, control);
	/* Frame was not stashed anywhere above; release it. */
5942 if (skb && !skb_in_use) {
5943 BT_DBG("Freeing %p", skb);
/* ERTM receive-side state machine handler for the SREJ_SENT state:
 * we have asked the peer to selectively retransmit one or more missing
 * I-frames.  Incoming I-frames are queued on chan->srej_q until the
 * sequence gap is closed; additional gaps trigger further SREJs.
 * NOTE(review): this extract elides some lines (breaks, brace closures,
 * skb_in_use assignments) present in the full file.
 */
5950 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5951 struct l2cap_ctrl *control,
5952 struct sk_buff *skb, u8 event)
5955 u16 txseq = control->txseq;
5956 bool skb_in_use = false;
5958 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5962 case L2CAP_EV_RECV_IFRAME:
5963 switch (l2cap_classify_txseq(chan, txseq)) {
5964 case L2CAP_TXSEQ_EXPECTED:
5965 /* Keep frame for reassembly later */
5966 l2cap_pass_to_tx(chan, control);
5967 skb_queue_tail(&chan->srej_q, skb);
5969 BT_DBG("Queued %p (queue len %d)", skb,
5970 skb_queue_len(&chan->srej_q));
5972 chan->expected_tx_seq = __next_seq(chan, txseq);
5974 case L2CAP_TXSEQ_EXPECTED_SREJ:
		/* This is the retransmission we asked for next;
		 * drop it from the outstanding-SREJ list. */
5975 l2cap_seq_list_pop(&chan->srej_list);
5977 l2cap_pass_to_tx(chan, control);
5978 skb_queue_tail(&chan->srej_q, skb);
5980 BT_DBG("Queued %p (queue len %d)", skb,
5981 skb_queue_len(&chan->srej_q));
		/* Try to drain srej_q now that a gap may be filled. */
5983 err = l2cap_rx_queued_iframes(chan);
5988 case L2CAP_TXSEQ_UNEXPECTED:
5989 /* Got a frame that can't be reassembled yet.
5990 * Save it for later, and send SREJs to cover
5991 * the missing frames.
5993 skb_queue_tail(&chan->srej_q, skb);
5995 BT_DBG("Queued %p (queue len %d)", skb,
5996 skb_queue_len(&chan->srej_q));
5998 l2cap_pass_to_tx(chan, control);
5999 l2cap_send_srej(chan, control->txseq);
6001 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6002 /* This frame was requested with an SREJ, but
6003 * some expected retransmitted frames are
6004 * missing. Request retransmission of missing
6007 skb_queue_tail(&chan->srej_q, skb);
6009 BT_DBG("Queued %p (queue len %d)", skb,
6010 skb_queue_len(&chan->srej_q));
6012 l2cap_pass_to_tx(chan, control);
6013 l2cap_send_srej_list(chan, control->txseq);
6015 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6016 /* We've already queued this frame. Drop this copy. */
6017 l2cap_pass_to_tx(chan, control);
6019 case L2CAP_TXSEQ_DUPLICATE:
6020 /* Expecting a later sequence number, so this frame
6021 * was already received. Ignore it completely.
6024 case L2CAP_TXSEQ_INVALID_IGNORE:
6026 case L2CAP_TXSEQ_INVALID:
6028 l2cap_send_disconn_req(chan, ECONNRESET);
6032 case L2CAP_EV_RECV_RR:
6033 l2cap_pass_to_tx(chan, control);
6034 if (control->final) {
6035 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6037 if (!test_and_clear_bit(CONN_REJ_ACT,
6038 &chan->conn_state)) {
6040 l2cap_retransmit_all(chan, control);
6043 l2cap_ertm_send(chan);
6044 } else if (control->poll) {
6045 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6046 &chan->conn_state) &&
6047 chan->unacked_frames) {
6048 __set_retrans_timer(chan);
		/* Answer the poll by re-issuing the newest SREJ
		 * with the F-bit set. */
6051 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6052 l2cap_send_srej_tail(chan);
6054 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6055 &chan->conn_state) &&
6056 chan->unacked_frames)
6057 __set_retrans_timer(chan);
6059 l2cap_send_ack(chan);
6062 case L2CAP_EV_RECV_RNR:
6063 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6064 l2cap_pass_to_tx(chan, control);
6065 if (control->poll) {
6066 l2cap_send_srej_tail(chan);
		/* No poll: just report our current receive window
		 * with a plain RR frame. */
6068 struct l2cap_ctrl rr_control;
6069 memset(&rr_control, 0, sizeof(rr_control));
6070 rr_control.sframe = 1;
6071 rr_control.super = L2CAP_SUPER_RR;
6072 rr_control.reqseq = chan->buffer_seq;
6073 l2cap_send_sframe(chan, &rr_control);
6077 case L2CAP_EV_RECV_REJ:
6078 l2cap_handle_rej(chan, control);
6080 case L2CAP_EV_RECV_SREJ:
6081 l2cap_handle_srej(chan, control);
	/* Frame was not stashed anywhere above; release it. */
6085 if (skb && !skb_in_use) {
6086 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return the RX state machine to RECV,
 * refresh conn->mtu from the controller now carrying the channel
 * (block_mtu vs acl_mtu — the selecting conditional is elided in this
 * extract; presumably keyed on chan->hs_hcon — TODO confirm against the
 * full file), and resegment any pending outbound SDUs for the new MTU.
 */
6093 static int l2cap_finish_move(struct l2cap_chan *chan)
6095 BT_DBG("chan %p", chan);
6097 chan->rx_state = L2CAP_RX_STATE_RECV;
6100 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6102 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6104 return l2cap_resegment(chan);
/* RX handler for the WAIT_P state (channel move in progress, waiting
 * for a frame with the P-bit set).  Once the poll arrives: ack via
 * reqseq, rewind the TX queue/sequence state to the peer's reqseq,
 * finish the move, respond with the F-bit set, and re-process the
 * triggering event in the RECV state.
 */
6107 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6108 struct l2cap_ctrl *control,
6109 struct sk_buff *skb, u8 event)
6113 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6119 l2cap_process_reqseq(chan, control->reqseq);
6121 if (!skb_queue_empty(&chan->tx_q))
6122 chan->tx_send_head = skb_peek(&chan->tx_q);
6124 chan->tx_send_head = NULL;
6126 /* Rewind next_tx_seq to the point expected
6129 chan->next_tx_seq = control->reqseq;
6130 chan->unacked_frames = 0;
6132 err = l2cap_finish_move(chan);
6136 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6137 l2cap_send_i_or_rr_or_rnr(chan);
6139 if (event == L2CAP_EV_RECV_IFRAME)
	/* S-frames carry no data: re-run the event with a NULL skb. */
6142 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX handler for the WAIT_F state (channel move in progress, waiting
 * for a frame with the F-bit set).  Frames without the F-bit are not
 * processed here.  On the final frame: clear remote busy, return to
 * RECV, rewind TX state to the peer's reqseq, pick the MTU for the
 * controller now carrying the channel, resegment, then process the
 * frame normally via l2cap_rx_state_recv().
 */
6145 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6146 struct l2cap_ctrl *control,
6147 struct sk_buff *skb, u8 event)
6151 if (!control->final)
6154 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6156 chan->rx_state = L2CAP_RX_STATE_RECV;
6157 l2cap_process_reqseq(chan, control->reqseq);
6159 if (!skb_queue_empty(&chan->tx_q))
6160 chan->tx_send_head = skb_peek(&chan->tx_q);
6162 chan->tx_send_head = NULL;
6164 /* Rewind next_tx_seq to the point expected
6167 chan->next_tx_seq = control->reqseq;
6168 chan->unacked_frames = 0;
	/* MTU source selection (hs vs BR/EDR) — conditional elided in
	 * this extract; see l2cap_finish_move() for the same pattern. */
6171 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6173 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6175 err = l2cap_resegment(chan);
6178 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Validate an incoming reqseq (acknowledgement sequence number):
 * it must fall within the window of frames that have been transmitted
 * (up to next_tx_seq) but not yet acknowledged (from expected_ack_seq).
 * Offsets are computed modulo the sequence space via __seq_offset().
 */
6183 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6185 /* Make sure reqseq is for a packet that has been sent but not acked */
6188 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6189 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry point: validate the frame's reqseq and
 * dispatch to the handler for the channel's current RX state.  An
 * out-of-window reqseq is a protocol violation and disconnects the
 * channel with ECONNRESET.
 */
6192 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6193 struct sk_buff *skb, u8 event)
6197 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6198 control, skb, event, chan->rx_state);
6200 if (__valid_reqseq(chan, control->reqseq)) {
6201 switch (chan->rx_state) {
6202 case L2CAP_RX_STATE_RECV:
6203 err = l2cap_rx_state_recv(chan, control, skb, event);
6205 case L2CAP_RX_STATE_SREJ_SENT:
6206 err = l2cap_rx_state_srej_sent(chan, control, skb,
6209 case L2CAP_RX_STATE_WAIT_P:
6210 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6212 case L2CAP_RX_STATE_WAIT_F:
6213 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6220 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6221 control->reqseq, chan->next_tx_seq,
6222 chan->expected_ack_seq);
6223 l2cap_send_disconn_req(chan, ECONNRESET);
/* Receive path for Streaming Mode: only the expected txseq is
 * reassembled; anything else is dropped (streaming mode has no
 * retransmission).  A failed/partial SDU in progress is discarded so
 * reassembly restarts cleanly.  Receive state always advances to the
 * frame's txseq.
 */
6229 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6230 struct sk_buff *skb)
6234 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6237 if (l2cap_classify_txseq(chan, control->txseq) ==
6238 L2CAP_TXSEQ_EXPECTED) {
6239 l2cap_pass_to_tx(chan, control);
6241 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6242 __next_seq(chan, chan->buffer_seq));
6244 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6246 l2cap_reassemble_sdu(chan, skb, control);
	/* Drop any partially reassembled SDU; streaming mode cannot
	 * recover the missing fragments. */
6249 kfree_skb(chan->sdu);
6252 chan->sdu_last_frag = NULL;
6256 BT_DBG("Freeing %p", skb);
	/* Track the peer's position even for discarded frames. */
6261 chan->last_acked_seq = control->txseq;
6262 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Parse and validate an ERTM/Streaming data frame, then route it:
 * unpack the control field, check FCS, validate payload length against
 * MPS, validate F/P bit combinations against the TX state, and hand
 * I-frames to l2cap_rx()/l2cap_stream_rx() or map S-frames to RX
 * events.  Malformed frames either get silently dropped (bad FCS) or
 * cause a disconnect (length/field violations).
 */
6267 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6269 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6273 __unpack_control(chan, skb);
6278 * We can just drop the corrupted I-frame here.
6279 * Receiver will miss it and start proper recovery
6280 * procedures and ask for retransmission.
6282 if (l2cap_check_fcs(chan, skb))
	/* Strip SDU-length and FCS overheads before checking the
	 * payload against the negotiated MPS. */
6285 if (!control->sframe && control->sar == L2CAP_SAR_START)
6286 len -= L2CAP_SDULEN_SIZE;
6288 if (chan->fcs == L2CAP_FCS_CRC16)
6289 len -= L2CAP_FCS_SIZE;
6291 if (len > chan->mps) {
6292 l2cap_send_disconn_req(chan, ECONNRESET);
6296 if (!control->sframe) {
6299 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6300 control->sar, control->reqseq, control->final,
6303 /* Validate F-bit - F=0 always valid, F=1 only
6304 * valid in TX WAIT_F
6306 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6309 if (chan->mode != L2CAP_MODE_STREAMING) {
6310 event = L2CAP_EV_RECV_IFRAME;
6311 err = l2cap_rx(chan, control, skb, event);
6313 err = l2cap_stream_rx(chan, control, skb);
6317 l2cap_send_disconn_req(chan, ECONNRESET);
	/* Map the 2-bit S-frame "super" field to an RX event. */
6319 const u8 rx_func_to_event[4] = {
6320 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6321 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6324 /* Only I-frames are expected in streaming mode */
6325 if (chan->mode == L2CAP_MODE_STREAMING)
6328 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6329 control->reqseq, control->final, control->poll,
	/* S-frames must carry no payload. */
6333 BT_ERR("Trailing bytes: %d in sframe", len);
6334 l2cap_send_disconn_req(chan, ECONNRESET);
6338 /* Validate F and P bits */
6339 if (control->final && (control->poll ||
6340 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6343 event = rx_func_to_event[control->super];
6344 if (l2cap_rx(chan, control, skb, event))
6345 l2cap_send_disconn_req(chan, ECONNRESET);
/* Deliver a frame to the connection-oriented channel identified by
 * scid.  If no channel exists, an A2MP CID may create one on demand;
 * otherwise the packet is dropped.  Basic mode hands the skb straight
 * to the channel ops (dropping oversize frames), while ERTM/Streaming
 * go through l2cap_data_rcv().  The channel lock is held across
 * delivery.
 */
6355 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6356 struct sk_buff *skb)
6358 struct l2cap_chan *chan;
6360 chan = l2cap_get_chan_by_scid(conn, cid);
6362 if (cid == L2CAP_CID_A2MP) {
6363 chan = a2mp_channel_create(conn, skb);
6369 l2cap_chan_lock(chan);
6371 BT_DBG("unknown cid 0x%4.4x", cid);
6372 /* Drop packet and return */
6378 BT_DBG("chan %p, len %d", chan, skb->len);
6380 if (chan->state != BT_CONNECTED)
6383 switch (chan->mode) {
6384 case L2CAP_MODE_BASIC:
6385 /* If socket recv buffers overflows we drop data here
6386 * which is *bad* because L2CAP has to be reliable.
6387 * But we don't have any other choice. L2CAP doesn't
6388 * provide flow control mechanism. */
6390 if (chan->imtu < skb->len)
6393 if (!chan->ops->recv(chan, skb))
6397 case L2CAP_MODE_ERTM:
6398 case L2CAP_MODE_STREAMING:
6399 l2cap_data_rcv(chan, skb);
6403 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6411 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to the global channel
 * registered for the given PSM.  Only ACL links carry connectionless
 * data; frames for missing/unbound channels or exceeding the channel's
 * MTU are dropped.
 */
6414 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6415 struct sk_buff *skb)
6417 struct hci_conn *hcon = conn->hcon;
6418 struct l2cap_chan *chan;
6420 if (hcon->type != ACL_LINK)
6423 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6427 BT_DBG("chan %p, len %d", chan, skb->len);
6429 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6432 if (chan->imtu < skb->len)
6435 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel packet (LE links only) to the global
 * connected channel registered for L2CAP_CID_ATT on this address pair.
 * Frames larger than the channel MTU, or with no matching channel, are
 * dropped.
 */
6442 static void l2cap_att_channel(struct l2cap_conn *conn,
6443 struct sk_buff *skb)
6445 struct hci_conn *hcon = conn->hcon;
6446 struct l2cap_chan *chan;
6448 if (hcon->type != LE_LINK)
6451 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6452 conn->src, conn->dst);
6456 BT_DBG("chan %p, len %d", chan, skb->len);
6458 if (chan->imtu < skb->len)
6461 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete, reassembled L2CAP frame by CID: signalling,
 * connectionless, ATT, LE signalling, SMP, or a data channel.  The
 * basic header length field must match the remaining skb length.
 * Takes ownership of the skb.
 */
6468 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6470 struct l2cap_hdr *lh = (void *) skb->data;
6474 skb_pull(skb, L2CAP_HDR_SIZE);
6475 cid = __le16_to_cpu(lh->cid);
6476 len = __le16_to_cpu(lh->len);
6478 if (len != skb->len) {
6483 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6486 case L2CAP_CID_SIGNALING:
6487 l2cap_sig_channel(conn, skb);
6490 case L2CAP_CID_CONN_LESS:
	/* Connectionless frames carry the PSM in front of the data;
	 * it may be unaligned, hence get_unaligned(). */
6491 psm = get_unaligned((__le16 *) skb->data);
6492 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6493 l2cap_conless_channel(conn, psm, skb);
6497 l2cap_att_channel(conn, skb);
6500 case L2CAP_CID_LE_SIGNALING:
6501 l2cap_le_sig_channel(conn, skb);
	/* SMP failure kills the connection with EACCES. */
6505 if (smp_sig_channel(conn, skb))
6506 l2cap_conn_del(conn->hcon, EACCES);
6510 l2cap_data_channel(conn, cid, skb);
6515 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening channels under chan_list_lock; lm1 accumulates the
 * link mode for sockets bound exactly to this adapter's address, lm2
 * for wildcard (BDADDR_ANY) listeners.  Returns the accept/master
 * flags, preferring the exact-match set when one was found.
 */
6517 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6519 int exact = 0, lm1 = 0, lm2 = 0;
6520 struct l2cap_chan *c;
6522 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6524 /* Find listening sockets and check their link_mode */
6525 read_lock(&chan_list_lock);
6526 list_for_each_entry(c, &chan_list, global_l) {
6527 struct sock *sk = c->sk;
6529 if (c->state != BT_LISTEN)
6532 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6533 lm1 |= HCI_LM_ACCEPT;
6534 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6535 lm1 |= HCI_LM_MASTER;
6537 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6538 lm2 |= HCI_LM_ACCEPT;
6539 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6540 lm2 |= HCI_LM_MASTER;
6543 read_unlock(&chan_list_lock);
6545 return exact ? lm1 : lm2;
/* HCI callback: connection establishment completed.  On success, set
 * up the L2CAP connection state and mark it ready; on failure, tear
 * down any existing L2CAP state with the mapped errno.
 */
6548 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6550 struct l2cap_conn *conn;
6552 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6555 conn = l2cap_conn_add(hcon);
6557 l2cap_conn_ready(conn);
6559 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason L2CAP wants this link disconnected.
 * Falls back to HCI_ERROR_REMOTE_USER_TERM when no L2CAP connection
 * state (and thus no recorded disc_reason) exists.
 */
6563 int l2cap_disconn_ind(struct hci_conn *hcon)
6565 struct l2cap_conn *conn = hcon->l2cap_data;
6567 BT_DBG("hcon %p", hcon);
6570 return HCI_ERROR_REMOTE_USER_TERM;
6571 return conn->disc_reason;
/* HCI callback: the link is gone — tear down all L2CAP state for it,
 * converting the HCI reason code to an errno for the upper layers.
 */
6574 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6576 BT_DBG("hcon %p reason %d", hcon, reason);
6578 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Encryption dropped: MEDIUM-security channels get a grace timer,
 * HIGH-security channels are closed immediately (ECONNREFUSED).
 * Encryption (re)enabled: cancel the grace timer for MEDIUM channels.
 */
6581 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6583 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6586 if (encrypt == 0x00) {
6587 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6588 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6589 } else if (chan->sec_level == BT_SECURITY_HIGH)
6590 l2cap_chan_close(chan, ECONNREFUSED);
6592 if (chan->sec_level == BT_SECURITY_MEDIUM)
6593 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption state changed for a link.
 * LE links hand off to SMP key distribution.  For BR/EDR, walk every
 * channel on the connection (under chan_lock, per-channel lock held):
 * A2MP fixed channels are skipped, ATT channels become ready on
 * successful encryption, connected/configuring channels get their
 * suspended sockets woken, BT_CONNECT channels proceed or time out,
 * and BT_CONNECT2 channels send a connect response whose result
 * depends on the security outcome and deferred-setup state.
 * NOTE(review): several elided lines (continue/goto, brace closures,
 * the security-success condition before BT_CONNECT2 handling) are not
 * visible in this extract.
 */
6597 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6599 struct l2cap_conn *conn = hcon->l2cap_data;
6600 struct l2cap_chan *chan;
6605 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6607 if (hcon->type == LE_LINK) {
6608 if (!status && encrypt)
6609 smp_distribute_keys(conn, 0);
6610 cancel_delayed_work(&conn->security_timer);
6613 mutex_lock(&conn->chan_lock);
6615 list_for_each_entry(chan, &conn->chan_l, list) {
6616 l2cap_chan_lock(chan);
6618 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6619 state_to_string(chan->state));
6621 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6622 l2cap_chan_unlock(chan);
6626 if (chan->scid == L2CAP_CID_ATT) {
6627 if (!status && encrypt) {
6628 chan->sec_level = hcon->sec_level;
6629 l2cap_chan_ready(chan);
6632 l2cap_chan_unlock(chan);
	/* Channels with a pending connect request are handled when
	 * the response arrives, not here. */
6636 if (!__l2cap_no_conn_pending(chan)) {
6637 l2cap_chan_unlock(chan);
6641 if (!status && (chan->state == BT_CONNECTED ||
6642 chan->state == BT_CONFIG)) {
6643 struct sock *sk = chan->sk;
		/* Security completed: let blocked socket ops resume. */
6645 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6646 sk->sk_state_change(sk);
6648 l2cap_check_encryption(chan, encrypt);
6649 l2cap_chan_unlock(chan);
6653 if (chan->state == BT_CONNECT) {
6655 l2cap_start_connection(chan);
6657 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6659 } else if (chan->state == BT_CONNECT2) {
6660 struct sock *sk = chan->sk;
6661 struct l2cap_conn_rsp rsp;
6667 if (test_bit(BT_SK_DEFER_SETUP,
6668 &bt_sk(sk)->flags)) {
6669 res = L2CAP_CR_PEND;
6670 stat = L2CAP_CS_AUTHOR_PEND;
			/* Let userspace authorize before completing. */
6671 chan->ops->defer(chan);
6673 __l2cap_state_change(chan, BT_CONFIG);
6674 res = L2CAP_CR_SUCCESS;
6675 stat = L2CAP_CS_NO_INFO;
		/* Security failed: refuse the connection. */
6678 __l2cap_state_change(chan, BT_DISCONN);
6679 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6680 res = L2CAP_CR_SEC_BLOCK;
6681 stat = L2CAP_CS_NO_INFO;
6686 rsp.scid = cpu_to_le16(chan->dcid);
6687 rsp.dcid = cpu_to_le16(chan->scid);
6688 rsp.result = cpu_to_le16(res);
6689 rsp.status = cpu_to_le16(stat);
6690 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
		/* Kick off configuration right away on success if no
		 * config request has been sent yet. */
6693 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6694 res == L2CAP_CR_SUCCESS) {
6696 set_bit(CONF_REQ_SENT, &chan->conf_state);
6697 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6699 l2cap_build_conf_req(chan, buf),
6701 chan->num_conf_req++;
6705 l2cap_chan_unlock(chan);
6708 mutex_unlock(&conn->chan_lock);
/* HCI callback: an ACL data fragment arrived.  Reassembles L2CAP
 * frames that span multiple ACL packets: a start fragment carries the
 * basic header (giving the total length); continuation fragments are
 * appended to conn->rx_skb until rx_len reaches zero, at which point
 * the complete frame is handed to l2cap_recv_frame().  Any framing
 * inconsistency (short header, over-long frame or fragment, unexpected
 * start/continuation) marks the connection unreliable (ECOMM) and
 * drops the partial buffer.
 */
6713 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6715 struct l2cap_conn *conn = hcon->l2cap_data;
6716 struct l2cap_hdr *hdr;
6719 /* For AMP controller do not create l2cap conn */
6720 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6724 conn = l2cap_conn_add(hcon);
6729 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6733 case ACL_START_NO_FLUSH:
	/* A start fragment while reassembly is in progress means the
	 * previous frame was truncated — discard it. */
6736 BT_ERR("Unexpected start frame (len %d)", skb->len);
6737 kfree_skb(conn->rx_skb);
6738 conn->rx_skb = NULL;
6740 l2cap_conn_unreliable(conn, ECOMM);
6743 /* Start fragment always begin with Basic L2CAP header */
6744 if (skb->len < L2CAP_HDR_SIZE) {
6745 BT_ERR("Frame is too short (len %d)", skb->len);
6746 l2cap_conn_unreliable(conn, ECOMM);
6750 hdr = (struct l2cap_hdr *) skb->data;
6751 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6753 if (len == skb->len) {
6754 /* Complete frame received */
6755 l2cap_recv_frame(conn, skb);
6759 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6761 if (skb->len > len) {
6762 BT_ERR("Frame is too long (len %d, expected len %d)",
6764 l2cap_conn_unreliable(conn, ECOMM);
6768 /* Allocate skb for the complete frame (with header) */
6769 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6773 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6775 conn->rx_len = len - skb->len;
6779 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6781 if (!conn->rx_len) {
6782 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6783 l2cap_conn_unreliable(conn, ECOMM);
6787 if (skb->len > conn->rx_len) {
6788 BT_ERR("Fragment is too long (len %d, expected %d)",
6789 skb->len, conn->rx_len);
6790 kfree_skb(conn->rx_skb);
6791 conn->rx_skb = NULL;
6793 l2cap_conn_unreliable(conn, ECOMM);
6797 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6799 conn->rx_len -= skb->len;
6801 if (!conn->rx_len) {
6802 /* Complete frame received. l2cap_recv_frame
6803 * takes ownership of the skb so set the global
6804 * rx_skb pointer to NULL first.
6806 struct sk_buff *rx_skb = conn->rx_skb;
6807 conn->rx_skb = NULL;
6808 l2cap_recv_frame(conn, rx_skb);
/* debugfs seq_file show handler: dump one line per registered L2CAP
 * channel (addresses, state, PSM, CIDs, MTUs, security level, mode),
 * iterating the global channel list under chan_list_lock.
 */
6818 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6820 struct l2cap_chan *c;
6822 read_lock(&chan_list_lock);
6824 list_for_each_entry(c, &chan_list, global_l) {
6825 struct sock *sk = c->sk;
6827 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6828 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6829 c->state, __le16_to_cpu(c->psm),
6830 c->scid, c->dcid, c->imtu, c->omtu,
6831 c->sec_level, c->mode);
6834 read_unlock(&chan_list_lock);
/* debugfs open handler: bind the file to the single-record seq_file
 * show callback above.
 */
6839 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6841 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (standard single_open
 * seq_file plumbing).
 */
6844 static const struct file_operations l2cap_debugfs_fops = {
6845 .open = l2cap_debugfs_open,
6847 .llseek = seq_lseek,
6848 .release = single_release,
/* Handle of the "l2cap" debugfs file, created in l2cap_init() and
 * removed in l2cap_exit().
 */
6851 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the debugfs
 * channel-dump file.  debugfs failure is logged but non-fatal.
 */
6853 int __init l2cap_init(void)
6857 err = l2cap_init_sockets();
6862 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6863 NULL, &l2cap_debugfs_fops);
6865 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: undo l2cap_init() — remove the debugfs entry and
 * unregister the socket layer.
 */
6871 void l2cap_exit(void)
6873 debugfs_remove(l2cap_debugfs)
6874 l2cap_cleanup_sockets();
/* Runtime-tunable module parameter: when set, Enhanced Retransmission
 * Mode is not offered/used for L2CAP channels.
 */
6877 module_param(disable_ertm, bool, 0644);
6878 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");