2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
218 if (conn->hcon->type == LE_LINK)
219 dyn_end = L2CAP_CID_LE_DYN_END;
221 dyn_end = L2CAP_CID_DYN_END;
223 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
224 if (!__l2cap_get_chan_by_scid(conn, cid))
231 static void l2cap_state_change(struct l2cap_chan *chan, int state)
233 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
234 state_to_string(state));
237 chan->ops->state_change(chan, state, 0);
240 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
244 chan->ops->state_change(chan, chan->state, err);
247 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
249 chan->ops->state_change(chan, chan->state, err);
252 static void __set_retrans_timer(struct l2cap_chan *chan)
254 if (!delayed_work_pending(&chan->monitor_timer) &&
255 chan->retrans_timeout) {
256 l2cap_set_timer(chan, &chan->retrans_timer,
257 msecs_to_jiffies(chan->retrans_timeout));
261 static void __set_monitor_timer(struct l2cap_chan *chan)
263 __clear_retrans_timer(chan);
264 if (chan->monitor_timeout) {
265 l2cap_set_timer(chan, &chan->monitor_timer,
266 msecs_to_jiffies(chan->monitor_timeout));
270 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
275 skb_queue_walk(head, skb) {
276 if (bt_cb(skb)->control.txseq == seq)
283 /* ---- L2CAP sequence number lists ---- */
285 /* For ERTM, ordered lists of sequence numbers must be tracked for
286 * SREJ requests that are received and for frames that are to be
287 * retransmitted. These seq_list functions implement a singly-linked
288 * list in an array, where membership in the list can also be checked
289 * in constant time. Items can also be added to the tail of the list
290 * and removed from the head in constant time, without further memory
294 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
296 size_t alloc_size, i;
298 /* Allocated size is a power of 2 to map sequence numbers
299 * (which may be up to 14 bits) in to a smaller array that is
300 * sized for the negotiated ERTM transmit windows.
302 alloc_size = roundup_pow_of_two(size);
304 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
308 seq_list->mask = alloc_size - 1;
309 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
310 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
311 for (i = 0; i < alloc_size; i++)
312 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
317 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
319 kfree(seq_list->list);
322 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
325 /* Constant-time check for list membership */
326 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
329 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
331 u16 mask = seq_list->mask;
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
334 /* In case someone tries to pop the head of an empty list */
335 return L2CAP_SEQ_LIST_CLEAR;
336 } else if (seq_list->head == seq) {
337 /* Head can be removed in constant time */
338 seq_list->head = seq_list->list[seq & mask];
339 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
341 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
342 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
343 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
346 /* Walk the list to find the sequence number */
347 u16 prev = seq_list->head;
348 while (seq_list->list[prev & mask] != seq) {
349 prev = seq_list->list[prev & mask];
350 if (prev == L2CAP_SEQ_LIST_TAIL)
351 return L2CAP_SEQ_LIST_CLEAR;
354 /* Unlink the number from the list and clear it */
355 seq_list->list[prev & mask] = seq_list->list[seq & mask];
356 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
357 if (seq_list->tail == seq)
358 seq_list->tail = prev;
363 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
365 /* Remove the head in constant time */
366 return l2cap_seq_list_remove(seq_list, seq_list->head);
369 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
373 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
376 for (i = 0; i <= seq_list->mask; i++)
377 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
379 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
380 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
383 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
385 u16 mask = seq_list->mask;
387 /* All appends happen in constant time */
389 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
392 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
393 seq_list->head = seq;
395 seq_list->list[seq_list->tail & mask] = seq;
397 seq_list->tail = seq;
398 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Channel timer expiry (delayed work): close the channel with a reason
 * derived from its current state.  Locking order is conn->chan_lock
 * then the channel lock; the channel lock is dropped before calling
 * ops->close.  NOTE(review): this dump is missing several original
 * lines (work member name in container_of, the default reason branch,
 * the closing braces) — confirm against the full source.
 */
401 static void l2cap_chan_timeout(struct work_struct *work)
403 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
405 struct l2cap_conn *conn = chan->conn;
408 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
410 mutex_lock(&conn->chan_lock);
411 l2cap_chan_lock(chan);
/* Established or configuring channels time out as refused connections */
413 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
414 reason = ECONNREFUSED;
415 else if (chan->state == BT_CONNECT &&
416 chan->sec_level != BT_SECURITY_SDP)
417 reason = ECONNREFUSED;
421 l2cap_chan_close(chan, reason);
423 l2cap_chan_unlock(chan);
/* ops->close runs without the channel lock held */
425 chan->ops->close(chan);
426 mutex_unlock(&conn->chan_lock);
/* Drop the reference taken when the timer was armed */
428 l2cap_chan_put(chan);
431 struct l2cap_chan *l2cap_chan_create(void)
433 struct l2cap_chan *chan;
435 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
439 mutex_init(&chan->lock);
441 write_lock(&chan_list_lock);
442 list_add(&chan->global_l, &chan_list);
443 write_unlock(&chan_list_lock);
445 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
447 chan->state = BT_OPEN;
449 kref_init(&chan->kref);
451 /* This flag is cleared in l2cap_chan_ready() */
452 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
454 BT_DBG("chan %p", chan);
459 static void l2cap_chan_destroy(struct kref *kref)
461 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
463 BT_DBG("chan %p", chan);
465 write_lock(&chan_list_lock);
466 list_del(&chan->global_l);
467 write_unlock(&chan_list_lock);
472 void l2cap_chan_hold(struct l2cap_chan *c)
474 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
479 void l2cap_chan_put(struct l2cap_chan *c)
481 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
483 kref_put(&c->kref, l2cap_chan_destroy);
486 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
488 chan->fcs = L2CAP_FCS_CRC16;
489 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
490 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
491 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
492 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
493 chan->sec_level = BT_SECURITY_LOW;
495 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
498 void l2cap_le_flowctl_init(struct l2cap_chan *chan)
500 chan->imtu = L2CAP_DEFAULT_MTU;
501 chan->omtu = L2CAP_LE_MIN_MTU;
502 chan->mode = L2CAP_MODE_LE_FLOWCTL;
503 chan->tx_credits = 0;
504 chan->rx_credits = L2CAP_LE_MAX_CREDITS;
506 if (chan->imtu < L2CAP_LE_DEFAULT_MPS)
507 chan->mps = chan->imtu;
509 chan->mps = L2CAP_LE_DEFAULT_MPS;
/* Attach a channel to a connection: allocate CIDs/MTUs according to
 * the channel type, set the default extended-flowspec parameters and
 * link the channel into conn->chan_l.  Caller must hold
 * conn->chan_lock (see l2cap_chan_add()).  NOTE(review): several
 * original lines (braces, `chan->conn = conn;`, case labels such as
 * L2CAP_CHAN_RAW, break statements) are elided in this dump.
 */
512 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
514 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
515 __le16_to_cpu(chan->psm), chan->dcid);
/* Until told otherwise, assume a user-initiated disconnect reason */
517 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
521 switch (chan->chan_type) {
522 case L2CAP_CHAN_CONN_ORIENTED:
523 if (conn->hcon->type == LE_LINK) {
/* ATT uses the fixed ATT CID; other LE channels get a dynamic CID */
524 if (chan->dcid == L2CAP_CID_ATT) {
525 chan->omtu = L2CAP_DEFAULT_MTU;
526 chan->scid = L2CAP_CID_ATT;
528 chan->scid = l2cap_alloc_cid(conn);
531 /* Alloc CID for connection-oriented socket */
532 chan->scid = l2cap_alloc_cid(conn);
533 chan->omtu = L2CAP_DEFAULT_MTU;
537 case L2CAP_CHAN_CONN_LESS:
538 /* Connectionless socket */
539 chan->scid = L2CAP_CID_CONN_LESS;
540 chan->dcid = L2CAP_CID_CONN_LESS;
541 chan->omtu = L2CAP_DEFAULT_MTU;
544 case L2CAP_CHAN_CONN_FIX_A2MP:
545 chan->scid = L2CAP_CID_A2MP;
546 chan->dcid = L2CAP_CID_A2MP;
547 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
548 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
552 /* Raw socket can send/recv signalling messages only */
553 chan->scid = L2CAP_CID_SIGNALING;
554 chan->dcid = L2CAP_CID_SIGNALING;
555 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort extended flow specification */
558 chan->local_id = L2CAP_BESTEFFORT_ID;
559 chan->local_stype = L2CAP_SERV_BESTEFFORT;
560 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
561 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
562 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
563 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
/* The connection list owns a channel reference and an hcon reference */
565 l2cap_chan_hold(chan);
567 hci_conn_hold(conn->hcon);
569 list_add(&chan->list, &conn->chan_l);
572 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
574 mutex_lock(&conn->chan_lock);
575 __l2cap_chan_add(conn, chan);
576 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection and tear down mode-specific
 * state (timers, queues, sequence lists).  NOTE(review): this dump is
 * missing the `if (conn)` guard around the unlink section, the
 * chan->conn = NULL assignment, the switch(chan->mode) opener, and
 * break statements — confirm against the full source.
 */
579 void l2cap_chan_del(struct l2cap_chan *chan, int err)
581 struct l2cap_conn *conn = chan->conn;
583 __clear_chan_timer(chan);
585 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
588 struct amp_mgr *mgr = conn->hcon->amp_mgr;
589 /* Delete from channel list */
590 list_del(&chan->list);
/* Drop the reference taken in __l2cap_chan_add() */
592 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an hcon reference of their own */
596 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
597 hci_conn_drop(conn->hcon);
599 if (mgr && mgr->bredr_chan == chan)
600 mgr->bredr_chan = NULL;
603 if (chan->hs_hchan) {
604 struct hci_chan *hs_hchan = chan->hs_hchan;
606 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
/* Tear down the AMP logical link backing this channel */
607 amp_disconnect_logical_link(hs_hchan);
610 chan->ops->teardown(chan, err);
612 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
616 case L2CAP_MODE_BASIC:
619 case L2CAP_MODE_LE_FLOWCTL:
620 skb_queue_purge(&chan->tx_q);
623 case L2CAP_MODE_ERTM:
/* Stop all ERTM timers before purging the queues they reference */
624 __clear_retrans_timer(chan);
625 __clear_monitor_timer(chan);
626 __clear_ack_timer(chan);
628 skb_queue_purge(&chan->srej_q);
630 l2cap_seq_list_free(&chan->srej_list);
631 l2cap_seq_list_free(&chan->retrans_list);
635 case L2CAP_MODE_STREAMING:
636 skb_queue_purge(&chan->tx_q);
643 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
645 struct l2cap_conn *conn = chan->conn;
646 struct l2cap_le_conn_rsp rsp;
649 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
650 result = L2CAP_CR_AUTHORIZATION;
652 result = L2CAP_CR_BAD_PSM;
654 l2cap_state_change(chan, BT_DISCONN);
656 rsp.dcid = cpu_to_le16(chan->scid);
657 rsp.mtu = cpu_to_le16(chan->imtu);
658 rsp.mps = cpu_to_le16(chan->mps);
659 rsp.credits = cpu_to_le16(chan->rx_credits);
660 rsp.result = cpu_to_le16(result);
662 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
666 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
668 struct l2cap_conn *conn = chan->conn;
669 struct l2cap_conn_rsp rsp;
672 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
673 result = L2CAP_CR_SEC_BLOCK;
675 result = L2CAP_CR_BAD_PSM;
677 l2cap_state_change(chan, BT_DISCONN);
679 rsp.scid = cpu_to_le16(chan->dcid);
680 rsp.dcid = cpu_to_le16(chan->scid);
681 rsp.result = cpu_to_le16(result);
682 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
684 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its current state: listening channels
 * are simply torn down, connected channels send a disconnect request,
 * half-open incoming channels send a connect rejection.  NOTE(review):
 * the case labels (BT_LISTEN, BT_CONNECTED/BT_CONFIG, BT_CONNECT2,
 * BT_CONNECT/BT_DISCONN, default) and break statements are elided in
 * this dump — confirm against the full source.
 */
687 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
689 struct l2cap_conn *conn = chan->conn;
691 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
693 switch (chan->state) {
/* Listening channel: nothing on the air to tear down */
695 chan->ops->teardown(chan, 0);
700 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
701 * check for chan->psm.
703 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
704 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
705 l2cap_send_disconn_req(chan, reason);
707 l2cap_chan_del(chan, reason);
/* Incoming connection pending: reject rather than disconnect */
711 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
712 if (conn->hcon->type == ACL_LINK)
713 l2cap_chan_connect_reject(chan);
714 else if (conn->hcon->type == LE_LINK)
715 l2cap_chan_le_connect_reject(chan);
718 l2cap_chan_del(chan, reason);
723 l2cap_chan_del(chan, reason);
727 chan->ops->teardown(chan, 0);
/* Map channel type / PSM / security level to the HCI authentication
 * requirement used when establishing link-level security.  SDP-bound
 * channels are downgraded from BT_SECURITY_LOW to BT_SECURITY_SDP so
 * they never trigger authentication.  NOTE(review): the case labels
 * (e.g. L2CAP_CHAN_RAW, default:), else branches and break statements
 * are elided in this dump — confirm against the full source.
 */
732 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
734 switch (chan->chan_type) {
736 switch (chan->sec_level) {
737 case BT_SECURITY_HIGH:
738 return HCI_AT_DEDICATED_BONDING_MITM;
739 case BT_SECURITY_MEDIUM:
740 return HCI_AT_DEDICATED_BONDING;
742 return HCI_AT_NO_BONDING;
745 case L2CAP_CHAN_CONN_LESS:
/* 3DSP connectionless channels never require authentication */
746 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
747 if (chan->sec_level == BT_SECURITY_LOW)
748 chan->sec_level = BT_SECURITY_SDP;
750 if (chan->sec_level == BT_SECURITY_HIGH)
751 return HCI_AT_NO_BONDING_MITM;
753 return HCI_AT_NO_BONDING;
755 case L2CAP_CHAN_CONN_ORIENTED:
/* SDP itself must be reachable without authentication */
756 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
757 if (chan->sec_level == BT_SECURITY_LOW)
758 chan->sec_level = BT_SECURITY_SDP;
760 if (chan->sec_level == BT_SECURITY_HIGH)
761 return HCI_AT_NO_BONDING_MITM;
763 return HCI_AT_NO_BONDING;
767 switch (chan->sec_level) {
768 case BT_SECURITY_HIGH:
769 return HCI_AT_GENERAL_BONDING_MITM;
770 case BT_SECURITY_MEDIUM:
771 return HCI_AT_GENERAL_BONDING;
773 return HCI_AT_NO_BONDING;
779 /* Service level security */
780 int l2cap_chan_check_security(struct l2cap_chan *chan)
782 struct l2cap_conn *conn = chan->conn;
785 if (conn->hcon->type == LE_LINK)
786 return smp_conn_security(conn->hcon, chan->sec_level);
788 auth_type = l2cap_get_auth_type(chan);
790 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
793 static u8 l2cap_get_ident(struct l2cap_conn *conn)
797 /* Get next available identificator.
798 * 1 - 128 are used by kernel.
799 * 129 - 199 are reserved.
800 * 200 - 254 are used by utilities like l2ping, etc.
803 spin_lock(&conn->lock);
805 if (++conn->tx_ident > 128)
810 spin_unlock(&conn->lock);
815 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
818 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
821 BT_DBG("code 0x%2.2x", code);
826 if (lmp_no_flush_capable(conn->hcon->hdev))
827 flags = ACL_START_NO_FLUSH;
831 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
832 skb->priority = HCI_PRIO_MAX;
834 hci_send_acl(conn->hchan, skb, flags);
837 static bool __chan_is_moving(struct l2cap_chan *chan)
839 return chan->move_state != L2CAP_MOVE_STABLE &&
840 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb on the channel, routing to the AMP high-speed
 * link when one is established and no move is in progress, otherwise
 * over the BR/EDR ACL link.  NOTE(review): the BT_DBG priority
 * argument, the return after the hs path, the flags declaration and
 * the `flags = ACL_START;` else branch are elided in this dump —
 * confirm against the full source.
 */
843 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
845 struct hci_conn *hcon = chan->conn->hcon;
848 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
/* Prefer the high-speed AMP channel when available and stable */
851 if (chan->hs_hcon && !__chan_is_moving(chan)) {
853 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
860 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
861 lmp_no_flush_capable(hcon->hdev))
862 flags = ACL_START_NO_FLUSH;
866 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
867 hci_send_acl(chan->conn->hchan, skb, flags);
870 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
872 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
873 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
875 if (enh & L2CAP_CTRL_FRAME_TYPE) {
878 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
879 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
886 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
887 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
894 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
896 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
897 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
899 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
902 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
903 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
910 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
911 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
918 static inline void __unpack_control(struct l2cap_chan *chan,
921 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
922 __unpack_extended_control(get_unaligned_le32(skb->data),
923 &bt_cb(skb)->control);
924 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
926 __unpack_enhanced_control(get_unaligned_le16(skb->data),
927 &bt_cb(skb)->control);
928 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
932 static u32 __pack_extended_control(struct l2cap_ctrl *control)
936 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
937 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
939 if (control->sframe) {
940 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
941 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
942 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
944 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
945 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
951 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
955 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
956 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
958 if (control->sframe) {
959 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
960 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
961 packed |= L2CAP_CTRL_FRAME_TYPE;
963 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
964 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
970 static inline void __pack_control(struct l2cap_chan *chan,
971 struct l2cap_ctrl *control,
974 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
975 put_unaligned_le32(__pack_extended_control(control),
976 skb->data + L2CAP_HDR_SIZE);
978 put_unaligned_le16(__pack_enhanced_control(control),
979 skb->data + L2CAP_HDR_SIZE);
983 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
985 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
986 return L2CAP_EXT_HDR_SIZE;
988 return L2CAP_ENH_HDR_SIZE;
991 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
995 struct l2cap_hdr *lh;
996 int hlen = __ertm_hdr_size(chan);
998 if (chan->fcs == L2CAP_FCS_CRC16)
999 hlen += L2CAP_FCS_SIZE;
1001 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1004 return ERR_PTR(-ENOMEM);
1006 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1007 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1008 lh->cid = cpu_to_le16(chan->dcid);
1010 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1011 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1013 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1015 if (chan->fcs == L2CAP_FCS_CRC16) {
1016 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1017 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1020 skb->priority = HCI_PRIO_MAX;
/* Build and send a supervisory (S) frame, maintaining the RNR-sent
 * state and acknowledgement bookkeeping as a side effect.
 * NOTE(review): several guard `return` statements and the IS_ERR(skb)
 * check are elided in this dump — confirm against the full source.
 */
1024 static void l2cap_send_sframe(struct l2cap_chan *chan,
1025 struct l2cap_ctrl *control)
1027 struct sk_buff *skb;
1030 BT_DBG("chan %p, control %p", chan, control);
1032 if (!control->sframe)
/* No new frames while an AMP channel move is in progress */
1035 if (__chan_is_moving(chan))
1038 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
/* Track local-busy signalling so we know to send RR when it clears */
1042 if (control->super == L2CAP_SUPER_RR)
1043 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1044 else if (control->super == L2CAP_SUPER_RNR)
1045 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* SREJ does not acknowledge, so it must not reset the ack timer */
1047 if (control->super != L2CAP_SUPER_SREJ) {
1048 chan->last_acked_seq = control->reqseq;
1049 __clear_ack_timer(chan);
1052 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1053 control->final, control->poll, control->super);
1055 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1056 control_field = __pack_extended_control(control);
1058 control_field = __pack_enhanced_control(control);
1060 skb = l2cap_create_sframe_pdu(chan, control_field);
1062 l2cap_do_send(chan, skb);
1065 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1067 struct l2cap_ctrl control;
1069 BT_DBG("chan %p, poll %d", chan, poll);
1071 memset(&control, 0, sizeof(control));
1073 control.poll = poll;
1075 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1076 control.super = L2CAP_SUPER_RNR;
1078 control.super = L2CAP_SUPER_RR;
1080 control.reqseq = chan->buffer_seq;
1081 l2cap_send_sframe(chan, &control);
1084 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1086 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1089 static bool __amp_capable(struct l2cap_chan *chan)
1091 struct l2cap_conn *conn = chan->conn;
1092 struct hci_dev *hdev;
1093 bool amp_available = false;
1095 if (!conn->hs_enabled)
1098 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1101 read_lock(&hci_dev_list_lock);
1102 list_for_each_entry(hdev, &hci_dev_list, list) {
1103 if (hdev->amp_type != AMP_TYPE_BREDR &&
1104 test_bit(HCI_UP, &hdev->flags)) {
1105 amp_available = true;
1109 read_unlock(&hci_dev_list_lock);
1111 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1112 return amp_available;
1117 static bool l2cap_check_efs(struct l2cap_chan *chan)
1119 /* Check EFS parameters */
1123 void l2cap_send_conn_req(struct l2cap_chan *chan)
1125 struct l2cap_conn *conn = chan->conn;
1126 struct l2cap_conn_req req;
1128 req.scid = cpu_to_le16(chan->scid);
1129 req.psm = chan->psm;
1131 chan->ident = l2cap_get_ident(conn);
1133 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1135 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1138 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1140 struct l2cap_create_chan_req req;
1141 req.scid = cpu_to_le16(chan->scid);
1142 req.psm = chan->psm;
1143 req.amp_id = amp_id;
1145 chan->ident = l2cap_get_ident(chan->conn);
1147 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, mark
 * queued frames for retransmission, reset the receive expectation and
 * per-move state machines.  NOTE(review): some elided lines (early
 * return, resetting unacked/frames-sent counters, closing braces) are
 * missing from this dump — confirm against the full source.
 */
1151 static void l2cap_move_setup(struct l2cap_chan *chan)
1153 struct sk_buff *skb;
1155 BT_DBG("chan %p", chan);
/* Only ERTM channels carry state that must be reset for a move */
1157 if (chan->mode != L2CAP_MODE_ERTM)
1160 __clear_retrans_timer(chan);
1161 __clear_monitor_timer(chan);
1162 __clear_ack_timer(chan);
1164 chan->retry_count = 0;
/* Frames already tried once keep a single retry credit across the move */
1165 skb_queue_walk(&chan->tx_q, skb) {
1166 if (bt_cb(skb)->control.retries)
1167 bt_cb(skb)->control.retries = 1;
1172 chan->expected_tx_seq = chan->buffer_seq;
1174 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1175 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1176 l2cap_seq_list_clear(&chan->retrans_list);
1177 l2cap_seq_list_clear(&chan->srej_list);
1178 skb_queue_purge(&chan->srej_q);
1180 chan->tx_state = L2CAP_TX_STATE_XMIT;
1181 chan->rx_state = L2CAP_RX_STATE_MOVE;
/* Suppress transmission until the move completes */
1183 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finalize an AMP channel move: return the move state machine to
 * stable and, for ERTM channels, transition the RX state machine
 * according to the role we played in the move.  NOTE(review): the
 * early return for non-ERTM modes and the closing braces/break of the
 * switch are elided in this dump — confirm against the full source.
 */
1186 static void l2cap_move_done(struct l2cap_chan *chan)
1188 u8 move_role = chan->move_role;
1189 BT_DBG("chan %p", chan);
1191 chan->move_state = L2CAP_MOVE_STABLE;
1192 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1194 if (chan->mode != L2CAP_MODE_ERTM)
1197 switch (move_role) {
1198 case L2CAP_MOVE_ROLE_INITIATOR:
/* Initiator polls the peer and waits for the final (F) bit */
1199 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1200 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1202 case L2CAP_MOVE_ROLE_RESPONDER:
/* Responder waits for the peer's poll (P) bit */
1203 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1208 static void l2cap_le_flowctl_start(struct l2cap_chan *chan)
1211 chan->sdu_last_frag = NULL;
1214 if (chan->imtu < L2CAP_LE_DEFAULT_MPS)
1215 chan->mps = chan->imtu;
1217 chan->mps = L2CAP_LE_DEFAULT_MPS;
1219 skb_queue_head_init(&chan->tx_q);
1221 if (!chan->tx_credits)
1222 chan->ops->suspend(chan);
1225 static void l2cap_chan_ready(struct l2cap_chan *chan)
1227 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1228 chan->conf_state = 0;
1229 __clear_chan_timer(chan);
1231 if (chan->mode == L2CAP_MODE_LE_FLOWCTL)
1232 l2cap_le_flowctl_start(chan);
1234 chan->state = BT_CONNECTED;
1236 chan->ops->ready(chan);
1239 static void l2cap_le_connect(struct l2cap_chan *chan)
1241 struct l2cap_conn *conn = chan->conn;
1242 struct l2cap_le_conn_req req;
1244 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1247 req.psm = chan->psm;
1248 req.scid = cpu_to_le16(chan->scid);
1249 req.mtu = cpu_to_le16(chan->imtu);
1250 req.mps = cpu_to_le16(chan->mps);
1251 req.credits = cpu_to_le16(chan->rx_credits);
1253 chan->ident = l2cap_get_ident(conn);
1255 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1259 static void l2cap_le_start(struct l2cap_chan *chan)
1261 struct l2cap_conn *conn = chan->conn;
1263 if (!smp_conn_security(conn->hcon, chan->sec_level))
1267 l2cap_chan_ready(chan);
1271 if (chan->state == BT_CONNECT)
1272 l2cap_le_connect(chan);
1275 static void l2cap_start_connection(struct l2cap_chan *chan)
1277 if (__amp_capable(chan)) {
1278 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1279 a2mp_discover_amp(chan);
1280 } else if (chan->conn->hcon->type == LE_LINK) {
1281 l2cap_le_start(chan);
1283 l2cap_send_conn_req(chan);
/* Start connecting a channel.  On BR/EDR the feature mask of the
 * remote must be known first; if the information exchange has not
 * completed this sends (or waits for) an information request and the
 * connect is retried from l2cap_conn_start().  NOTE(review): several
 * elided lines (early returns, the req.type assignment continuation,
 * closing braces) are missing from this dump — confirm against the
 * full source.
 */
1287 static void l2cap_do_start(struct l2cap_chan *chan)
1289 struct l2cap_conn *conn = chan->conn;
1291 if (conn->hcon->type == LE_LINK) {
1292 l2cap_le_start(chan);
1296 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight: wait for it to finish */
1297 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1300 if (l2cap_chan_check_security(chan) &&
1301 __l2cap_no_conn_pending(chan)) {
1302 l2cap_start_connection(chan);
1305 struct l2cap_info_req req;
1306 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1308 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1309 conn->info_ident = l2cap_get_ident(conn);
/* Time-bound the information exchange */
1311 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1313 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check whether an L2CAP mode is supported by both the local feature
 * mask and the remote's advertised feature mask.  NOTE(review): the
 * guard before OR-ing in ERTM/streaming support (presumably an
 * `if (!disable_ertm)` test), the switch(mode) opener and the default
 * case are elided in this dump — confirm against the full source.
 */
1318 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1320 u32 local_feat_mask = l2cap_feat_mask;
1322 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1325 case L2CAP_MODE_ERTM:
1326 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1327 case L2CAP_MODE_STREAMING:
1328 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request for the channel and move it to
 * BT_DISCONN, reporting err to the owner.  ERTM timers are stopped
 * first; A2MP fixed channels have no signalling disconnect and just
 * change state.  NOTE(review): the `if (!conn) return;` guard and an
 * early return in the A2MP branch are elided in this dump — confirm
 * against the full source.
 */
1334 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1336 struct l2cap_conn *conn = chan->conn;
1337 struct l2cap_disconn_req req;
1342 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1343 __clear_retrans_timer(chan);
1344 __clear_monitor_timer(chan);
1345 __clear_ack_timer(chan);
/* A2MP has no disconnect PDU; only the state changes */
1348 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1349 l2cap_state_change(chan, BT_DISCONN);
1353 req.dcid = cpu_to_le16(chan->dcid);
1354 req.scid = cpu_to_le16(chan->scid);
1355 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1358 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1361 /* ---- L2CAP connections ---- */
/* Walk all connection-oriented channels on the connection after the
 * feature exchange completes: start pending outgoing connects and
 * answer pending incoming ones (BT_CONNECT2) with success, pend, or
 * defer.  NOTE(review): elided lines include `char buf[128];`, the
 * continue statements, the l2cap_send_cmd(..., sizeof(rsp), &rsp)
 * continuation, and closing braces — confirm against the full source.
 */
1362 static void l2cap_conn_start(struct l2cap_conn *conn)
1364 struct l2cap_chan *chan, *tmp;
1366 BT_DBG("conn %p", conn);
1368 mutex_lock(&conn->chan_lock);
1370 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1371 l2cap_chan_lock(chan);
1373 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1374 l2cap_chan_unlock(chan);
1378 if (chan->state == BT_CONNECT) {
1379 if (!l2cap_chan_check_security(chan) ||
1380 !__l2cap_no_conn_pending(chan)) {
1381 l2cap_chan_unlock(chan);
/* State-2 devices cannot fall back: close if mode unsupported */
1385 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1386 && test_bit(CONF_STATE2_DEVICE,
1387 &chan->conf_state)) {
1388 l2cap_chan_close(chan, ECONNRESET);
1389 l2cap_chan_unlock(chan);
1393 l2cap_start_connection(chan);
1395 } else if (chan->state == BT_CONNECT2) {
1396 struct l2cap_conn_rsp rsp;
1398 rsp.scid = cpu_to_le16(chan->dcid);
1399 rsp.dcid = cpu_to_le16(chan->scid);
1401 if (l2cap_chan_check_security(chan)) {
/* Security passed; still pending if the owner defers accept */
1402 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1403 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1404 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1405 chan->ops->defer(chan);
1408 l2cap_state_change(chan, BT_CONFIG);
1409 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1410 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1413 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1414 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1417 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Configuration starts only after a successful response */
1420 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1421 rsp.result != L2CAP_CR_SUCCESS) {
1422 l2cap_chan_unlock(chan);
1426 set_bit(CONF_REQ_SENT, &chan->conf_state);
1427 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1428 l2cap_build_conf_req(chan, buf), buf);
1429 chan->num_conf_req++;
1432 l2cap_chan_unlock(chan);
1435 mutex_unlock(&conn->chan_lock);
1438 /* Find socket with cid and source/destination bdaddr.
1439 * Returns closest match, locked.
/* Look up a channel in the global chan_list by source CID.
 * @state: required channel state, or 0 to match any state.
 * An exact src/dst address match returns immediately; otherwise the
 * closest wildcard (BDADDR_ANY) match found is remembered and returned
 * after the scan (the tail of this function is outside the visible
 * extract).
 */
1441 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1445 struct l2cap_chan *c, *c1 = NULL;
1447 read_lock(&chan_list_lock);
1449 list_for_each_entry(c, &chan_list, global_l) {
1450 if (state && c->state != state)
1453 if (c->scid == cid) {
1454 int src_match, dst_match;
1455 int src_any, dst_any;
/* Exact address match beats any wildcard match. */
1458 src_match = !bacmp(&c->src, src);
1459 dst_match = !bacmp(&c->dst, dst);
1460 if (src_match && dst_match) {
1461 read_unlock(&chan_list_lock);
/* Closest match: one side exact (or both) with BDADDR_ANY wildcards. */
1466 src_any = !bacmp(&c->src, BDADDR_ANY);
1467 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1468 if ((src_match && dst_any) || (src_any && dst_match) ||
1469 (src_any && dst_any))
1474 read_unlock(&chan_list_lock);
/* An LE link is ready: if an ATT server channel is listening, clone it
 * into a new channel attached to this connection. Skipped when a client
 * ATT channel already exists on the connection or the remote device is
 * blacklisted.
 */
1479 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1481 struct hci_conn *hcon = conn->hcon;
1482 struct l2cap_chan *chan, *pchan;
1487 /* Check if we have socket listening on cid */
1488 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1489 &hcon->src, &hcon->dst);
1493 /* Client ATT sockets should override the server one */
1494 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1497 dst_type = bdaddr_type(hcon, hcon->dst_type);
1499 /* If device is blocked, do not create a channel for it */
1500 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1503 l2cap_chan_lock(pchan);
/* Clone a new child channel from the listening (parent) channel. */
1505 chan = pchan->ops->new_connection(pchan);
1509 chan->dcid = L2CAP_CID_ATT;
1511 bacpy(&chan->src, &hcon->src);
1512 bacpy(&chan->dst, &hcon->dst);
1513 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1514 chan->dst_type = dst_type;
1516 __l2cap_chan_add(conn, chan);
1519 l2cap_chan_unlock(pchan);
/* The underlying HCI connection is fully established: start SMP for
 * outgoing LE pairing, create the ATT channel if needed, then advance
 * every channel on the connection according to link type and state.
 */
1522 static void l2cap_conn_ready(struct l2cap_conn *conn)
1524 struct l2cap_chan *chan;
1525 struct hci_conn *hcon = conn->hcon;
1527 BT_DBG("conn %p", conn);
1529 /* For outgoing pairing which doesn't necessarily have an
1530 * associated socket (e.g. mgmt_pair_device).
1532 if (hcon->out && hcon->type == LE_LINK)
1533 smp_conn_security(hcon, hcon->pending_sec_level);
1535 mutex_lock(&conn->chan_lock);
1537 if (hcon->type == LE_LINK)
1538 l2cap_le_conn_ready(conn);
1540 list_for_each_entry(chan, &conn->chan_l, list) {
1542 l2cap_chan_lock(chan);
/* A2MP fixed channels have their own setup path; skip them here. */
1544 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1545 l2cap_chan_unlock(chan);
1549 if (hcon->type == LE_LINK) {
1550 l2cap_le_start(chan);
1551 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Connectionless/raw channels are ready as soon as the link is. */
1552 l2cap_chan_ready(chan);
1554 } else if (chan->state == BT_CONNECT) {
1555 l2cap_do_start(chan);
1558 l2cap_chan_unlock(chan);
1561 mutex_unlock(&conn->chan_lock);
1564 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that demanded reliability
 * (FLAG_FORCE_RELIABLE); other channels are left untouched.
 */
1565 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1567 struct l2cap_chan *chan;
1569 BT_DBG("conn %p", conn);
1571 mutex_lock(&conn->chan_lock);
1573 list_for_each_entry(chan, &conn->chan_l, list) {
1574 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1575 l2cap_chan_set_err(chan, err);
1578 mutex_unlock(&conn->chan_lock);
/* Delayed-work handler: the Information Request exchange timed out.
 * Mark the feature-mask exchange as done anyway and proceed with
 * channel setup.
 */
1581 static void l2cap_info_timeout(struct work_struct *work)
1583 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1586 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1587 conn->info_ident = 0;
1589 l2cap_conn_start(conn);
1594 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1595 * callback is called during registration. The ->remove callback is called
1596 * during unregistration.
1597 * An l2cap_user object can either be explicitly unregistered or when the
1598 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1599 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1600 * External modules must own a reference to the l2cap_conn object if they intend
1601 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1602 * any time if they don't.
/* Register an external @user on @conn, calling user->probe() under the
 * hci_dev lock. Fails if the user is already registered (non-NULL list
 * pointers) or the connection has already been torn down.
 * Returns 0 on success or a negative error (error paths are outside the
 * visible extract).
 */
1605 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1607 struct hci_dev *hdev = conn->hcon->hdev;
1610 /* We need to check whether l2cap_conn is registered. If it is not, we
1611 * must not register the l2cap_user. l2cap_conn_del() unregisters
1612 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1613 * relies on the parent hci_conn object to be locked. This itself relies
1614 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean this user is already on some list. */
1619 if (user->list.next || user->list.prev) {
1624 /* conn->hchan is NULL after l2cap_conn_del() was called */
1630 ret = user->probe(conn, user);
1634 list_add(&user->list, &conn->users);
1638 hci_dev_unlock(hdev);
1641 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister @user from @conn under the hci_dev lock and invoke its
 * ->remove() callback. A user with NULL list pointers is not registered
 * and is ignored.
 */
1643 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1645 struct hci_dev *hdev = conn->hcon->hdev;
1649 if (!user->list.next || !user->list.prev)
1652 list_del(&user->list);
/* NULL list pointers mark the user as unregistered (see register). */
1653 user->list.next = NULL;
1654 user->list.prev = NULL;
1655 user->remove(conn, user);
1658 hci_dev_unlock(hdev);
1660 EXPORT_SYMBOL(l2cap_unregister_user);
/* Remove every registered l2cap_user from @conn, invoking each user's
 * ->remove() callback. Caller is expected to hold the required locks
 * (see l2cap_register_user()'s locking comment).
 */
1662 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1664 struct l2cap_user *user;
1666 while (!list_empty(&conn->users)) {
1667 user = list_first_entry(&conn->users, struct l2cap_user, list);
1668 list_del(&user->list);
/* Clear the pointers so the user reads as "not registered". */
1669 user->list.next = NULL;
1670 user->list.prev = NULL;
1671 user->remove(conn, user);
/* Tear down the L2CAP connection attached to @hcon: notify users,
 * delete and close every channel with error @err, free the HCI channel,
 * cancel pending timers and drop the connection reference.
 */
1675 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1677 struct l2cap_conn *conn = hcon->l2cap_data;
1678 struct l2cap_chan *chan, *l;
1683 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1685 kfree_skb(conn->rx_skb);
1687 l2cap_unregister_all_users(conn);
1689 mutex_lock(&conn->chan_lock);
/* Hold each channel across del/close so it cannot vanish under us. */
1692 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1693 l2cap_chan_hold(chan);
1694 l2cap_chan_lock(chan);
1696 l2cap_chan_del(chan, err);
1698 l2cap_chan_unlock(chan);
1700 chan->ops->close(chan);
1701 l2cap_chan_put(chan);
1704 mutex_unlock(&conn->chan_lock);
1706 hci_chan_del(conn->hchan);
1708 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1709 cancel_delayed_work_sync(&conn->info_timer);
/* LE links with SMP pairing in flight also carry a security timer. */
1711 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1712 cancel_delayed_work_sync(&conn->security_timer);
1713 smp_chan_destroy(conn);
1716 hcon->l2cap_data = NULL;
1718 l2cap_conn_put(conn);
/* Delayed-work handler: SMP security procedure timed out on an LE link.
 * Destroy the SMP context and drop the whole connection with ETIMEDOUT.
 */
1721 static void security_timeout(struct work_struct *work)
1723 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1724 security_timer.work);
1726 BT_DBG("conn %p", conn);
1728 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1729 smp_chan_destroy(conn);
1730 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate the
 * structure, create the HCI channel, pick the MTU from the link type
 * and initialise locks, lists and the appropriate delayed work.
 * Returns NULL on allocation failure (early-return lines are outside
 * the visible extract).
 */
1734 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1736 struct l2cap_conn *conn = hcon->l2cap_data;
1737 struct hci_chan *hchan;
1742 hchan = hci_chan_create(hcon);
1746 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1748 hci_chan_del(hchan);
1752 kref_init(&conn->ref);
1753 hcon->l2cap_data = conn;
/* The conn holds a reference on its hci_conn; dropped in conn_free. */
1755 hci_conn_get(conn->hcon);
1756 conn->hchan = hchan;
1758 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU comes from the controller's LE or ACL buffer size. */
1760 switch (hcon->type) {
1762 if (hcon->hdev->le_mtu) {
1763 conn->mtu = hcon->hdev->le_mtu;
1768 conn->mtu = hcon->hdev->acl_mtu;
1772 conn->feat_mask = 0;
1774 if (hcon->type == ACL_LINK)
1775 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1776 &hcon->hdev->dev_flags);
1778 spin_lock_init(&conn->lock);
1779 mutex_init(&conn->chan_lock);
1781 INIT_LIST_HEAD(&conn->chan_l);
1782 INIT_LIST_HEAD(&conn->users);
/* LE links use the SMP security timer; BR/EDR the info-req timer. */
1784 if (hcon->type == LE_LINK)
1785 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1787 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1789 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() (the kfree of conn itself is outside the extract).
 */
1794 static void l2cap_conn_free(struct kref *ref)
1796 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1798 hci_conn_put(conn->hcon);
/* Take a reference on @conn; paired with l2cap_conn_put(). */
1802 void l2cap_conn_get(struct l2cap_conn *conn)
1804 kref_get(&conn->ref);
1806 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
1808 void l2cap_conn_put(struct l2cap_conn *conn)
1810 kref_put(&conn->ref, l2cap_conn_free);
1812 EXPORT_SYMBOL(l2cap_conn_put);
1814 /* ---- Socket interface ---- */
1816 /* Find socket with psm and source / destination bdaddr.
1817 * Returns closest match.
/* Look up a channel in the global chan_list by PSM, filtered by link
 * type (BR/EDR vs LE source address type). Exact src/dst match returns
 * immediately; otherwise the closest wildcard match is remembered and
 * returned after the scan (tail outside the visible extract).
 */
1819 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1824 struct l2cap_chan *c, *c1 = NULL;
1826 read_lock(&chan_list_lock);
1828 list_for_each_entry(c, &chan_list, global_l) {
1829 if (state && c->state != state)
/* Channel's address type must match the link type being served. */
1832 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1835 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1838 if (c->psm == psm) {
1839 int src_match, dst_match;
1840 int src_any, dst_any;
/* Exact address match beats any wildcard match. */
1843 src_match = !bacmp(&c->src, src);
1844 dst_match = !bacmp(&c->dst, dst);
1845 if (src_match && dst_match) {
1846 read_unlock(&chan_list_lock);
/* Closest match via BDADDR_ANY wildcards. */
1851 src_any = !bacmp(&c->src, BDADDR_ANY);
1852 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1853 if ((src_match && dst_any) || (src_any && dst_match) ||
1854 (src_any && dst_any))
1859 read_unlock(&chan_list_lock);
/* Connect @chan to the remote device @dst (address type @dst_type) on
 * PSM @psm or fixed CID @cid. Validates the PSM, channel mode and
 * current state, creates/reuses the HCI connection (ACL or LE based on
 * @dst_type), binds the channel to it and starts connection setup.
 * Returns 0 on success or a negative errno (several error-return lines
 * fall outside the visible extract).
 */
1864 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1865 bdaddr_t *dst, u8 dst_type)
1867 struct l2cap_conn *conn;
1868 struct hci_conn *hcon;
1869 struct hci_dev *hdev;
1873 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1874 dst_type, __le16_to_cpu(psm));
1876 hdev = hci_get_route(dst, &chan->src);
1878 return -EHOSTUNREACH;
1882 l2cap_chan_lock(chan);
1884 /* PSM must be odd and lsb of upper byte must be 0 */
1885 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1886 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1891 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1896 switch (chan->mode) {
1897 case L2CAP_MODE_BASIC:
1898 case L2CAP_MODE_LE_FLOWCTL:
1900 case L2CAP_MODE_ERTM:
1901 case L2CAP_MODE_STREAMING:
1910 switch (chan->state) {
1914 /* Already connecting */
1919 /* Already connected */
1933 /* Set destination address and psm */
1934 bacpy(&chan->dst, dst);
1935 chan->dst_type = dst_type;
1940 auth_type = l2cap_get_auth_type(chan);
/* LE vs BR/EDR link selection comes from the address type. */
1942 if (bdaddr_type_is_le(dst_type))
1943 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1944 chan->sec_level, auth_type);
1946 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1947 chan->sec_level, auth_type);
1950 err = PTR_ERR(hcon);
1954 conn = l2cap_conn_add(hcon);
1956 hci_conn_drop(hcon);
/* Refuse duplicate fixed-CID channels on the same connection. */
1961 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1962 hci_conn_drop(hcon);
1967 /* Update source addr of the socket */
1968 bacpy(&chan->src, &hcon->src);
1969 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* l2cap_chan_add() takes conn->chan_lock; drop the channel lock to
 * keep the lock ordering (conn lock before channel lock).
 */
1971 l2cap_chan_unlock(chan);
1972 l2cap_chan_add(conn, chan);
1973 l2cap_chan_lock(chan);
1975 /* l2cap_chan_add takes its own ref so we can drop this one */
1976 hci_conn_drop(hcon);
1978 l2cap_state_change(chan, BT_CONNECT);
1979 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1981 if (hcon->state == BT_CONNECTED) {
1982 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1983 __clear_chan_timer(chan);
1984 if (l2cap_chan_check_security(chan))
1985 l2cap_state_change(chan, BT_CONNECTED);
1987 l2cap_do_start(chan);
1993 l2cap_chan_unlock(chan);
1994 hci_dev_unlock(hdev);
/* Delayed-work handler for the ERTM monitor timer: feed a MONITOR_TO
 * event into the channel's tx state machine. The first unlock/put pair
 * is an early-exit path (its guard condition was dropped from this
 * extract); the second is the normal exit.
 */
1999 static void l2cap_monitor_timeout(struct work_struct *work)
2001 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2002 monitor_timer.work);
2004 BT_DBG("chan %p", chan);
2006 l2cap_chan_lock(chan);
2009 l2cap_chan_unlock(chan);
2010 l2cap_chan_put(chan);
2014 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2016 l2cap_chan_unlock(chan);
2017 l2cap_chan_put(chan);
/* Delayed-work handler for the ERTM retransmission timer: feed a
 * RETRANS_TO event into the channel's tx state machine. Mirrors
 * l2cap_monitor_timeout() including the early-exit path whose guard
 * was dropped from this extract.
 */
2020 static void l2cap_retrans_timeout(struct work_struct *work)
2022 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2023 retrans_timer.work);
2025 BT_DBG("chan %p", chan);
2027 l2cap_chan_lock(chan);
2030 l2cap_chan_unlock(chan);
2031 l2cap_chan_put(chan);
2035 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2036 l2cap_chan_unlock(chan);
2037 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to the tx queue and send every
 * queued frame immediately (no retransmission), stamping each with the
 * next tx sequence number and an optional CRC16 FCS.
 */
2040 static void l2cap_streaming_send(struct l2cap_chan *chan,
2041 struct sk_buff_head *skbs)
2043 struct sk_buff *skb;
2044 struct l2cap_ctrl *control;
2046 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
2048 if (__chan_is_moving(chan))
2051 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2053 while (!skb_queue_empty(&chan->tx_q)) {
2055 skb = skb_dequeue(&chan->tx_q);
2057 bt_cb(skb)->control.retries = 1;
2058 control = &bt_cb(skb)->control;
2060 control->reqseq = 0;
2061 control->txseq = chan->next_tx_seq;
2063 __pack_control(chan, control, skb);
/* FCS (if negotiated) covers everything already in the frame. */
2065 if (chan->fcs == L2CAP_FCS_CRC16) {
2066 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2067 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2070 l2cap_do_send(chan, skb);
2072 BT_DBG("Sent txseq %u", control->txseq);
2074 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2075 chan->frames_sent++;
/* ERTM transmit: send queued I-frames starting at tx_send_head while
 * the remote tx window has room and the tx state machine is in XMIT.
 * Each frame is cloned before sending so the original stays queued for
 * possible retransmission. Returns the number of frames sent (the
 * early-return lines for the guard checks are outside this extract).
 */
2079 static int l2cap_ertm_send(struct l2cap_chan *chan)
2081 struct sk_buff *skb, *tx_skb;
2082 struct l2cap_ctrl *control;
2085 BT_DBG("chan %p", chan);
2087 if (chan->state != BT_CONNECTED)
2090 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2093 if (__chan_is_moving(chan))
2096 while (chan->tx_send_head &&
2097 chan->unacked_frames < chan->remote_tx_win &&
2098 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2100 skb = chan->tx_send_head;
2102 bt_cb(skb)->control.retries = 1;
2103 control = &bt_cb(skb)->control;
2105 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Each I-frame also acknowledges everything up to buffer_seq. */
2108 control->reqseq = chan->buffer_seq;
2109 chan->last_acked_seq = chan->buffer_seq;
2110 control->txseq = chan->next_tx_seq;
2112 __pack_control(chan, control, skb);
2114 if (chan->fcs == L2CAP_FCS_CRC16) {
2115 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2116 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2119 /* Clone after data has been modified. Data is assumed to be
2120 read-only (for locking purposes) on cloned sk_buffs.
2122 tx_skb = skb_clone(skb, GFP_KERNEL);
2127 __set_retrans_timer(chan);
2129 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2130 chan->unacked_frames++;
2131 chan->frames_sent++;
/* Advance tx_send_head; NULL when the queue is exhausted. */
2134 if (skb_queue_is_last(&chan->tx_q, skb))
2135 chan->tx_send_head = NULL;
2137 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2139 l2cap_do_send(chan, tx_skb);
2140 BT_DBG("Sent txseq %u", control->txseq);
2143 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2144 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * Frames past chan->max_tx retries trigger a disconnect. Cloned skbs
 * are copied (clones are treated as read-only) before the control
 * field and FCS are rewritten in place.
 */
2151 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2153 struct l2cap_ctrl control;
2152 struct sk_buff *skb;
2153 struct sk_buff *tx_skb;
2156 BT_DBG("chan %p", chan);
2158 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2161 if (__chan_is_moving(chan))
2164 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2165 seq = l2cap_seq_list_pop(&chan->retrans_list);
2167 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2169 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2174 bt_cb(skb)->control.retries++;
2175 control = bt_cb(skb)->control;
/* Give up and tear the channel down once the retry limit is hit. */
2177 if (chan->max_tx != 0 &&
2178 bt_cb(skb)->control.retries > chan->max_tx) {
2179 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2180 l2cap_send_disconn_req(chan, ECONNRESET);
2181 l2cap_seq_list_clear(&chan->retrans_list);
2185 control.reqseq = chan->buffer_seq;
2186 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2191 if (skb_cloned(skb)) {
2192 /* Cloned sk_buffs are read-only, so we need a
2195 tx_skb = skb_copy(skb, GFP_KERNEL);
2197 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon this retransmission round. */
2201 l2cap_seq_list_clear(&chan->retrans_list);
2205 /* Update skb contents */
2206 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2207 put_unaligned_le32(__pack_extended_control(&control),
2208 tx_skb->data + L2CAP_HDR_SIZE);
2210 put_unaligned_le16(__pack_enhanced_control(&control),
2211 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the rewritten frame. */
2214 if (chan->fcs == L2CAP_FCS_CRC16) {
2215 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2216 put_unaligned_le16(fcs, skb_put(tx_skb,
2220 l2cap_do_send(chan, tx_skb);
2222 BT_DBG("Resent txseq %d", control.txseq);
2224 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq. */
2228 static void l2cap_retransmit(struct l2cap_chan *chan,
2229 struct l2cap_ctrl *control)
2231 BT_DBG("chan %p, control %p", chan, control);
2233 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2234 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame from control->reqseq up to (but not
 * including) tx_send_head: locate the starting skb in the tx queue,
 * append each following sequence number to the retrans list, then
 * resend the lot.
 */
2237 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2238 struct l2cap_ctrl *control)
2240 struct sk_buff *skb;
2242 BT_DBG("chan %p, control %p", chan, control);
2245 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Start from a clean list; stale entries would resend wrong frames. */
2247 l2cap_seq_list_clear(&chan->retrans_list);
2249 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2252 if (chan->unacked_frames) {
/* First walk finds the frame matching reqseq (or tx_send_head). */
2253 skb_queue_walk(&chan->tx_q, skb) {
2254 if (bt_cb(skb)->control.txseq == control->reqseq ||
2255 skb == chan->tx_send_head)
/* Second walk queues everything up to tx_send_head for resend. */
2259 skb_queue_walk_from(&chan->tx_q, skb) {
2260 if (skb == chan->tx_send_head)
2263 l2cap_seq_list_append(&chan->retrans_list,
2264 bt_cb(skb)->control.txseq);
2267 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggyback the ack on pending I-frames; if none were sent, send
 * an explicit RR once roughly 3/4 of the ack window is outstanding, or
 * arm the ack timer to batch a later ack.
 */
2271 static void l2cap_send_ack(struct l2cap_chan *chan)
2273 struct l2cap_ctrl control;
2274 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2275 chan->last_acked_seq);
2278 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2279 chan, chan->last_acked_seq, chan->buffer_seq);
2281 memset(&control, 0, sizeof(control));
/* Locally busy: tell the peer to stop sending (RNR). */
2284 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2285 chan->rx_state == L2CAP_RX_STATE_RECV) {
2286 __clear_ack_timer(chan);
2287 control.super = L2CAP_SUPER_RNR;
2288 control.reqseq = chan->buffer_seq;
2289 l2cap_send_sframe(chan, &control);
2291 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2292 l2cap_ertm_send(chan);
2293 /* If any i-frames were sent, they included an ack */
2294 if (chan->buffer_seq == chan->last_acked_seq)
2298 /* Ack now if the window is 3/4ths full.
2299 * Calculate without mul or div
/* threshold = ack_win * 3; the /4 shift falls outside this extract. */
2301 threshold = chan->ack_win;
2302 threshold += threshold << 1;
2305 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2308 if (frames_to_ack >= threshold) {
2309 __clear_ack_timer(chan);
2310 control.super = L2CAP_SUPER_RR;
2311 control.reqseq = chan->buffer_seq;
2312 l2cap_send_sframe(chan, &control);
2317 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: @count bytes into
 * the skb head, the remainder into MTU-sized fragments chained on the
 * skb's frag_list. Returns 0 on success or a negative errno.
 */
2321 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2322 struct msghdr *msg, int len,
2323 int count, struct sk_buff *skb)
2325 struct l2cap_conn *conn = chan->conn;
2326 struct sk_buff **frag;
2329 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2335 /* Continuation fragments (no L2CAP header) */
2336 frag = &skb_shinfo(skb)->frag_list;
2338 struct sk_buff *tmp;
/* Each fragment is capped at the connection (HCI) MTU. */
2340 count = min_t(unsigned int, conn->mtu, len);
2342 tmp = chan->ops->alloc_skb(chan, count,
2343 msg->msg_flags & MSG_DONTWAIT);
2345 return PTR_ERR(tmp);
2349 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2352 (*frag)->priority = skb->priority;
/* Keep the parent skb's length accounting in sync. */
2357 skb->len += (*frag)->len;
2358 skb->data_len += (*frag)->len;
2360 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload
 * copied from the user iovec. Returns the skb or an ERR_PTR.
 */
2366 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2367 struct msghdr *msg, size_t len,
2370 struct l2cap_conn *conn = chan->conn;
2371 struct sk_buff *skb;
2372 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2373 struct l2cap_hdr *lh;
2375 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2376 __le16_to_cpu(chan->psm), len, priority);
/* First fragment carries the header; remainder goes via frag_list. */
2378 count = min_t(unsigned int, (conn->mtu - hlen), len);
2380 skb = chan->ops->alloc_skb(chan, count + hlen,
2381 msg->msg_flags & MSG_DONTWAIT);
2385 skb->priority = priority;
2387 /* Create L2CAP header */
2388 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2389 lh->cid = cpu_to_le16(chan->dcid);
/* Connectionless length field includes the 2-byte PSM. */
2390 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2391 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2393 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2394 if (unlikely(err < 0)) {
2396 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload
 * copied from the user iovec. Returns the skb or an ERR_PTR.
 */
2401 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2402 struct msghdr *msg, size_t len,
2405 struct l2cap_conn *conn = chan->conn;
2406 struct sk_buff *skb;
2408 struct l2cap_hdr *lh;
2410 BT_DBG("chan %p len %zu", chan, len);
2412 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2414 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2415 msg->msg_flags & MSG_DONTWAIT);
2419 skb->priority = priority;
2421 /* Create L2CAP header */
2422 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2423 lh->cid = cpu_to_le16(chan->dcid);
2424 lh->len = cpu_to_le16(len);
2426 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2427 if (unlikely(err < 0)) {
2429 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at transmit time), optional SDU-length field,
 * payload, and room reserved for an optional CRC16 FCS.
 * Returns the skb or an ERR_PTR.
 */
2434 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2435 struct msghdr *msg, size_t len,
2438 struct l2cap_conn *conn = chan->conn;
2439 struct sk_buff *skb;
2440 int err, count, hlen;
2441 struct l2cap_hdr *lh;
2443 BT_DBG("chan %p len %zu", chan, len);
2446 return ERR_PTR(-ENOTCONN);
/* Header size depends on enhanced vs extended control field. */
2448 hlen = __ertm_hdr_size(chan);
/* The SDU-length field is only present on the first (START) segment;
 * the guarding condition was dropped from this extract.
 */
2451 hlen += L2CAP_SDULEN_SIZE;
2453 if (chan->fcs == L2CAP_FCS_CRC16)
2454 hlen += L2CAP_FCS_SIZE;
2456 count = min_t(unsigned int, (conn->mtu - hlen), len);
2458 skb = chan->ops->alloc_skb(chan, count + hlen,
2459 msg->msg_flags & MSG_DONTWAIT);
2463 /* Create L2CAP header */
2464 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2465 lh->cid = cpu_to_le16(chan->dcid);
2466 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2468 /* Control header is populated later */
2469 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2470 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2472 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2475 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2477 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2478 if (unlikely(err < 0)) {
2480 return ERR_PTR(err);
2483 bt_cb(skb)->control.fcs = chan->fcs;
2484 bt_cb(skb)->control.retries = 0;
/* Segment an outgoing SDU of @len bytes into ERTM/streaming I-frame
 * PDUs and queue them on @seg_queue. PDU size is bounded by the HCI
 * MTU, the BR/EDR maximum payload and the remote's MPS. Each segment
 * is tagged UNSEGMENTED, START, CONTINUE or END.
 * Returns 0 on success or a negative errno.
 */
2488 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2489 struct sk_buff_head *seg_queue,
2490 struct msghdr *msg, size_t len)
2492 struct sk_buff *skb;
2497 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2499 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2500 * so fragmented skbs are not used. The HCI layer's handling
2501 * of fragmented skbs is not compatible with ERTM's queueing.
2504 /* PDU size is derived from the HCI MTU */
2505 pdu_len = chan->conn->mtu;
2507 /* Constrain PDU size for BR/EDR connections */
2509 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2511 /* Adjust for largest possible L2CAP overhead. */
2513 pdu_len -= L2CAP_FCS_SIZE;
2515 pdu_len -= __ertm_hdr_size(chan);
2517 /* Remote device may have requested smaller PDUs */
2518 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2520 if (len <= pdu_len) {
/* Whole SDU fits in one PDU: no SDU-length field needed. */
2521 sar = L2CAP_SAR_UNSEGMENTED;
2525 sar = L2CAP_SAR_START;
/* The START segment carries the SDU length, so less payload fits. */
2527 pdu_len -= L2CAP_SDULEN_SIZE;
2531 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2534 __skb_queue_purge(seg_queue);
2535 return PTR_ERR(skb);
2538 bt_cb(skb)->control.sar = sar;
2539 __skb_queue_tail(seg_queue, skb);
/* After the START segment, later PDUs have no SDU-length field. */
2544 pdu_len += L2CAP_SDULEN_SIZE;
2547 if (len <= pdu_len) {
2548 sar = L2CAP_SAR_END;
2551 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based flow control PDU: L2CAP header, optional
 * SDU-length field (first segment only; the guard was dropped from
 * this extract), and payload. Returns the skb or an ERR_PTR.
 */
2558 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2560 size_t len, u16 sdulen)
2562 struct l2cap_conn *conn = chan->conn;
2563 struct sk_buff *skb;
2564 int err, count, hlen;
2565 struct l2cap_hdr *lh;
2567 BT_DBG("chan %p len %zu", chan, len);
2570 return ERR_PTR(-ENOTCONN);
2572 hlen = L2CAP_HDR_SIZE;
2575 hlen += L2CAP_SDULEN_SIZE;
2577 count = min_t(unsigned int, (conn->mtu - hlen), len);
2579 skb = chan->ops->alloc_skb(chan, count + hlen,
2580 msg->msg_flags & MSG_DONTWAIT);
2584 /* Create L2CAP header */
2585 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2586 lh->cid = cpu_to_le16(chan->dcid);
2587 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2590 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2592 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2593 if (unlikely(err < 0)) {
2595 return ERR_PTR(err);
/* Segment an outgoing SDU into LE credit-based flow control PDUs and
 * queue them on @seg_queue. The first PDU carries the SDU length;
 * subsequent PDUs do not. Returns 0 or a negative errno.
 */
2601 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2602 struct sk_buff_head *seg_queue,
2603 struct msghdr *msg, size_t len)
2605 struct sk_buff *skb;
2609 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2611 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2613 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* First segment loses 2 bytes of payload to the SDU-length field. */
2616 pdu_len -= L2CAP_SDULEN_SIZE;
2622 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2624 __skb_queue_purge(seg_queue);
2625 return PTR_ERR(skb);
2628 __skb_queue_tail(seg_queue, skb);
/* After the first segment, payload capacity goes back up. */
2634 pdu_len += L2CAP_SDULEN_SIZE;
/* Transmit @len bytes of user data on @chan, dispatching on channel
 * type and mode: connectionless, LE flow control (credit-gated),
 * basic, or ERTM/streaming (segmented then fed to the tx machinery).
 * Returns bytes queued/sent or a negative errno (several error-return
 * lines are outside the visible extract).
 */
2641 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2644 struct sk_buff *skb;
2646 struct sk_buff_head seg_queue;
2651 /* Connectionless channel */
2652 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2653 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2655 return PTR_ERR(skb);
2657 l2cap_do_send(chan, skb);
2661 switch (chan->mode) {
2662 case L2CAP_MODE_LE_FLOWCTL:
2663 /* Check outgoing MTU */
2664 if (len > chan->omtu)
/* No credits: cannot send anything right now. */
2667 if (!chan->tx_credits)
2670 __skb_queue_head_init(&seg_queue);
2672 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
/* Channel may have closed while segmenting (can block on memory). */
2674 if (chan->state != BT_CONNECTED) {
2675 __skb_queue_purge(&seg_queue);
2682 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
/* Send as many queued PDUs as we have credits for. */
2684 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2685 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2689 if (!chan->tx_credits)
2690 chan->ops->suspend(chan);
2696 case L2CAP_MODE_BASIC:
2697 /* Check outgoing MTU */
2698 if (len > chan->omtu)
2701 /* Create a basic PDU */
2702 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2704 return PTR_ERR(skb);
2706 l2cap_do_send(chan, skb);
2710 case L2CAP_MODE_ERTM:
2711 case L2CAP_MODE_STREAMING:
2712 /* Check outgoing MTU */
2713 if (len > chan->omtu) {
2718 __skb_queue_head_init(&seg_queue);
2720 /* Do segmentation before calling in to the state machine,
2721 * since it's possible to block while waiting for memory
2724 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2726 /* The channel could have been closed while segmenting,
2727 * check that it is still connected.
2729 if (chan->state != BT_CONNECTED) {
2730 __skb_queue_purge(&seg_queue);
2737 if (chan->mode == L2CAP_MODE_ERTM)
2738 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2740 l2cap_streaming_send(chan, &seg_queue);
2744 /* If the skbs were not queued for sending, they'll still be in
2745 * seg_queue and need to be purged.
2747 __skb_queue_purge(&seg_queue);
2751 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number between the expected
 * tx seq and @txseq that is not already buffered in srej_q, recording
 * each on srej_list.
 */
2758 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2760 struct l2cap_ctrl control;
2763 BT_DBG("chan %p, txseq %u", chan, txseq);
2765 memset(&control, 0, sizeof(control));
2767 control.super = L2CAP_SUPER_SREJ;
2769 for (seq = chan->expected_tx_seq; seq != txseq;
2770 seq = __next_seq(chan, seq)) {
/* Already-received out-of-order frames don't need an SREJ. */
2771 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2772 control.reqseq = seq;
2773 l2cap_send_sframe(chan, &control);
2774 l2cap_seq_list_append(&chan->srej_list, seq);
2778 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the last (tail) entry on srej_list, if any. */
2781 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2783 struct l2cap_ctrl control;
2785 BT_DBG("chan %p", chan);
2787 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2790 memset(&control, 0, sizeof(control));
2792 control.super = L2CAP_SUPER_SREJ;
2793 control.reqseq = chan->srej_list.tail;
2794 l2cap_send_sframe(chan, &control);
/* Re-send an SREJ for every entry on srej_list except @txseq, cycling
 * each popped entry back onto the list. The saved initial head bounds
 * the loop to a single pass.
 */
2797 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2799 struct l2cap_ctrl control;
2803 BT_DBG("chan %p, txseq %u", chan, txseq);
2805 memset(&control, 0, sizeof(control));
2807 control.super = L2CAP_SUPER_SREJ;
2809 /* Capture initial list head to allow only one pass through the list. */
2810 initial_head = chan->srej_list.head;
2813 seq = l2cap_seq_list_pop(&chan->srej_list);
2814 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2817 control.reqseq = seq;
2818 l2cap_send_sframe(chan, &control);
/* Rotate the entry back so it stays pending. */
2819 l2cap_seq_list_append(&chan->srej_list, seq);
2820 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every tx-queue frame from
 * expected_ack_seq up to (but not including) @reqseq, update the
 * unacked count, and stop the retransmission timer once everything is
 * acked.
 */
2823 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2825 struct sk_buff *acked_skb;
2828 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or nothing newly acknowledged: done. */
2830 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2833 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2834 chan->expected_ack_seq, chan->unacked_frames);
2836 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2837 ackseq = __next_seq(chan, ackseq)) {
2839 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2841 skb_unlink(acked_skb, &chan->tx_q);
2842 kfree_skb(acked_skb);
2843 chan->unacked_frames--;
2847 chan->expected_ack_seq = reqseq;
2849 if (chan->unacked_frames == 0)
2850 __clear_retrans_timer(chan);
2852 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT rx state: reset the expected tx seq, discard all
 * pending SREJ bookkeeping and buffered out-of-order frames, and go
 * back to the plain RECV state.
 */
2855 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2857 BT_DBG("chan %p", chan);
2859 chan->expected_tx_seq = chan->buffer_seq;
2860 l2cap_seq_list_clear(&chan->srej_list);
2861 skb_queue_purge(&chan->srej_q);
2862 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine, XMIT state: handle data requests, local-busy
 * transitions, acknowledgements, explicit polls and retransmission
 * timeouts. Poll events move the machine to WAIT_F.
 */
2865 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2866 struct l2cap_ctrl *control,
2867 struct sk_buff_head *skbs, u8 event)
2869 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2873 case L2CAP_EV_DATA_REQUEST:
2874 if (chan->tx_send_head == NULL)
2875 chan->tx_send_head = skb_peek(skbs);
2877 skb_queue_splice_tail_init(skbs, &chan->tx_q)
2878 l2cap_ertm_send(chan);
2880 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2881 BT_DBG("Enter LOCAL_BUSY");
2882 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2884 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2885 /* The SREJ_SENT state must be aborted if we are to
2886 * enter the LOCAL_BUSY state.
2888 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() sends RNR while CONN_LOCAL_BUSY is set. */
2891 l2cap_send_ack(chan);
2894 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2895 BT_DBG("Exit LOCAL_BUSY");
2896 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2898 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2899 struct l2cap_ctrl local_control;
/* We told the peer we were busy (RNR); poll with RR to resume. */
2901 memset(&local_control, 0, sizeof(local_control));
2902 local_control.sframe = 1;
2903 local_control.super = L2CAP_SUPER_RR;
2904 local_control.poll = 1;
2905 local_control.reqseq = chan->buffer_seq;
2906 l2cap_send_sframe(chan, &local_control);
2908 chan->retry_count = 1;
2909 __set_monitor_timer(chan);
2910 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2913 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2914 l2cap_process_reqseq(chan, control->reqseq);
2916 case L2CAP_EV_EXPLICIT_POLL:
2917 l2cap_send_rr_or_rnr(chan, 1);
2918 chan->retry_count = 1;
2919 __set_monitor_timer(chan);
2920 __clear_ack_timer(chan);
2921 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2923 case L2CAP_EV_RETRANS_TO:
/* Retransmission timer fired: poll the peer and await its F-bit. */
2924 l2cap_send_rr_or_rnr(chan, 1);
2925 chan->retry_count = 1;
2926 __set_monitor_timer(chan);
2927 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2929 case L2CAP_EV_RECV_FBIT:
2930 /* Nothing to process */
/* ERTM tx state machine, WAIT_F state: data is queued but not sent
 * while waiting for the peer's F-bit. Receiving the F-bit returns the
 * machine to XMIT; monitor timeouts re-poll up to max_tx times before
 * disconnecting.
 */
2937 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2938 struct l2cap_ctrl *control,
2939 struct sk_buff_head *skbs, u8 event)
2941 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2945 case L2CAP_EV_DATA_REQUEST:
2946 if (chan->tx_send_head == NULL)
2947 chan->tx_send_head = skb_peek(skbs);
2948 /* Queue data, but don't send. */
2949 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2951 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2952 BT_DBG("Enter LOCAL_BUSY");
2953 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2955 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2956 /* The SREJ_SENT state must be aborted if we are to
2957 * enter the LOCAL_BUSY state.
2959 l2cap_abort_rx_srej_sent(chan);
2962 l2cap_send_ack(chan);
2965 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2966 BT_DBG("Exit LOCAL_BUSY");
2967 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2969 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2970 struct l2cap_ctrl local_control;
/* Resume after RNR: poll the peer with an RR carrying the P-bit. */
2971 memset(&local_control, 0, sizeof(local_control));
2972 local_control.sframe = 1;
2973 local_control.super = L2CAP_SUPER_RR;
2974 local_control.poll = 1;
2975 local_control.reqseq = chan->buffer_seq;
2976 l2cap_send_sframe(chan, &local_control);
2978 chan->retry_count = 1;
2979 __set_monitor_timer(chan);
2980 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2983 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2984 l2cap_process_reqseq(chan, control->reqseq);
2988 case L2CAP_EV_RECV_FBIT:
/* F-bit received: stop monitoring and go back to XMIT. */
2989 if (control && control->final) {
2990 __clear_monitor_timer(chan);
2991 if (chan->unacked_frames > 0)
2992 __set_retrans_timer(chan);
2993 chan->retry_count = 0;
2994 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format string "0x2.2%x" looks malformed — likely
 * intended as "0x%2.2x"; debug-only output, verify upstream.
 */
2995 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2998 case L2CAP_EV_EXPLICIT_POLL:
3001 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means retry forever; otherwise give up past the limit. */
3002 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3003 l2cap_send_rr_or_rnr(chan, 1);
3004 __set_monitor_timer(chan);
3005 chan->retry_count++;
3007 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a transmit-side event to the handler for the channel's
 * current ERTM tx state.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed the reqseq and F-bit of a received frame into the tx state
 * machine so acknowledged frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the F-bit of a received frame into the tx state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only raw-type channels receive a copy */
		if (chan->chan_type != L2CAP_CHAN_RAW)

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)

		/* Each listener gets its own clone of the skb */
		nskb = skb_clone(skb, GFP_KERNEL);

		if (chan->ops->recv(chan, nskb))

	mutex_unlock(&conn->chan_lock);
/* ---- L2CAP signalling commands ---- */

/* Allocate and fill an skb carrying a signalling command: L2CAP header
 * plus command header plus dlen bytes of payload. Payload that does
 * not fit into the first skb (bounded by conn->mtu) is chained as
 * continuation fragments on frag_list.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* The MTU must at least hold the two headers */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling uses a fixed CID; LE links use a different one
	 * than BR/EDR.
	 */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);

	count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
	memcpy(skb_put(skb, count), data, count);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);

		memcpy(skb_put(*frag, count), data, count);

		frag = &(*frag)->next;
/* Read one configuration option from *ptr, returning its type, length
 * and value, and the total number of bytes consumed.
 *
 * NOTE(review): opt->len is taken from the wire without being checked
 * against the remaining buffer length or against the expected size of
 * fixed-width options — callers must validate olen themselves; verify
 * against current upstream option-size hardening.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
	struct l2cap_conf_opt *opt = *ptr;

	len = L2CAP_CONF_OPT_SIZE + opt->len;

	/* 1-byte option value */
		*val = *((u8 *) opt->val);

	/* 2-byte option value (unaligned little-endian) */
		*val = get_unaligned_le16(opt->val);

	/* 4-byte option value (unaligned little-endian) */
		*val = get_unaligned_le32(opt->val);

	/* Variable-length option: hand back a pointer to the raw bytes */
		*val = (unsigned long) opt->val;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr and advance the pointer past
 * it. For len 1, 2 or 4, 'val' is the value itself; for any other len
 * 'val' is a pointer to len bytes that are copied verbatim.
 *
 * NOTE(review): there is no bound on the output buffer here — every
 * caller must size its buffer for the worst case; verify.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	*((u8 *) opt->val) = val;

	put_unaligned_le16(val, opt->val);

	put_unaligned_le32(val, opt->val);

	memcpy(opt->val, (void *) val, len);

	*ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option built from the
 * channel's local QoS parameters (ERTM and streaming modes only).
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		/* Access latency and flush timeout use the spec defaults */
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);

	case L2CAP_MODE_STREAMING:
		/* Streaming channels are always best-effort */
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if received frames are
 * still unacknowledged, send an RR/RNR to acknowledge them. Drops the
 * channel reference taken when the work was scheduled.
 */
static void l2cap_ack_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Number of frames received but not yet acked */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state for a channel and, for ERTM
 * only, set up the timers and the SREJ/retransmission sequence lists.
 * Returns 0 on success or a negative error from list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;

	chan->sdu_last_frag = NULL;

	skb_queue_head_init(&chan->tx_q);

	/* Channel starts on the BR/EDR controller with no AMP move active */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Everything below applies to ERTM only */
	if (chan->mode != L2CAP_MODE_ERTM)

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
		/* Undo the first list on failure of the second */
		l2cap_seq_list_free(&chan->srej_list);
/* Pick a channel mode: keep the requested ERTM/streaming mode if the
 * remote's feature mask supports it, otherwise fall back to basic.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))

	return L2CAP_MODE_BASIC;
3316 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3318 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3321 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3323 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose ERTM retransmission/monitor timeouts. On an AMP link they are
 * derived from the controller's best-effort flush timeout (clamped to
 * 16 bits); on BR/EDR the spec default values are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field */
		if (ertm_to > 0xffff)

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;

	rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Pick the transmit window: switch to the extended control field (and
 * the extended maximum window) when the requested window exceeds the
 * default and the peer supports it; otherwise clamp to the default.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = chan->tx_win;
/* Build a Configuration Request for the channel into 'data': MTU, the
 * RFC (mode) option and, depending on mode and features, EFS, EWS and
 * FCS options. Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the first request */
	if (chan->num_conf_req || chan->num_conf_rsp)

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* Fall back if the remote does not support this mode */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);

	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an RFC option if the remote understands it */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size is capped by the ACL MTU minus ERTM overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,

		/* Offer to disable FCS if either side asked for none */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);
/* Parse a peer's Configuration Request (accumulated in chan->conf_req)
 * and build our Configuration Response into 'data'. Returns the
 * response length, or a negative error to trigger a disconnect.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("chan %p", chan);

	/* First pass: collect the peer's requested options.
	 * NOTE(review): for fixed-size options (MTU, FLUSH_TO, FCS, EWS)
	 * olen is not checked before 'val' is used — verify against
	 * upstream option-size hardening.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		case L2CAP_CONF_MTU:

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;

		case L2CAP_CONF_QOS:

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support */
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;

			/* Echo unknown non-hint options back to the peer */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;

	/* Mode negotiation happens only on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		return -ECONNREFUSED;

	if (chan->mode != rfc.mode)
		return -ECONNREFUSED;

	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Only one retry of the mode is allowed */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;

		set_bit(CONF_MTU_DONE, &chan->conf_state);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		/* A no-traffic channel must not be asked to carry traffic
		 * of a different service type.
		 */
		if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
		    efs.stype != L2CAP_SERV_NOTRAFIC &&
		    efs.stype != chan->local_stype) {

			result = L2CAP_CONF_UNACCEPT;

			if (chan->num_conf_req >= 1)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					   (unsigned long) &efs);

			/* Send PENDING Conf Rsp */
			result = L2CAP_CONF_PENDING;
			set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);

		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to our ACL MTU budget */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   (unsigned long) &efs);

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;

	if (result == L2CAP_CONF_SUCCESS)
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);
/* Parse the peer's Configuration Response and build a new request into
 * 'data' reflecting any adjusted values. Returns the new request
 * length, or a negative error to refuse the connection.
 *
 * NOTE(review): this function was historically affected by the
 * BlueBorne stack overflow (CVE-2017-1000251) when echoed options
 * overran the caller's buffer — confirm callers bound 'len' and the
 * 'data' buffer size.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		case L2CAP_CONF_MTU:
			/* Peer rejected our MTU: fall back to the minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state-2 device may not change its mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service type must be compatible with ours */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,

	/* In basic mode the peer may not force another mode on us */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);
3820 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3821 u16 result, u16 flags)
3823 struct l2cap_conf_rsp *rsp = data;
3824 void *ptr = rsp->data;
3826 BT_DBG("chan %p", chan);
3828 rsp->scid = cpu_to_le16(chan->dcid);
3829 rsp->result = cpu_to_le16(result);
3830 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE credit-based connection response once the
 * channel has been accepted by the upper layer.
 */
void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p", chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR (or AMP create-channel) connect response,
 * then kick off configuration if we have not yet sent our request.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP */
	rsp_code = L2CAP_CREATE_CHAN_RSP;

	rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only the first caller sends the configuration request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
/* Extract the negotiated RFC (and extended window) values from a
 * successful Configuration Response and apply them to the channel.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Only ERTM and streaming mode carry RFC parameters */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

		case L2CAP_CONF_EWS:

	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
			chan->ack_win = min_t(u16, chan->ack_win,

	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject: if it rejects our pending
 * information request, stop waiting for the reply and proceed with
 * connection setup using whatever we know.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, perform security checks, create the new channel and send a
 * Connection Response (and an Information Request when remote features
 * are still unknown).
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
		result = L2CAP_CR_BAD_PSM;

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))

	chan = pchan->ops->new_connection(pchan);

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);

	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace must accept the channel first */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				status = L2CAP_CS_NO_INFO;
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;

	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Query the remote feature mask before completing setup */
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);

	/* Immediately start configuration on a successful connect */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
/* Wrapper for a plain BR/EDR Connection Request: notify the management
 * layer of the new device connection, then run the common connect
 * path with a CONN_RSP response code.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))

	/* Report the connection to mgmt only once per ACL */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or AMP Create Channel Response): on
 * success move to BT_CONFIG and send our configuration request; on a
 * pending result just record it; otherwise tear the channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rsp))

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* Look up by our source CID, falling back to the command ident */
	chan = __l2cap_get_chan_by_scid(conn, scid);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);

	l2cap_chan_lock(chan);

	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);

		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only the first path to get here sends the config request */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;

		set_bit(CONF_CONNECT_PEND, &chan->conf_state);

		l2cap_chan_del(chan, ECONNREFUSED);

	l2cap_chan_unlock(chan);

	mutex_unlock(&conn->chan_lock);
4185 static inline void set_default_fcs(struct l2cap_chan *chan)
4187 /* FCS is enabled only in ERTM or streaming mode, if one or both
4190 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4191 chan->fcs = L2CAP_FCS_NONE;
4192 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4193 chan->fcs = L2CAP_FCS_CRC16;
/* Send a successful Configure Response for an EFS channel and mark
 * the local side of configuration as complete.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
4212 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4215 struct l2cap_cmd_rej_cid rej;
4217 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4218 rej.scid = __cpu_to_le16(scid);
4219 rej.dcid = __cpu_to_le16(dcid);
4221 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle a Configuration Request: accumulate option fragments into
 * chan->conf_req, and once the final fragment arrives parse them,
 * respond, and finish channel setup when both directions are done.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*req))

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);

	/* Config is only valid while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_REJECT, flags), rsp);

	/* Store the (possibly partial) option data */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_SUCCESS, flags), rsp);

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
		l2cap_send_disconn_req(chan, ECONNRESET);

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		l2cap_send_disconn_req(chan, -err);

		l2cap_chan_ready(chan);

	/* Send our own config request if we have not done so yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;

	/* Got Conf Rsp PENDING from remote side and assume we sent
	 * Conf Rsp PENDING in the code above.
	 */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);

		chan->ident = cmd->ident;

	l2cap_chan_unlock(chan);
/* Handle a Configuration Response: apply accepted values, renegotiate
 * on UNACCEPT (bounded by L2CAP_CONF_MAX_CONF_RSP), process the EFS
 * PENDING handshake, and finish setup when both directions are done.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);

	if (cmd_len < sizeof(*rsp))

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,

	chan = l2cap_get_chan_by_scid(conn, scid);

	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
				l2cap_send_disconn_req(chan, ECONNRESET);

			/* BR/EDR answers now; AMP waits for the logical link */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			/* Disconnect if the echoed options cannot fit our
			 * request buffer.
			 */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
				l2cap_send_disconn_req(chan, ECONNRESET);

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)

		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		l2cap_send_disconn_req(chan, -err);

		l2cap_chan_ready(chan);

	l2cap_chan_unlock(chan);
/* Handle an L2CAP Disconnection Request: echo a Disconnection Response,
 * then tear the channel down and release it via ops->close() outside the
 * channel lock.
 * NOTE(review): excerpt is line-sampled; some braces/returns not shown.
 */
4445 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4446 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4449 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4450 struct l2cap_disconn_rsp rsp;
4452 struct l2cap_chan *chan;
/* Fixed-size PDU; anything else is malformed. */
4454 if (cmd_len != sizeof(*req))
4457 scid = __le16_to_cpu(req->scid);
4458 dcid = __le16_to_cpu(req->dcid);
4460 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4462 mutex_lock(&conn->chan_lock);
/* The peer's dcid names our local (source) CID. */
4464 chan = __l2cap_get_chan_by_scid(conn, dcid);
/* Unknown CID: reject with an invalid-CID command reject. */
4466 mutex_unlock(&conn->chan_lock);
4467 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4471 l2cap_chan_lock(chan);
/* Answer with our view of the CID pair (roles swapped). */
4473 rsp.dcid = cpu_to_le16(chan->scid);
4474 rsp.scid = cpu_to_le16(chan->dcid);
4475 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4477 chan->ops->set_shutdown(chan);
/* Hold a ref so the channel survives until close/put below. */
4479 l2cap_chan_hold(chan);
4480 l2cap_chan_del(chan, ECONNRESET);
4482 l2cap_chan_unlock(chan);
/* close() must run without the channel lock held. */
4484 chan->ops->close(chan);
4485 l2cap_chan_put(chan);
4487 mutex_unlock(&conn->chan_lock);
/* Handle an L2CAP Disconnection Response: the peer confirmed our
 * disconnect, so delete the channel (err 0 == clean close) and release
 * it via ops->close() outside the channel lock.
 * NOTE(review): excerpt is line-sampled; some braces/returns not shown.
 */
4492 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4493 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4496 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4498 struct l2cap_chan *chan;
/* Fixed-size PDU; anything else is malformed. */
4500 if (cmd_len != sizeof(*rsp))
4503 scid = __le16_to_cpu(rsp->scid);
4504 dcid = __le16_to_cpu(rsp->dcid);
4506 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4508 mutex_lock(&conn->chan_lock);
/* Here the peer's scid is our local source CID. */
4510 chan = __l2cap_get_chan_by_scid(conn, scid);
4512 mutex_unlock(&conn->chan_lock);
4516 l2cap_chan_lock(chan);
/* Hold a ref so the channel survives until close/put below. */
4518 l2cap_chan_hold(chan);
4519 l2cap_chan_del(chan, 0);
4521 l2cap_chan_unlock(chan);
/* close() must run without the channel lock held. */
4523 chan->ops->close(chan);
4524 l2cap_chan_put(chan);
4526 mutex_unlock(&conn->chan_lock);
/* Handle an L2CAP Information Request: answer feature-mask and
 * fixed-channel queries; anything else gets a NOTSUPP response.
 * NOTE(review): excerpt is line-sampled; buffer declarations and some
 * braces are not shown.
 */
4531 static inline int l2cap_information_req(struct l2cap_conn *conn,
4532 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4535 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
/* Fixed-size PDU; anything else is malformed. */
4538 if (cmd_len != sizeof(*req))
4541 type = __le16_to_cpu(req->type);
4543 BT_DBG("type 0x%4.4x", type);
4545 if (type == L2CAP_IT_FEAT_MASK) {
/* Start from the static feature mask and add runtime features. */
4547 u32 feat_mask = l2cap_feat_mask;
4548 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4549 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4550 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4552 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* High-speed (AMP) enabled: advertise extended flow/window. */
4554 if (conn->hs_enabled)
4555 feat_mask |= L2CAP_FEAT_EXT_FLOW
4556 | L2CAP_FEAT_EXT_WINDOW;
/* rsp->data may be unaligned inside the raw buffer. */
4558 put_unaligned_le32(feat_mask, rsp->data);
4559 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4561 } else if (type == L2CAP_IT_FIXED_CHAN) {
4563 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when HS is enabled. */
4565 if (conn->hs_enabled)
4566 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4568 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4570 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4571 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4572 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4573 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Unknown info type: respond NOTSUPP with the type echoed back. */
4576 struct l2cap_info_rsp rsp;
4577 rsp.type = cpu_to_le16(type);
4578 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4579 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an L2CAP Information Response during connection setup.
 *
 * Feature-mask replies may trigger a follow-up fixed-channel query;
 * otherwise the info phase is marked done and pending channels are
 * started via l2cap_conn_start().
 * NOTE(review): excerpt is line-sampled; some braces/breaks not shown.
 */
4586 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4587 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4590 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4593 if (cmd_len < sizeof(*rsp))
4596 type = __le16_to_cpu(rsp->type);
4597 result = __le16_to_cpu(rsp->result);
4599 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4601 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
/* Drop replies we did not solicit or that arrive after completion. */
4602 if (cmd->ident != conn->info_ident ||
4603 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4606 cancel_delayed_work(&conn->info_timer);
/* Peer refused: treat the info phase as finished anyway. */
4608 if (result != L2CAP_IR_SUCCESS) {
4609 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4610 conn->info_ident = 0;
4612 l2cap_conn_start(conn);
4618 case L2CAP_IT_FEAT_MASK:
4619 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: chain a fixed-channel query. */
4621 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4622 struct l2cap_info_req req;
4623 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4625 conn->info_ident = l2cap_get_ident(conn);
4627 l2cap_send_cmd(conn, conn->info_ident,
4628 L2CAP_INFO_REQ, sizeof(req), &req);
/* No fixed-channel support: info phase complete. */
4630 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4631 conn->info_ident = 0;
4633 l2cap_conn_start(conn);
4637 case L2CAP_IT_FIXED_CHAN:
4638 conn->fixed_chan_mask = rsp->data[0];
4639 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4640 conn->info_ident = 0;
4642 l2cap_conn_start(conn);
/* Handle an L2CAP Create Channel Request (AMP channel creation).
 *
 * amp_id 0 (AMP_ID_BREDR) degenerates to an ordinary BR/EDR connect;
 * otherwise the AMP controller id is validated and the new channel is
 * bound to the AMP physical link. On failure a BAD_AMP response is sent.
 * NOTE(review): excerpt is line-sampled; error paths/labels not shown.
 */
4649 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4650 struct l2cap_cmd_hdr *cmd,
4651 u16 cmd_len, void *data)
4653 struct l2cap_create_chan_req *req = data;
4654 struct l2cap_create_chan_rsp rsp;
4655 struct l2cap_chan *chan;
4656 struct hci_dev *hdev;
/* Fixed-size PDU; anything else is malformed. */
4659 if (cmd_len != sizeof(*req))
/* Create Channel is only legal when high speed is enabled. */
4662 if (!conn->hs_enabled)
4665 psm = le16_to_cpu(req->psm);
4666 scid = le16_to_cpu(req->scid);
4668 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4670 /* For controller id 0 make BR/EDR connection */
4671 if (req->amp_id == AMP_ID_BREDR) {
4672 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4677 /* Validate AMP controller id */
4678 hdev = hci_dev_get(req->amp_id);
/* Id must name an AMP controller that is actually up. */
4682 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4687 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4690 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4691 struct hci_conn *hs_hcon;
/* Find the AMP physical link to the same peer. */
4693 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4697 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4702 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
/* Bind the channel to the high-speed link; AMP uses no FCS. */
4704 mgr->bredr_chan = chan;
4705 chan->hs_hcon = hs_hcon;
4706 chan->fcs = L2CAP_FCS_NONE;
4707 conn->mtu = hdev->block_mtu;
/* Error path: report a bad AMP controller id to the peer. */
4716 rsp.scid = cpu_to_le16(scid);
4717 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4718 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4720 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4726 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4728 struct l2cap_move_chan_req req;
4731 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4733 ident = l2cap_get_ident(chan->conn);
4734 chan->ident = ident;
4736 req.icid = cpu_to_le16(chan->scid);
4737 req.dest_amp_id = dest_amp_id;
4739 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4742 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4745 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4747 struct l2cap_move_chan_rsp rsp;
4749 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4751 rsp.icid = cpu_to_le16(chan->dcid);
4752 rsp.result = cpu_to_le16(result);
4754 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4758 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4760 struct l2cap_move_chan_cfm cfm;
4762 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4764 chan->ident = l2cap_get_ident(chan->conn);
4766 cfm.icid = cpu_to_le16(chan->scid);
4767 cfm.result = cpu_to_le16(result);
4769 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4772 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4775 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4777 struct l2cap_move_chan_cfm cfm;
4779 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4781 cfm.icid = cpu_to_le16(icid);
4782 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4784 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4788 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4791 struct l2cap_move_chan_cfm_rsp rsp;
4793 BT_DBG("icid 0x%4.4x", icid);
4795 rsp.icid = cpu_to_le16(icid);
4796 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4799 static void __release_logical_link(struct l2cap_chan *chan)
4801 chan->hs_hchan = NULL;
4802 chan->hs_hcon = NULL;
4804 /* Placeholder - release the logical link */
/* Clean up after a failed AMP logical-link setup.
 *
 * If the channel never reached BT_CONNECTED the channel creation itself
 * failed, so disconnect. Otherwise this was a channel move: undo it
 * according to our move role.
 * NOTE(review): excerpt is line-sampled; some braces/breaks not shown.
 */
4807 static void l2cap_logical_fail(struct l2cap_chan *chan)
4809 /* Logical link setup failed */
4810 if (chan->state != BT_CONNECTED) {
4811 /* Create channel failure, disconnect */
4812 l2cap_send_disconn_req(chan, ECONNRESET);
4816 switch (chan->move_role) {
4817 case L2CAP_MOVE_ROLE_RESPONDER:
/* Abort the move and tell the initiator we cannot support it. */
4818 l2cap_move_done(chan);
4819 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4821 case L2CAP_MOVE_ROLE_INITIATOR:
4822 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4823 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4824 /* Remote has only sent pending or
4825 * success responses, clean up
4827 l2cap_move_done(chan);
4830 /* Other amp move states imply that the move
4831 * has already aborted
4833 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Finish creating an AMP channel once its logical link is up: bind the
 * hci_chan, send the deferred EFS config response, and if configuration
 * is already complete bring the channel up (ERTM init + ready).
 * NOTE(review): excerpt is line-sampled; some braces not shown.
 */
4838 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4839 struct hci_chan *hchan)
4841 struct l2cap_conf_rsp rsp;
4843 chan->hs_hchan = hchan;
4844 chan->hs_hcon->l2cap_data = chan->conn;
/* Config response was held back until the logical link came up. */
4846 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4848 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4851 set_default_fcs(chan);
4853 err = l2cap_ertm_init(chan);
4855 l2cap_send_disconn_req(chan, -err);
4857 l2cap_chan_ready(chan);
/* Advance the channel-move state machine once the AMP logical link for a
 * move has come up; next step depends on the current move state and our
 * role (initiator sends a confirm, responder a success response).
 * NOTE(review): excerpt is line-sampled; some braces/breaks not shown.
 */
4861 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4862 struct hci_chan *hchan)
4864 chan->hs_hcon = hchan->conn;
4865 chan->hs_hcon->l2cap_data = chan->conn;
4867 BT_DBG("move_state %d", chan->move_state);
4869 switch (chan->move_state) {
4870 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4871 /* Move confirm will be sent after a success
4872 * response is received
4874 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4876 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Defer while the local receiver is busy. */
4877 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4878 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4879 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4880 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4881 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4882 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4883 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4884 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4888 /* Move was not in expected state, free the channel */
4889 __release_logical_link(chan);
4891 chan->move_state = L2CAP_MOVE_STABLE;
4895 /* Call with chan locked */
/* Logical-link completion callback from the AMP layer: on failure tear
 * down via l2cap_logical_fail(); on success finish either channel
 * creation (not yet BT_CONNECTED) or a channel move.
 * NOTE(review): excerpt is line-sampled; status check/braces not shown.
 */
4896 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4899 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
/* Failure path: abort and drop the link references. */
4902 l2cap_logical_fail(chan);
4903 __release_logical_link(chan);
4907 if (chan->state != BT_CONNECTED) {
4908 /* Ignore logical link if channel is on BR/EDR */
4909 if (chan->local_amp_id != AMP_ID_BREDR)
4910 l2cap_logical_finish_create(chan, hchan);
4912 l2cap_logical_finish_move(chan, hchan);
/* Begin moving this channel to the other controller type as initiator:
 * from BR/EDR a physical AMP link must be prepared first; from an AMP a
 * move request back to BR/EDR (dest id 0) is sent immediately.
 * NOTE(review): excerpt is line-sampled; some braces/returns not shown.
 */
4916 void l2cap_move_start(struct l2cap_chan *chan)
4918 BT_DBG("chan %p", chan);
4920 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Only move off BR/EDR when policy actually prefers AMP. */
4921 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4923 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4924 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4925 /* Placeholder - start physical link setup */
4927 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4928 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
/* Moving back to BR/EDR: destination controller id 0. */
4930 l2cap_move_setup(chan);
4931 l2cap_send_move_chan_req(chan, 0);
/* Continue AMP channel creation after the physical link attempt.
 *
 * Outgoing channels either proceed with a Create Channel Request on the
 * AMP or fall back to a plain BR/EDR Connect Request. Incoming channels
 * get a (possibly negative) Create Channel Response and, on success,
 * move to BT_CONFIG and start configuration.
 * NOTE(review): excerpt is line-sampled; some braces/returns not shown.
 */
4935 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4936 u8 local_amp_id, u8 remote_amp_id)
4938 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4939 local_amp_id, remote_amp_id);
/* AMP channels never use an FCS. */
4941 chan->fcs = L2CAP_FCS_NONE;
4943 /* Outgoing channel on AMP */
4944 if (chan->state == BT_CONNECT) {
4945 if (result == L2CAP_CR_SUCCESS) {
4946 chan->local_amp_id = local_amp_id;
4947 l2cap_send_create_chan_req(chan, remote_amp_id);
4949 /* Revert to BR/EDR connect */
4950 l2cap_send_conn_req(chan);
4956 /* Incoming channel on AMP */
4957 if (__l2cap_no_conn_pending(chan)) {
4958 struct l2cap_conn_rsp rsp;
4960 rsp.scid = cpu_to_le16(chan->dcid);
4961 rsp.dcid = cpu_to_le16(chan->scid);
4963 if (result == L2CAP_CR_SUCCESS) {
4964 /* Send successful response */
4965 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4966 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4968 /* Send negative response */
4969 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4970 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* chan->ident still holds the Create Channel Request's ident. */
4973 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4976 if (result == L2CAP_CR_SUCCESS) {
/* Channel accepted: enter config and send the first Conf Req. */
4977 l2cap_state_change(chan, BT_CONFIG);
4978 set_bit(CONF_REQ_SENT, &chan->conf_state);
4979 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4981 l2cap_build_conf_req(chan, buf), buf);
4982 chan->num_conf_req++;
4987 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4990 l2cap_move_setup(chan);
4991 chan->move_id = local_amp_id;
4992 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4994 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Respond to an accepted channel move: if the logical link is already
 * connected, bind it and answer SUCCESS; otherwise wait for it, or
 * answer NOT_ALLOWED when no logical link is available.
 * NOTE(review): excerpt is line-sampled — the `if (hchan)` NULL guard
 * and else-branch braces are not shown; hchan comes from a placeholder
 * and is NULL here until the AMP lookup is implemented.
 */
4997 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4999 struct hci_chan *hchan = NULL;
5001 /* Placeholder - get hci_chan for logical link */
5004 if (hchan->state == BT_CONNECTED) {
5005 /* Logical link is ready to go */
5006 chan->hs_hcon = hchan->conn;
5007 chan->hs_hcon->l2cap_data = chan->conn;
5008 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5009 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5011 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5013 /* Wait for logical link to be ready */
5014 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5017 /* Logical link not available */
5018 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5022 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5024 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5026 if (result == -EINVAL)
5027 rsp_result = L2CAP_MR_BAD_ID;
5029 rsp_result = L2CAP_MR_NOT_ALLOWED;
5031 l2cap_send_move_chan_rsp(chan, rsp_result);
5034 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5035 chan->move_state = L2CAP_MOVE_STABLE;
5037 /* Restart data transmission */
5038 l2cap_ertm_send(chan);
5041 /* Invoke with locked chan */
/* Physical-link completion callback from the AMP layer: route to channel
 * creation, move cancellation, or the role-specific move continuation.
 * NOTE(review): excerpt is line-sampled; returns/braces/breaks not shown.
 */
5042 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5044 u8 local_amp_id = chan->local_amp_id;
5045 u8 remote_amp_id = chan->remote_amp_id;
5047 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5048 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away: nothing to do. */
5050 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
5051 l2cap_chan_unlock(chan);
/* Not yet connected: this physical link is for channel creation. */
5055 if (chan->state != BT_CONNECTED) {
5056 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5057 } else if (result != L2CAP_MR_SUCCESS) {
5058 l2cap_do_move_cancel(chan, result);
5060 switch (chan->move_role) {
5061 case L2CAP_MOVE_ROLE_INITIATOR:
5062 l2cap_do_move_initiate(chan, local_amp_id,
5065 case L2CAP_MOVE_ROLE_RESPONDER:
5066 l2cap_do_move_respond(chan, result);
/* No/unknown role: treat as a cancelled move. */
5069 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request.
 *
 * Validates that the channel exists and is movable (dynamic CID,
 * ERTM/streaming mode, policy permits), checks the destination AMP id,
 * detects move collisions (larger bd_addr wins), then either accepts the
 * move toward BR/EDR or starts AMP physical-link acceptance.
 * NOTE(review): excerpt is line-sampled; returns/labels/braces not shown.
 */
5075 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5076 struct l2cap_cmd_hdr *cmd,
5077 u16 cmd_len, void *data)
5079 struct l2cap_move_chan_req *req = data;
5080 struct l2cap_move_chan_rsp rsp;
5081 struct l2cap_chan *chan;
5083 u16 result = L2CAP_MR_NOT_ALLOWED;
/* Fixed-size PDU; anything else is malformed. */
5085 if (cmd_len != sizeof(*req))
5088 icid = le16_to_cpu(req->icid);
5090 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves are only legal when high speed is enabled. */
5092 if (!conn->hs_enabled)
5095 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown ICID: answer NOT_ALLOWED without a channel. */
5097 rsp.icid = cpu_to_le16(icid);
5098 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5099 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Remember the ident for the deferred response. */
5104 chan->ident = cmd->ident;
/* Only dynamic, ERTM/streaming channels whose policy allows AMP
 * may move.
 */
5106 if (chan->scid < L2CAP_CID_DYN_START ||
5107 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5108 (chan->mode != L2CAP_MODE_ERTM &&
5109 chan->mode != L2CAP_MODE_STREAMING)) {
5110 result = L2CAP_MR_NOT_ALLOWED;
5111 goto send_move_response;
/* Moving to the controller we are already on is an error. */
5114 if (chan->local_amp_id == req->dest_amp_id) {
5115 result = L2CAP_MR_SAME_ID;
5116 goto send_move_response;
/* Non-BR/EDR destination must name an AMP controller that is up. */
5119 if (req->dest_amp_id != AMP_ID_BREDR) {
5120 struct hci_dev *hdev;
5121 hdev = hci_dev_get(req->dest_amp_id);
5122 if (!hdev || hdev->dev_type != HCI_AMP ||
5123 !test_bit(HCI_UP, &hdev->flags)) {
5127 result = L2CAP_MR_BAD_ID;
5128 goto send_move_response;
5133 /* Detect a move collision. Only send a collision response
5134 * if this side has "lost", otherwise proceed with the move.
5135 * The winner has the larger bd_addr.
5137 if ((__chan_is_moving(chan) ||
5138 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5139 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5140 result = L2CAP_MR_COLLISION;
5141 goto send_move_response;
5144 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5145 l2cap_move_setup(chan);
5146 chan->move_id = req->dest_amp_id;
5149 if (req->dest_amp_id == AMP_ID_BREDR) {
5150 /* Moving to BR/EDR */
5151 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5152 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5153 result = L2CAP_MR_PEND;
5155 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5156 result = L2CAP_MR_SUCCESS;
/* Moving to an AMP: must first accept the physical link. */
5159 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5160 /* Placeholder - uncomment when amp functions are available */
5161 /*amp_accept_physical(chan, req->dest_amp_id);*/
5162 result = L2CAP_MR_PEND;
5166 l2cap_send_move_chan_rsp(chan, result);
5168 l2cap_chan_unlock(chan);
/* Continue an in-progress move after a SUCCESS or PEND Move Channel
 * Response, advancing the initiator-side state machine; when the state
 * is unexpected the move is aborted with an UNCONFIRMED confirmation.
 * NOTE(review): excerpt is line-sampled; breaks/labels/braces not shown.
 */
5173 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5175 struct l2cap_chan *chan;
/* Placeholder value until the AMP logical-link lookup is wired up. */
5176 struct hci_chan *hchan = NULL;
5178 chan = l2cap_get_chan_by_scid(conn, icid);
/* No such channel: confirm UNCONFIRMED using the bare icid. */
5180 l2cap_send_move_chan_cfm_icid(conn, icid);
5184 __clear_chan_timer(chan);
/* PEND responses restart the extended move timer. */
5185 if (result == L2CAP_MR_PEND)
5186 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5188 switch (chan->move_state) {
5189 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5190 /* Move confirm will be sent when logical link
5193 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5195 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5196 if (result == L2CAP_MR_PEND) {
5198 } else if (test_bit(CONN_LOCAL_BUSY,
5199 &chan->conn_state)) {
5200 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5202 /* Logical link is up or moving to BR/EDR,
5205 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5206 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5209 case L2CAP_MOVE_WAIT_RSP:
5211 if (result == L2CAP_MR_SUCCESS) {
5212 /* Remote is ready, send confirm immediately
5213 * after logical link is ready
5215 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5217 /* Both logical link and move success
5218 * are required to confirm
5220 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5223 /* Placeholder - get hci_chan for logical link */
5225 /* Logical link not available */
5226 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5230 /* If the logical link is not yet connected, do not
5231 * send confirmation.
5233 if (hchan->state != BT_CONNECTED)
5236 /* Logical link is already ready to go */
5238 chan->hs_hcon = hchan->conn;
5239 chan->hs_hcon->l2cap_data = chan->conn;
5241 if (result == L2CAP_MR_SUCCESS) {
5242 /* Can confirm now */
5243 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5245 /* Now only need move success
5248 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5251 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5254 /* Any other amp move state means the move failed. */
5255 chan->move_id = chan->local_amp_id;
5256 l2cap_move_done(chan);
5257 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5260 l2cap_chan_unlock(chan);
/* Handle a negative Move Channel Response: on COLLISION switch to the
 * responder role, otherwise cancel the move; either way send an
 * UNCONFIRMED Move Channel Confirmation.
 * NOTE(review): excerpt is line-sampled; some braces/returns not shown.
 */
5263 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5266 struct l2cap_chan *chan;
/* The response is matched by signalling ident, not by CID. */
5268 chan = l2cap_get_chan_by_ident(conn, ident);
5270 /* Could not locate channel, icid is best guess */
5271 l2cap_send_move_chan_cfm_icid(conn, icid);
5275 __clear_chan_timer(chan);
5277 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
/* We lost the collision: let the peer drive the move. */
5278 if (result == L2CAP_MR_COLLISION) {
5279 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5281 /* Cleanup - cancel move */
5282 chan->move_id = chan->local_amp_id;
5283 l2cap_move_done(chan);
5287 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5289 l2cap_chan_unlock(chan);
5292 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5293 struct l2cap_cmd_hdr *cmd,
5294 u16 cmd_len, void *data)
5296 struct l2cap_move_chan_rsp *rsp = data;
5299 if (cmd_len != sizeof(*rsp))
5302 icid = le16_to_cpu(rsp->icid);
5303 result = le16_to_cpu(rsp->result);
5305 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5307 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5308 l2cap_move_continue(conn, icid, result);
5310 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an L2CAP Move Channel Confirmation: commit or revert the
 * controller id, finish the move, and always acknowledge with a Move
 * Channel Confirmation Response (even for an unknown ICID).
 * NOTE(review): excerpt is line-sampled; returns/else/braces not shown.
 */
5315 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5316 struct l2cap_cmd_hdr *cmd,
5317 u16 cmd_len, void *data)
5319 struct l2cap_move_chan_cfm *cfm = data;
5320 struct l2cap_chan *chan;
/* Fixed-size PDU; anything else is malformed. */
5323 if (cmd_len != sizeof(*cfm))
5326 icid = le16_to_cpu(cfm->icid);
5327 result = le16_to_cpu(cfm->result);
5329 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5331 chan = l2cap_get_chan_by_dcid(conn, icid);
5333 /* Spec requires a response even if the icid was not found */
5334 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5338 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
/* CONFIRMED commits the move; otherwise revert to the old id. */
5339 if (result == L2CAP_MC_CONFIRMED) {
5340 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR: the AMP logical link is no longer needed. */
5341 if (chan->local_amp_id == AMP_ID_BREDR)
5342 __release_logical_link(chan);
5344 chan->move_id = chan->local_amp_id;
5347 l2cap_move_done(chan);
5350 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5352 l2cap_chan_unlock(chan);
5357 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5358 struct l2cap_cmd_hdr *cmd,
5359 u16 cmd_len, void *data)
5361 struct l2cap_move_chan_cfm_rsp *rsp = data;
5362 struct l2cap_chan *chan;
5365 if (cmd_len != sizeof(*rsp))
5368 icid = le16_to_cpu(rsp->icid);
5370 BT_DBG("icid 0x%4.4x", icid);
5372 chan = l2cap_get_chan_by_scid(conn, icid);
5376 __clear_chan_timer(chan);
5378 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5379 chan->local_amp_id = chan->move_id;
5381 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5382 __release_logical_link(chan);
5384 l2cap_move_done(chan);
5387 l2cap_chan_unlock(chan);
5392 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5397 if (min > max || min < 6 || max > 3200)
5400 if (to_multiplier < 10 || to_multiplier > 3200)
5403 if (max >= to_multiplier * 8)
5406 max_latency = (to_multiplier * 8 / max) - 1;
5407 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (only valid when we
 * are master): validate the proposed parameters, respond with
 * accepted/rejected, and on acceptance push the update to the LE link.
 * NOTE(review): excerpt is line-sampled; returns/braces not shown.
 */
5413 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5414 struct l2cap_cmd_hdr *cmd,
5415 u16 cmd_len, u8 *data)
5417 struct hci_conn *hcon = conn->hcon;
5418 struct l2cap_conn_param_update_req *req;
5419 struct l2cap_conn_param_update_rsp rsp;
5420 u16 min, max, latency, to_multiplier;
/* Only the master may apply parameter updates. */
5423 if (!(hcon->link_mode & HCI_LM_MASTER))
/* Fixed-size PDU; anything else is malformed. */
5426 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5429 req = (struct l2cap_conn_param_update_req *) data;
5430 min = __le16_to_cpu(req->min);
5431 max = __le16_to_cpu(req->max);
5432 latency = __le16_to_cpu(req->latency);
5433 to_multiplier = __le16_to_cpu(req->to_multiplier);
5435 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5436 min, max, latency, to_multiplier);
5438 memset(&rsp, 0, sizeof(rsp));
5440 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5442 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5444 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5446 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters valid: actually update the LE connection. */
5450 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an LE Credit Based Connection Response: locate our pending
 * channel by ident; on success record the remote parameters and mark
 * the channel ready, otherwise delete it with ECONNREFUSED.
 * NOTE(review): excerpt is line-sampled; returns/breaks/labels not
 * shown (e.g. the dcid/mtu/mps assignments on success).
 */
5455 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5456 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5459 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5460 u16 dcid, mtu, mps, credits, result;
5461 struct l2cap_chan *chan;
5464 if (cmd_len < sizeof(*rsp))
5467 dcid = __le16_to_cpu(rsp->dcid);
5468 mtu = __le16_to_cpu(rsp->mtu);
5469 mps = __le16_to_cpu(rsp->mps);
5470 credits = __le16_to_cpu(rsp->credits);
5471 result = __le16_to_cpu(rsp->result);
/* Spec minimum for LE CoC MTU and MPS is 23 octets. */
5473 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5476 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5477 dcid, mtu, mps, credits, result);
5479 mutex_lock(&conn->chan_lock);
/* The response is matched by the request's signalling ident. */
5481 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5489 l2cap_chan_lock(chan);
5492 case L2CAP_CR_SUCCESS:
5496 chan->remote_mps = mps;
5497 chan->tx_credits = credits;
5498 l2cap_chan_ready(chan);
/* Any non-success result refuses the connection. */
5502 l2cap_chan_del(chan, ECONNREFUSED);
5506 l2cap_chan_unlock(chan);
5509 mutex_unlock(&conn->chan_lock);
/* Dispatch a single BR/EDR signalling command to its handler; unknown
 * opcodes are logged (and presumably rejected by the caller via the
 * returned error).
 * NOTE(review): excerpt is line-sampled; break statements and the final
 * return are not shown.
 */
5514 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5515 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5520 switch (cmd->code) {
5521 case L2CAP_COMMAND_REJ:
5522 l2cap_command_rej(conn, cmd, cmd_len, data);
5525 case L2CAP_CONN_REQ:
5526 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Create Channel Rsp shares the Connect Rsp handler. */
5529 case L2CAP_CONN_RSP:
5530 case L2CAP_CREATE_CHAN_RSP:
5531 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5534 case L2CAP_CONF_REQ:
5535 err = l2cap_config_req(conn, cmd, cmd_len, data);
5538 case L2CAP_CONF_RSP:
5539 l2cap_config_rsp(conn, cmd, cmd_len, data);
5542 case L2CAP_DISCONN_REQ:
5543 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5546 case L2CAP_DISCONN_RSP:
5547 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo requests are answered in place with the same payload. */
5550 case L2CAP_ECHO_REQ:
5551 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5554 case L2CAP_ECHO_RSP:
5557 case L2CAP_INFO_REQ:
5558 err = l2cap_information_req(conn, cmd, cmd_len, data);
5561 case L2CAP_INFO_RSP:
5562 l2cap_information_rsp(conn, cmd, cmd_len, data);
5565 case L2CAP_CREATE_CHAN_REQ:
5566 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5569 case L2CAP_MOVE_CHAN_REQ:
5570 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5573 case L2CAP_MOVE_CHAN_RSP:
5574 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5577 case L2CAP_MOVE_CHAN_CFM:
5578 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5581 case L2CAP_MOVE_CHAN_CFM_RSP:
5582 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5586 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request: find a listener for the
 * PSM, enforce security and duplicate-CID rules, create and register the
 * new channel, then answer with an LE Connect Response (unless setup is
 * deferred via FLAG_DEFER_SETUP, which answers PENDING elsewhere).
 * NOTE(review): excerpt is line-sampled; psm extraction, several
 * assignments, labels and returns are not shown.
 */
5594 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5595 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5598 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5599 struct l2cap_le_conn_rsp rsp;
5600 struct l2cap_chan *chan, *pchan;
5601 u16 dcid, scid, credits, mtu, mps;
/* Fixed-size PDU; anything else is malformed. */
5605 if (cmd_len != sizeof(*req))
5608 scid = __le16_to_cpu(req->scid);
5609 mtu = __le16_to_cpu(req->mtu);
5610 mps = __le16_to_cpu(req->mps);
/* Spec minimum for LE CoC MTU and MPS is 23 octets. */
5615 if (mtu < 23 || mps < 23)
5618 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5621 /* Check if we have socket listening on psm */
5622 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5623 &conn->hcon->dst, LE_LINK);
5625 result = L2CAP_CR_BAD_PSM;
5630 mutex_lock(&conn->chan_lock);
5631 l2cap_chan_lock(pchan);
/* Link security must satisfy the listener's requirement. */
5633 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5634 result = L2CAP_CR_AUTHENTICATION;
5636 goto response_unlock;
5639 /* Check if we already have channel with that dcid */
5640 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5641 result = L2CAP_CR_NO_MEM;
5643 goto response_unlock;
5646 chan = pchan->ops->new_connection(pchan);
5648 result = L2CAP_CR_NO_MEM;
5649 goto response_unlock;
/* Populate addressing from the underlying LE link. */
5652 bacpy(&chan->src, &conn->hcon->src);
5653 bacpy(&chan->dst, &conn->hcon->dst);
5654 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5655 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5659 chan->remote_mps = mps;
/* Initial credit grant for our transmit direction. */
5660 chan->tx_credits = __le16_to_cpu(req->credits);
5662 __l2cap_chan_add(conn, chan);
5664 credits = chan->rx_credits;
5666 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5668 chan->ident = cmd->ident;
/* Deferred setup: report PENDING and let userspace accept later. */
5670 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5671 l2cap_state_change(chan, BT_CONNECT2);
5672 result = L2CAP_CR_PEND;
5673 chan->ops->defer(chan);
5675 l2cap_chan_ready(chan);
5676 result = L2CAP_CR_SUCCESS;
5680 l2cap_chan_unlock(pchan);
5681 mutex_unlock(&conn->chan_lock);
/* PENDING responses are sent from the defer path, not here. */
5683 if (result == L2CAP_CR_PEND)
5688 rsp.mtu = cpu_to_le16(chan->imtu);
5689 rsp.mps = cpu_to_le16(chan->mps);
5695 rsp.dcid = cpu_to_le16(dcid);
5696 rsp.credits = cpu_to_le16(credits);
5697 rsp.result = cpu_to_le16(result);
5699 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit PDU: add the granted credits to the
 * channel's transmit budget and flush as much queued data as the new
 * budget allows.
 * NOTE(review): excerpt is line-sampled; returns/braces not shown. No
 * overflow check on tx_credits is visible in this version — later
 * kernels disconnect when the credit count would exceed the spec
 * maximum; verify against upstream.
 */
5704 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5705 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5708 struct l2cap_le_credits *pkt;
5709 struct l2cap_chan *chan;
/* Fixed-size PDU; anything else is malformed. */
5712 if (cmd_len != sizeof(*pkt))
5715 pkt = (struct l2cap_le_credits *) data;
5716 cid = __le16_to_cpu(pkt->cid);
5717 credits = __le16_to_cpu(pkt->credits);
5719 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
/* Presumably returns the channel locked; unlocked below. */
5721 chan = l2cap_get_chan_by_dcid(conn, cid);
5725 chan->tx_credits += credits;
/* Drain the transmit queue while credits remain. */
5727 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5728 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Credits left over: let the owner resume sending. */
5732 if (chan->tx_credits)
5733 chan->ops->resume(chan);
5735 l2cap_chan_unlock(chan);
/* Dispatch a single LE signalling command to its handler. When LE
 * connection-oriented channels are disabled, the CoC opcodes (and the
 * disconnect pair) are filtered out up front.
 * NOTE(review): excerpt is line-sampled; breaks and the final return
 * are not shown.
 */
5740 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5741 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
/* LE CoC disabled: these opcodes are not accepted at all. */
5746 if (!enable_lecoc) {
5747 switch (cmd->code) {
5748 case L2CAP_LE_CONN_REQ:
5749 case L2CAP_LE_CONN_RSP:
5750 case L2CAP_LE_CREDITS:
5751 case L2CAP_DISCONN_REQ:
5752 case L2CAP_DISCONN_RSP:
5757 switch (cmd->code) {
5758 case L2CAP_COMMAND_REJ:
5761 case L2CAP_CONN_PARAM_UPDATE_REQ:
5762 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5765 case L2CAP_CONN_PARAM_UPDATE_RSP:
5768 case L2CAP_LE_CONN_RSP:
5769 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5772 case L2CAP_LE_CONN_REQ:
5773 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5776 case L2CAP_LE_CREDITS:
5777 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5780 case L2CAP_DISCONN_REQ:
5781 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5784 case L2CAP_DISCONN_RSP:
5785 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5789 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process one PDU received on the LE signalling channel: validate the
 * header, dispatch the single command it carries, and answer failures
 * with a NOT_UNDERSTOOD command reject.
 * NOTE(review): excerpt is line-sampled; goto/labels and the final
 * kfree_skb are not shown.
 */
5797 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5798 struct sk_buff *skb)
5800 struct hci_conn *hcon = conn->hcon;
5801 struct l2cap_cmd_hdr *cmd;
/* LE signalling only arrives on LE links. */
5805 if (hcon->type != LE_LINK)
5808 if (skb->len < L2CAP_CMD_HDR_SIZE)
5811 cmd = (void *) skb->data;
5812 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5814 len = le16_to_cpu(cmd->len);
5816 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signalling carries exactly one command; ident 0 is illegal. */
5818 if (len != skb->len || !cmd->ident) {
5819 BT_DBG("corrupted command");
5823 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5825 struct l2cap_cmd_rej_unk rej;
5827 BT_ERR("Wrong link type (%d)", err);
5829 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5830 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Receive path for the BR/EDR signaling channel.  Unlike the LE
 * variant, one PDU may contain several concatenated commands, hence
 * the while-loop that walks header-by-header through the payload.
 * Raw data is first delivered to any raw (sniffer) sockets.
 */
5838 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5839 struct sk_buff *skb)
5841 struct hci_conn *hcon = conn->hcon;
5842 u8 *data = skb->data;
5844 struct l2cap_cmd_hdr cmd;
5847 l2cap_raw_recv(conn, skb);
/* BR/EDR signaling only valid on an ACL link */
5849 if (hcon->type != ACL_LINK)
/* Iterate over every command packed into this signaling PDU */
5852 while (len >= L2CAP_CMD_HDR_SIZE) {
5854 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5855 data += L2CAP_CMD_HDR_SIZE;
5856 len -= L2CAP_CMD_HDR_SIZE;
5858 cmd_len = le16_to_cpu(cmd.len);
5860 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5863 if (cmd_len > len || !cmd.ident) {
5864 BT_DBG("corrupted command");
5868 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5870 struct l2cap_cmd_rej_unk rej;
5872 BT_ERR("Wrong link type (%d)", err);
5874 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5875 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS trailer on a received ERTM/streaming frame.
 * The FCS covers the L2CAP header (enhanced or extended control
 * header, depending on FLAG_EXT_CTRL) plus the payload.  The trailer
 * is trimmed off the skb before comparison.
 */
5887 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5889 u16 our_fcs, rcv_fcs;
5892 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5893 hdr_size = L2CAP_EXT_HDR_SIZE;
5895 hdr_size = L2CAP_ENH_HDR_SIZE;
5897 if (chan->fcs == L2CAP_FCS_CRC16) {
5898 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
/* After the trim, skb->data + skb->len points at the received FCS */
5899 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* CRC runs from the start of the L2CAP header through the payload */
5900 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5902 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) with an F-bit response: RNR when locally busy,
 * otherwise pending I-frames, falling back to an RR S-frame if no
 * I-frame carried the F-bit.  Also restarts the retransmission timer
 * when leaving remote-busy with unacked frames outstanding.
 */
5908 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5910 struct l2cap_ctrl control;
5912 BT_DBG("chan %p", chan);
5914 memset(&control, 0, sizeof(control));
5917 control.reqseq = chan->buffer_seq;
5918 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5920 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5921 control.super = L2CAP_SUPER_RNR;
5922 l2cap_send_sframe(chan, &control);
5925 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5926 chan->unacked_frames > 0)
5927 __set_retrans_timer(chan);
5929 /* Send pending iframes */
5930 l2cap_ertm_send(chan);
5932 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5933 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5934 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5937 control.super = L2CAP_SUPER_RR;
5938 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list during SDU reassembly, keeping
 * skb->len / data_len / truesize accounting consistent.  *last_frag
 * tracks the current tail so appends stay O(1).
 */
5942 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5943 struct sk_buff **last_frag)
5945 /* skb->len reflects data in skb as well as all fragments
5946 * skb->data_len reflects only data in fragments
5948 if (!skb_has_frag_list(skb))
5949 skb_shinfo(skb)->frag_list = new_frag;
5951 new_frag->next = NULL;
5953 (*last_frag)->next = new_frag;
5954 *last_frag = new_frag;
5956 skb->len += new_frag->len;
5957 skb->data_len += new_frag->len;
5958 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM/streaming I-frames according to the SAR
 * bits: unsegmented frames are delivered directly; START stores the
 * 16-bit SDU length and begins a partial SDU in chan->sdu; CONTINUE
 * and END append fragments, with END delivering the completed SDU to
 * chan->ops->recv().  Error paths free the partial SDU and reset the
 * reassembly state.
 */
5961 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5962 struct l2cap_ctrl *control)
5966 switch (control->sar) {
5967 case L2CAP_SAR_UNSEGMENTED:
5971 err = chan->ops->recv(chan, skb);
5974 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix */
5978 chan->sdu_len = get_unaligned_le16(skb->data);
5979 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU is a protocol violation */
5981 if (chan->sdu_len > chan->imtu) {
5986 if (skb->len >= chan->sdu_len)
5990 chan->sdu_last_frag = skb;
5996 case L2CAP_SAR_CONTINUE:
6000 append_skb_frag(chan->sdu, skb,
6001 &chan->sdu_last_frag);
6004 if (chan->sdu->len >= chan->sdu_len)
6014 append_skb_frag(chan->sdu, skb,
6015 &chan->sdu_last_frag);
/* END fragment: accumulated length must match the announced length */
6018 if (chan->sdu->len != chan->sdu_len)
6021 err = chan->ops->recv(chan, chan->sdu)
6024 /* Reassembly complete */
6026 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state */
6034 kfree_skb(chan->sdu);
6036 chan->sdu_last_frag = NULL;
/* Re-segment queued outgoing data after a channel move changes the
 * MTU.  NOTE(review): body elided in this extract — confirm against
 * the full source before relying on its semantics.
 */
6043 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition into the ERTM TX state machine.
 * No-op for channels not in ERTM mode.
 */
6049 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6053 if (chan->mode != L2CAP_MODE_ERTM)
6056 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6057 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ hold queue: deliver buffered I-frames in sequence
 * order to the reassembler until a gap is hit or local-busy is set.
 * Once the queue is empty, fall back to the normal RECV state and
 * acknowledge what was consumed.
 */
6060 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6063 /* Pass sequential frames to l2cap_reassemble_sdu()
6064 * until a gap is encountered.
6067 BT_DBG("chan %p", chan);
6069 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6070 struct sk_buff *skb;
6071 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6072 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6074 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6079 skb_unlink(skb, &chan->srej_q);
6080 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6081 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
6086 if (skb_queue_empty(&chan->srej_q)) {
6087 chan->rx_state = L2CAP_RX_STATE_RECV;
6088 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single frame named
 * by reqseq.  An SREJ for a never-sent sequence, or for a frame that
 * has hit max_tx retries, forces a disconnect.  P/F bit handling
 * mirrors the ERTM spec: a poll gets an F-bit response, and a final
 * bit is matched against a previously saved SREJ reqseq
 * (CONN_SREJ_ACT) to avoid duplicate retransmission.
 */
6094 static void l2cap_handle_srej(struct l2cap_chan *chan,
6095 struct l2cap_ctrl *control)
6097 struct sk_buff *skb;
6099 BT_DBG("chan %p, control %p", chan, control);
/* reqseq == next_tx_seq means the peer SREJ'd a frame we never sent */
6101 if (control->reqseq == chan->next_tx_seq) {
6102 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6103 l2cap_send_disconn_req(chan, ECONNRESET);
6107 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6110 BT_DBG("Seq %d not available for retransmission",
6115 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
6116 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6117 l2cap_send_disconn_req(chan, ECONNRESET);
6121 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6123 if (control->poll) {
6124 l2cap_pass_to_tx(chan, control);
6126 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6127 l2cap_retransmit(chan, control);
6128 l2cap_ertm_send(chan);
6130 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6131 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6132 chan->srej_save_reqseq = control->reqseq;
6135 l2cap_pass_to_tx_fbit(chan, control);
6137 if (control->final) {
/* Only retransmit if this F-bit doesn't answer the saved SREJ */
6138 if (chan->srej_save_reqseq != control->reqseq ||
6139 !test_and_clear_bit(CONN_SREJ_ACT,
6141 l2cap_retransmit(chan, control);
6143 l2cap_retransmit(chan, control);
6144 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6145 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6146 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit all unacked frames from
 * reqseq onward.  Invalid reqseq or exceeding max_tx retries tears
 * the channel down.  CONN_REJ_ACT suppresses a duplicate
 * retransmit-all when the F-bit acknowledges a REJ we already acted
 * on.
 */
6152 static void l2cap_handle_rej(struct l2cap_chan *chan,
6153 struct l2cap_ctrl *control)
6155 struct sk_buff *skb;
6157 BT_DBG("chan %p, control %p", chan, control);
6159 if (control->reqseq == chan->next_tx_seq) {
6160 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6161 l2cap_send_disconn_req(chan, ECONNRESET);
6165 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6167 if (chan->max_tx && skb &&
6168 bt_cb(skb)->control.retries >= chan->max_tx) {
6169 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6170 l2cap_send_disconn_req(chan, ECONNRESET);
6174 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6176 l2cap_pass_to_tx(chan, control);
6178 if (control->final) {
6179 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6180 l2cap_retransmit_all(chan, control);
6182 l2cap_retransmit_all(chan, control);
6183 l2cap_ertm_send(chan);
6184 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6185 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the RX window:
 * expected, duplicate, unexpected (gap => SREJ needed), or invalid.
 * In SREJ_SENT state extra classes distinguish frames that fill the
 * hole (EXPECTED_SREJ), duplicates already queued, and SREJ responses
 * we never asked for.  Returns one of the L2CAP_TXSEQ_* constants.
 */
6189 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6191 BT_DBG("chan %p, txseq %d", chan, txseq);
6193 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6194 chan->expected_tx_seq);
6196 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6197 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6199 /* See notes below regarding "double poll" and
6202 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6203 BT_DBG("Invalid/Ignore - after SREJ");
6204 return L2CAP_TXSEQ_INVALID_IGNORE;
6206 BT_DBG("Invalid - in window after SREJ sent");
6207 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we're waiting for */
6211 if (chan->srej_list.head == txseq) {
6212 BT_DBG("Expected SREJ");
6213 return L2CAP_TXSEQ_EXPECTED_SREJ;
6216 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6217 BT_DBG("Duplicate SREJ - txseq already stored");
6218 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6221 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6222 BT_DBG("Unexpected SREJ - not requested");
6223 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6227 if (chan->expected_tx_seq == txseq) {
6228 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6230 BT_DBG("Invalid - txseq outside tx window");
6231 return L2CAP_TXSEQ_INVALID;
6234 return L2CAP_TXSEQ_EXPECTED;
6238 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6239 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6240 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6241 return L2CAP_TXSEQ_DUPLICATE;
6244 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6245 /* A source of invalid packets is a "double poll" condition,
6246 * where delays cause us to send multiple poll packets. If
6247 * the remote stack receives and processes both polls,
6248 * sequence numbers can wrap around in such a way that a
6249 * resent frame has a sequence number that looks like new data
6250 * with a sequence gap. This would trigger an erroneous SREJ
6253 * Fortunately, this is impossible with a tx window that's
6254 * less than half of the maximum sequence number, which allows
6255 * invalid frames to be safely ignored.
6257 * With tx window sizes greater than half of the tx window
6258 * maximum, the frame is invalid and cannot be ignored. This
6259 * causes a disconnect.
6262 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6263 BT_DBG("Invalid/Ignore - txseq outside tx window");
6264 return L2CAP_TXSEQ_INVALID_IGNORE;
6266 BT_DBG("Invalid - txseq outside tx window");
6267 return L2CAP_TXSEQ_INVALID;
6270 BT_DBG("Unexpected - txseq indicates missing frames");
6271 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, RECV state.  Handles incoming I-frames
 * (classified by l2cap_classify_txseq) and the RR/RNR/REJ/SREJ
 * S-frame events.  A sequence gap switches the channel into
 * SREJ_SENT, queueing the out-of-order frame and sending SREJs for
 * the missing ones.  skb ownership: any skb not queued or consumed
 * (skb_in_use stays false) is freed before returning.
 */
6275 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6276 struct l2cap_ctrl *control,
6277 struct sk_buff *skb, u8 event)
6280 bool skb_in_use = false;
6282 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6286 case L2CAP_EV_RECV_IFRAME:
6287 switch (l2cap_classify_txseq(chan, control->txseq)) {
6288 case L2CAP_TXSEQ_EXPECTED:
6289 l2cap_pass_to_tx(chan, control);
/* Local busy: drop now, recover via SREJ when busy clears */
6291 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6292 BT_DBG("Busy, discarding expected seq %d",
6297 chan->expected_tx_seq = __next_seq(chan,
6300 chan->buffer_seq = chan->expected_tx_seq;
6303 err = l2cap_reassemble_sdu(chan, skb, control);
6307 if (control->final) {
6308 if (!test_and_clear_bit(CONN_REJ_ACT,
6309 &chan->conn_state)) {
6311 l2cap_retransmit_all(chan, control);
6312 l2cap_ertm_send(chan);
6316 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6317 l2cap_send_ack(chan);
6319 case L2CAP_TXSEQ_UNEXPECTED:
6320 l2cap_pass_to_tx(chan, control);
6322 /* Can't issue SREJ frames in the local busy state.
6323 * Drop this frame, it will be seen as missing
6324 * when local busy is exited.
6326 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6327 BT_DBG("Busy, discarding unexpected seq %d",
6332 /* There was a gap in the sequence, so an SREJ
6333 * must be sent for each missing frame. The
6334 * current frame is stored for later use.
6336 skb_queue_tail(&chan->srej_q, skb);
6338 BT_DBG("Queued %p (queue len %d)", skb,
6339 skb_queue_len(&chan->srej_q));
6341 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6342 l2cap_seq_list_clear(&chan->srej_list);
6343 l2cap_send_srej(chan, control->txseq);
6345 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6347 case L2CAP_TXSEQ_DUPLICATE:
6348 l2cap_pass_to_tx(chan, control);
6350 case L2CAP_TXSEQ_INVALID_IGNORE:
6352 case L2CAP_TXSEQ_INVALID:
6354 l2cap_send_disconn_req(chan, ECONNRESET);
6358 case L2CAP_EV_RECV_RR:
6359 l2cap_pass_to_tx(chan, control);
6360 if (control->final) {
6361 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6363 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6364 !__chan_is_moving(chan)) {
6366 l2cap_retransmit_all(chan, control);
6369 l2cap_ertm_send(chan);
6370 } else if (control->poll) {
6371 l2cap_send_i_or_rr_or_rnr(chan);
6373 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6374 &chan->conn_state) &&
6375 chan->unacked_frames)
6376 __set_retrans_timer(chan);
6378 l2cap_ertm_send(chan);
6381 case L2CAP_EV_RECV_RNR:
6382 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6383 l2cap_pass_to_tx(chan, control);
6384 if (control && control->poll) {
6385 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6386 l2cap_send_rr_or_rnr(chan, 0);
6388 __clear_retrans_timer(chan);
6389 l2cap_seq_list_clear(&chan->retrans_list);
6391 case L2CAP_EV_RECV_REJ:
6392 l2cap_handle_rej(chan, control);
6394 case L2CAP_EV_RECV_SREJ:
6395 l2cap_handle_srej(chan, control);
/* Frame was neither queued nor consumed: release it */
6401 if (skb && !skb_in_use) {
6402 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state: at least one SREJ is
 * outstanding.  Incoming I-frames are queued on srej_q; when the
 * frame at the head of the SREJ list arrives, the hold queue is
 * drained via l2cap_rx_queued_iframes().  Further gaps generate
 * additional SREJs.  S-frame events keep P/F-bit bookkeeping in sync.
 */
6409 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6410 struct l2cap_ctrl *control,
6411 struct sk_buff *skb, u8 event)
6414 u16 txseq = control->txseq;
6415 bool skb_in_use = false;
6417 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6421 case L2CAP_EV_RECV_IFRAME:
6422 switch (l2cap_classify_txseq(chan, txseq)) {
6423 case L2CAP_TXSEQ_EXPECTED:
6424 /* Keep frame for reassembly later */
6425 l2cap_pass_to_tx(chan, control);
6426 skb_queue_tail(&chan->srej_q, skb);
6428 BT_DBG("Queued %p (queue len %d)", skb,
6429 skb_queue_len(&chan->srej_q));
6431 chan->expected_tx_seq = __next_seq(chan, txseq);
6433 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The retransmission we asked for arrived: drop it from the
 * pending-SREJ list and try to drain the hold queue.
 */
6434 l2cap_seq_list_pop(&chan->srej_list);
6436 l2cap_pass_to_tx(chan, control);
6437 skb_queue_tail(&chan->srej_q, skb);
6439 BT_DBG("Queued %p (queue len %d)", skb,
6440 skb_queue_len(&chan->srej_q));
6442 err = l2cap_rx_queued_iframes(chan);
6447 case L2CAP_TXSEQ_UNEXPECTED:
6448 /* Got a frame that can't be reassembled yet.
6449 * Save it for later, and send SREJs to cover
6450 * the missing frames.
6452 skb_queue_tail(&chan->srej_q, skb);
6454 BT_DBG("Queued %p (queue len %d)", skb,
6455 skb_queue_len(&chan->srej_q));
6457 l2cap_pass_to_tx(chan, control);
6458 l2cap_send_srej(chan, control->txseq);
6460 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6461 /* This frame was requested with an SREJ, but
6462 * some expected retransmitted frames are
6463 * missing. Request retransmission of missing
6466 skb_queue_tail(&chan->srej_q, skb);
6468 BT_DBG("Queued %p (queue len %d)", skb,
6469 skb_queue_len(&chan->srej_q));
6471 l2cap_pass_to_tx(chan, control);
6472 l2cap_send_srej_list(chan, control->txseq);
6474 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6475 /* We've already queued this frame. Drop this copy. */
6476 l2cap_pass_to_tx(chan, control);
6478 case L2CAP_TXSEQ_DUPLICATE:
6479 /* Expecting a later sequence number, so this frame
6480 * was already received. Ignore it completely.
6483 case L2CAP_TXSEQ_INVALID_IGNORE:
6485 case L2CAP_TXSEQ_INVALID:
6487 l2cap_send_disconn_req(chan, ECONNRESET);
6491 case L2CAP_EV_RECV_RR:
6492 l2cap_pass_to_tx(chan, control);
6493 if (control->final) {
6494 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6496 if (!test_and_clear_bit(CONN_REJ_ACT,
6497 &chan->conn_state)) {
6499 l2cap_retransmit_all(chan, control);
6502 l2cap_ertm_send(chan);
6503 } else if (control->poll) {
6504 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6505 &chan->conn_state) &&
6506 chan->unacked_frames) {
6507 __set_retrans_timer(chan);
/* Poll answered with an SREJ carrying the F-bit */
6510 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6511 l2cap_send_srej_tail(chan);
6513 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6514 &chan->conn_state) &&
6515 chan->unacked_frames)
6516 __set_retrans_timer(chan);
6518 l2cap_send_ack(chan);
6521 case L2CAP_EV_RECV_RNR:
6522 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6523 l2cap_pass_to_tx(chan, control);
6524 if (control->poll) {
6525 l2cap_send_srej_tail(chan);
6527 struct l2cap_ctrl rr_control;
6528 memset(&rr_control, 0, sizeof(rr_control));
6529 rr_control.sframe = 1;
6530 rr_control.super = L2CAP_SUPER_RR;
6531 rr_control.reqseq = chan->buffer_seq;
6532 l2cap_send_sframe(chan, &rr_control);
6536 case L2CAP_EV_RECV_REJ:
6537 l2cap_handle_rej(chan, control);
6539 case L2CAP_EV_RECV_SREJ:
6540 l2cap_handle_srej(chan, control);
/* Frame was neither queued nor consumed: release it */
6544 if (skb && !skb_in_use) {
6545 BT_DBG("Freeing %p", skb);
/* Finalize an AMP channel move: return the RX state machine to RECV,
 * adopt the MTU of the new link (block MTU for a high-speed hcon,
 * otherwise the ACL MTU) and re-segment pending data accordingly.
 */
6552 static int l2cap_finish_move(struct l2cap_chan *chan)
6554 BT_DBG("chan %p", chan);
6556 chan->rx_state = L2CAP_RX_STATE_RECV;
6559 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6561 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6563 return l2cap_resegment(chan);
/* ERTM RX state machine, WAIT_P (channel move, waiting for a poll).
 * On the poll, acknowledged frames are processed, the TX queue head
 * and next_tx_seq are rewound to reqseq for retransmission on the new
 * link, the move is finished, and the poll is answered with an F-bit
 * frame.  Non-poll events fall through to the RECV handler (with the
 * skb withheld for I-frames).
 */
6566 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6567 struct l2cap_ctrl *control,
6568 struct sk_buff *skb, u8 event)
6572 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6578 l2cap_process_reqseq(chan, control->reqseq);
6580 if (!skb_queue_empty(&chan->tx_q))
6581 chan->tx_send_head = skb_peek(&chan->tx_q);
6583 chan->tx_send_head = NULL;
6585 /* Rewind next_tx_seq to the point expected
6588 chan->next_tx_seq = control->reqseq;
6589 chan->unacked_frames = 0;
6591 err = l2cap_finish_move(chan);
6595 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6596 l2cap_send_i_or_rr_or_rnr(chan);
6598 if (event == L2CAP_EV_RECV_IFRAME)
6601 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM RX state machine, WAIT_F (channel move, waiting for the F-bit
 * answer to our poll).  Once final arrives: back to RECV, process the
 * ack, rewind the TX queue/next_tx_seq to reqseq, adopt the new
 * link's MTU, re-segment, then run the frame through the normal RECV
 * handler.
 */
6604 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6605 struct l2cap_ctrl *control,
6606 struct sk_buff *skb, u8 event)
6610 if (!control->final)
6613 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6615 chan->rx_state = L2CAP_RX_STATE_RECV;
6616 l2cap_process_reqseq(chan, control->reqseq);
6618 if (!skb_queue_empty(&chan->tx_q))
6619 chan->tx_send_head = skb_peek(&chan->tx_q);
6621 chan->tx_send_head = NULL;
6623 /* Rewind next_tx_seq to the point expected
6626 chan->next_tx_seq = control->reqseq;
6627 chan->unacked_frames = 0;
6630 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6632 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6634 err = l2cap_resegment(chan);
6637 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Validate that reqseq acknowledges a frame that was actually sent
 * but not yet acked, i.e. it lies within
 * [expected_ack_seq, next_tx_seq] modulo the sequence space.
 */
6642 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6644 /* Make sure reqseq is for a packet that has been sent but not acked */
6647 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6648 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatcher: after validating reqseq, route the
 * frame/event to the handler for the channel's current rx_state.
 * An out-of-range reqseq is a protocol violation and disconnects the
 * channel.
 */
6651 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6652 struct sk_buff *skb, u8 event)
6656 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6657 control, skb, event, chan->rx_state);
6659 if (__valid_reqseq(chan, control->reqseq)) {
6660 switch (chan->rx_state) {
6661 case L2CAP_RX_STATE_RECV:
6662 err = l2cap_rx_state_recv(chan, control, skb, event);
6664 case L2CAP_RX_STATE_SREJ_SENT:
6665 err = l2cap_rx_state_srej_sent(chan, control, skb,
6668 case L2CAP_RX_STATE_WAIT_P:
6669 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6671 case L2CAP_RX_STATE_WAIT_F:
6672 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6679 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6680 control->reqseq, chan->next_tx_seq,
6681 chan->expected_ack_seq);
6682 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode RX: only the exactly-expected txseq is reassembled;
 * anything else discards any partial SDU (streaming mode has no
 * retransmission).  Sequence-tracking state is advanced past the
 * received frame either way.
 */
6688 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6689 struct sk_buff *skb)
6693 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6696 if (l2cap_classify_txseq(chan, control->txseq) ==
6697 L2CAP_TXSEQ_EXPECTED) {
6698 l2cap_pass_to_tx(chan, control);
6700 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6701 __next_seq(chan, chan->buffer_seq));
6703 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6705 l2cap_reassemble_sdu(chan, skb, control);
/* Missed frame: a partial SDU can never complete — drop it */
6708 kfree_skb(chan->sdu);
6711 chan->sdu_last_frag = NULL;
6715 BT_DBG("Freeing %p", skb);
6720 chan->last_acked_seq = control->txseq;
6721 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames on a channel.  Unpacks
 * the control field, verifies the FCS (bad frames are silently
 * dropped and recovered via retransmission), validates the payload
 * length against MPS and the F/P bit combinations, then routes
 * I-frames and S-frames into l2cap_rx()/l2cap_stream_rx().
 */
6726 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6728 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6732 __unpack_control(chan, skb);
6737 * We can just drop the corrupted I-frame here.
6738 * Receiver will miss it and start proper recovery
6739 * procedures and ask for retransmission.
6741 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length prefix and FCS trailer */
6744 if (!control->sframe && control->sar == L2CAP_SAR_START)
6745 len -= L2CAP_SDULEN_SIZE;
6747 if (chan->fcs == L2CAP_FCS_CRC16)
6748 len -= L2CAP_FCS_SIZE;
6750 if (len > chan->mps) {
6751 l2cap_send_disconn_req(chan, ECONNRESET);
6755 if (!control->sframe) {
6758 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6759 control->sar, control->reqseq, control->final,
6762 /* Validate F-bit - F=0 always valid, F=1 only
6763 * valid in TX WAIT_F
6765 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6768 if (chan->mode != L2CAP_MODE_STREAMING) {
6769 event = L2CAP_EV_RECV_IFRAME;
6770 err = l2cap_rx(chan, control, skb, event);
6772 err = l2cap_stream_rx(chan, control, skb);
6776 l2cap_send_disconn_req(chan, ECONNRESET);
6778 const u8 rx_func_to_event[4] = {
6779 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6780 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6783 /* Only I-frames are expected in streaming mode */
6784 if (chan->mode == L2CAP_MODE_STREAMING)
6787 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6788 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation */
6792 BT_ERR("Trailing bytes: %d in sframe", len);
6793 l2cap_send_disconn_req(chan, ECONNRESET);
6797 /* Validate F and P bits */
6798 if (control->final && (control->poll ||
6799 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6802 event = rx_func_to_event[control->super];
6803 if (l2cap_rx(chan, control, skb, event))
6804 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return flow-control credits to an LE CoC peer.  Credits are only
 * replenished once the local count drops below half of
 * L2CAP_LE_MAX_CREDITS, topping the peer back up to the maximum.
 */
6814 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6816 struct l2cap_conn *conn = chan->conn;
6817 struct l2cap_le_credits pkt;
6820 /* We return more credits to the sender only after the amount of
6821 * credits falls below half of the initial amount.
6823 if (chan->rx_credits >= (L2CAP_LE_MAX_CREDITS + 1) / 2)
6826 return_credits = L2CAP_LE_MAX_CREDITS - chan->rx_credits;
6828 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6830 chan->rx_credits += return_credits;
6832 pkt.cid = cpu_to_le16(chan->scid);
6833 pkt.credits = cpu_to_le16(return_credits);
6835 chan->ident = l2cap_get_ident(conn);
6837 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive one LE CoC PDU.  Enforces the credit count and MTU, then
 * performs SDU reassembly: the first PDU of an SDU carries a 16-bit
 * SDU length prefix; subsequent PDUs are appended until the SDU is
 * complete and delivered to chan->ops->recv().  Oversized data tears
 * down the reassembly state.  Frees the skb itself on internal errors
 * so the caller never double-frees.
 */
6840 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6844 if (!chan->rx_credits) {
6845 BT_ERR("No credits to receive LE L2CAP data");
6849 if (chan->imtu < skb->len) {
6850 BT_ERR("Too big LE L2CAP PDU");
6855 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6857 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: read the 16-bit SDU length prefix */
6864 sdu_len = get_unaligned_le16(skb->data);
6865 skb_pull(skb, L2CAP_SDULEN_SIZE);
6867 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6868 sdu_len, skb->len, chan->imtu);
6870 if (sdu_len > chan->imtu) {
6871 BT_ERR("Too big LE L2CAP SDU length received");
6876 if (skb->len > sdu_len) {
6877 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU in one PDU: deliver without buffering */
6882 if (skb->len == sdu_len)
6883 return chan->ops->recv(chan, skb);
6886 chan->sdu_len = sdu_len;
6887 chan->sdu_last_frag = skb;
6892 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6893 chan->sdu->len, skb->len, chan->sdu_len);
6895 if (chan->sdu->len + skb->len > chan->sdu_len) {
6896 BT_ERR("Too much LE L2CAP data received");
6901 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6904 if (chan->sdu->len == chan->sdu_len) {
6905 err = chan->ops->recv(chan, chan->sdu);
6908 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU and reset reassembly state */
6916 kfree_skb(chan->sdu);
6918 chan->sdu_last_frag = NULL;
6922 /* We can't return an error here since we took care of the skb
6923 * freeing internally. An error return would cause the caller to
6924 * do a double-free of the skb.
/* Deliver a data frame to the channel identified by @cid, dispatching
 * on the channel mode: LE flow control, basic (with an MTU check —
 * overflowing frames are dropped since basic mode has no flow
 * control), or ERTM/streaming via l2cap_data_rcv().  An unknown CID
 * drops the packet; the A2MP fixed CID creates its channel on demand.
 */
6929 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6930 struct sk_buff *skb)
6932 struct l2cap_chan *chan;
6934 chan = l2cap_get_chan_by_scid(conn, cid);
6936 if (cid == L2CAP_CID_A2MP) {
6937 chan = a2mp_channel_create(conn, skb);
6943 l2cap_chan_lock(chan);
6945 BT_DBG("unknown cid 0x%4.4x", cid);
6946 /* Drop packet and return */
6952 BT_DBG("chan %p, len %d", chan, skb->len);
6954 if (chan->state != BT_CONNECTED)
6957 switch (chan->mode) {
6958 case L2CAP_MODE_LE_FLOWCTL:
6959 if (l2cap_le_data_rcv(chan, skb) < 0)
6964 case L2CAP_MODE_BASIC:
6965 /* If socket recv buffers overflows we drop data here
6966 * which is *bad* because L2CAP has to be reliable.
6967 * But we don't have any other choice. L2CAP doesn't
6968 * provide flow control mechanism. */
6970 if (chan->imtu < skb->len)
6973 if (!chan->ops->recv(chan, skb))
6977 case L2CAP_MODE_ERTM:
6978 case L2CAP_MODE_STREAMING:
6979 l2cap_data_rcv(chan, skb);
6983 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6991 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to the global channel
 * listening on @psm.  Only valid on ACL links.  The sender's BD_ADDR
 * and PSM are stashed in the skb control block so recvmsg() can fill
 * in msg_name.
 */
6994 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6995 struct sk_buff *skb)
6997 struct hci_conn *hcon = conn->hcon;
6998 struct l2cap_chan *chan;
7000 if (hcon->type != ACL_LINK)
7003 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7008 BT_DBG("chan %p, len %d", chan, skb->len);
7010 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7013 if (chan->imtu < skb->len)
7016 /* Store remote BD_ADDR and PSM for msg_name */
7017 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
7018 bt_cb(skb)->psm = psm;
7020 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel packet (LE links only) to the global
 * ATT channel matching this address pair.  Blacklisted remote
 * addresses and frames exceeding the channel MTU are dropped.
 */
7027 static void l2cap_att_channel(struct l2cap_conn *conn,
7028 struct sk_buff *skb)
7030 struct hci_conn *hcon = conn->hcon;
7031 struct l2cap_chan *chan;
7033 if (hcon->type != LE_LINK)
7036 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
7037 &hcon->src, &hcon->dst);
7041 BT_DBG("chan %p, len %d", chan, skb->len);
7043 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
7046 if (chan->imtu < skb->len)
7049 if (!chan->ops->recv(chan, skb))
/* Demultiplex one complete L2CAP frame by CID: signaling (BR/EDR and
 * LE), connectionless, ATT, SMP, or a dynamically allocated data
 * channel.  The basic L2CAP header's length field must match the
 * remaining payload.
 */
7056 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7058 struct l2cap_hdr *lh = (void *) skb->data;
7062 skb_pull(skb, L2CAP_HDR_SIZE);
7063 cid = __le16_to_cpu(lh->cid);
7064 len = __le16_to_cpu(lh->len);
7066 if (len != skb->len) {
7071 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7074 case L2CAP_CID_SIGNALING:
7075 l2cap_sig_channel(conn, skb);
7078 case L2CAP_CID_CONN_LESS:
/* Connectionless payload starts with a PSM field */
7079 psm = get_unaligned((__le16 *) skb->data);
7080 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7081 l2cap_conless_channel(conn, psm, skb);
7085 l2cap_att_channel(conn, skb);
7088 case L2CAP_CID_LE_SIGNALING:
7089 l2cap_le_sig_channel(conn, skb);
/* SMP failure is fatal for the whole connection */
7093 if (smp_sig_channel(conn, skb))
7094 l2cap_conn_del(conn->hcon, EACCES);
7098 l2cap_data_channel(conn, cid, skb);
7103 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming connection request: scan listening
 * channels and compute the link-mode mask (accept / master) that
 * tells the HCI layer whether to accept.  An exact local-address
 * match takes precedence over BDADDR_ANY listeners.
 */
7105 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7107 int exact = 0, lm1 = 0, lm2 = 0;
7108 struct l2cap_chan *c;
7110 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7112 /* Find listening sockets and check their link_mode */
7113 read_lock(&chan_list_lock);
7114 list_for_each_entry(c, &chan_list, global_l) {
7115 if (c->state != BT_LISTEN)
7118 if (!bacmp(&c->src, &hdev->bdaddr)) {
7119 lm1 |= HCI_LM_ACCEPT;
7120 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7121 lm1 |= HCI_LM_MASTER;
7123 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7124 lm2 |= HCI_LM_ACCEPT;
7125 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7126 lm2 |= HCI_LM_MASTER;
7129 read_unlock(&chan_list_lock);
7131 return exact ? lm1 : lm2;
/* HCI callback when a baseband connection attempt completes: on
 * success, create/ready the L2CAP connection; on failure, tear it
 * down with the HCI status mapped to an errno.
 */
7134 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7136 struct l2cap_conn *conn;
7138 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7141 conn = l2cap_conn_add(hcon);
7143 l2cap_conn_ready(conn);
7145 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking which HCI reason code to use when disconnecting
 * this link; falls back to "remote user terminated" if no L2CAP
 * connection state exists.
 */
7149 int l2cap_disconn_ind(struct hci_conn *hcon)
7151 struct l2cap_conn *conn = hcon->l2cap_data;
7153 BT_DBG("hcon %p", hcon);
7156 return HCI_ERROR_REMOTE_USER_TERM;
7157 return conn->disc_reason;
/* HCI callback on link disconnection: tear down the L2CAP connection
 * with the HCI reason mapped to an errno.
 */
7160 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7162 BT_DBG("hcon %p reason %d", hcon, reason);
7164 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Encryption lost: medium-security channels get a grace timer,
 * high-security channels are closed immediately.  Encryption
 * restored: cancel the grace timer for medium security.
 */
7167 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7169 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7172 if (encrypt == 0x00) {
7173 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7174 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7175 } else if (chan->sec_level == BT_SECURITY_HIGH)
7176 l2cap_chan_close(chan, ECONNREFUSED);
7178 if (chan->sec_level == BT_SECURITY_MEDIUM)
7179 __clear_chan_timer(chan);
/* HCI callback for a completed security (authentication/encryption)
 * procedure.  LE links: hand off to SMP key distribution.  BR/EDR:
 * walk every channel on the connection and advance its state —
 * already-connected channels resume, BT_CONNECT channels (re)start
 * the connection, and BT_CONNECT2 channels answer the pending connect
 * request with success, pending-authorization, or security-block.
 */
7183 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7185 struct l2cap_conn *conn = hcon->l2cap_data;
7186 struct l2cap_chan *chan;
7191 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7193 if (hcon->type == LE_LINK) {
7194 if (!status && encrypt)
7195 smp_distribute_keys(conn, 0);
7196 cancel_delayed_work(&conn->security_timer);
7199 mutex_lock(&conn->chan_lock);
7201 list_for_each_entry(chan, &conn->chan_l, list) {
7202 l2cap_chan_lock(chan);
7204 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7205 state_to_string(chan->state));
/* A2MP fixed channels are not subject to link security changes */
7207 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
7208 l2cap_chan_unlock(chan);
7212 if (chan->scid == L2CAP_CID_ATT) {
7213 if (!status && encrypt) {
7214 chan->sec_level = hcon->sec_level;
7215 l2cap_chan_ready(chan);
7218 l2cap_chan_unlock(chan);
7222 if (!__l2cap_no_conn_pending(chan)) {
7223 l2cap_chan_unlock(chan);
7227 if (!status && (chan->state == BT_CONNECTED ||
7228 chan->state == BT_CONFIG)) {
7229 chan->ops->resume(chan);
7230 l2cap_check_encryption(chan, encrypt);
7231 l2cap_chan_unlock(chan);
7235 if (chan->state == BT_CONNECT) {
7237 l2cap_start_connection(chan);
7239 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7240 } else if (chan->state == BT_CONNECT2) {
7241 struct l2cap_conn_rsp rsp;
7245 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7246 res = L2CAP_CR_PEND;
7247 stat = L2CAP_CS_AUTHOR_PEND;
7248 chan->ops->defer(chan);
7250 l2cap_state_change(chan, BT_CONFIG);
7251 res = L2CAP_CR_SUCCESS;
7252 stat = L2CAP_CS_NO_INFO;
7255 l2cap_state_change(chan, BT_DISCONN);
7256 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7257 res = L2CAP_CR_SEC_BLOCK;
7258 stat = L2CAP_CS_NO_INFO;
7261 rsp.scid = cpu_to_le16(chan->dcid);
7262 rsp.dcid = cpu_to_le16(chan->scid);
7263 rsp.result = cpu_to_le16(res);
7264 rsp.status = cpu_to_le16(stat);
7265 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Kick off configuration right away on a successful response */
7268 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7269 res == L2CAP_CR_SUCCESS) {
7271 set_bit(CONF_REQ_SENT, &chan->conf_state);
7272 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7274 l2cap_build_conf_req(chan, buf),
7276 chan->num_conf_req++;
7280 l2cap_chan_unlock(chan);
7283 mutex_unlock(&conn->chan_lock);
/* HCI ACL data entry point: reassemble L2CAP frames from ACL
 * start/continuation fragments using the length in the basic L2CAP
 * header.  Complete frames go to l2cap_recv_frame(); malformed
 * sequences (short, over-long, unexpected start/continuation) mark
 * the connection unreliable and drop the partial frame.
 */
7288 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7290 struct l2cap_conn *conn = hcon->l2cap_data;
7291 struct l2cap_hdr *hdr;
7294 /* For AMP controller do not create l2cap conn */
7295 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7299 conn = l2cap_conn_add(hcon);
7304 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7308 case ACL_START_NO_FLUSH:
/* A start fragment while one is pending: drop the stale partial frame */
7311 BT_ERR("Unexpected start frame (len %d)", skb->len);
7312 kfree_skb(conn->rx_skb);
7313 conn->rx_skb = NULL;
7315 l2cap_conn_unreliable(conn, ECOMM);
7318 /* Start fragment always begin with Basic L2CAP header */
7319 if (skb->len < L2CAP_HDR_SIZE) {
7320 BT_ERR("Frame is too short (len %d)", skb->len);
7321 l2cap_conn_unreliable(conn, ECOMM);
7325 hdr = (struct l2cap_hdr *) skb->data;
7326 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7328 if (len == skb->len) {
7329 /* Complete frame received */
7330 l2cap_recv_frame(conn, skb);
7334 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7336 if (skb->len > len) {
7337 BT_ERR("Frame is too long (len %d, expected len %d)",
7339 l2cap_conn_unreliable(conn, ECOMM);
7343 /* Allocate skb for the complete frame (with header) */
7344 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7348 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7350 conn->rx_len = len - skb->len;
7354 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7356 if (!conn->rx_len) {
7357 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7358 l2cap_conn_unreliable(conn, ECOMM);
7362 if (skb->len > conn->rx_len) {
7363 BT_ERR("Fragment is too long (len %d, expected %d)",
7364 skb->len, conn->rx_len);
7365 kfree_skb(conn->rx_skb);
7366 conn->rx_skb = NULL;
7368 l2cap_conn_unreliable(conn, ECOMM);
7372 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7374 conn->rx_len -= skb->len;
7376 if (!conn->rx_len) {
7377 /* Complete frame received. l2cap_recv_frame
7378 * takes ownership of the skb so set the global
7379 * rx_skb pointer to NULL first.
7381 struct sk_buff *rx_skb = conn->rx_skb;
7382 conn->rx_skb = NULL;
7383 l2cap_recv_frame(conn, rx_skb);
/* debugfs seq_file callback: dump one line per global L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode).
 */
7393 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7395 struct l2cap_chan *c;
7397 read_lock(&chan_list_lock);
7399 list_for_each_entry(c, &chan_list, global_l) {
7400 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7402 c->state, __le16_to_cpu(c->psm),
7403 c->scid, c->dcid, c->imtu, c->omtu,
7404 c->sec_level, c->mode);
7407 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the seq_file show routine. */
7412 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7414 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (single_open-based
 * seq_file).
 */
7417 static const struct file_operations l2cap_debugfs_fops = {
7418 .open = l2cap_debugfs_open,
7420 .llseek = seq_lseek,
7421 .release = single_release,
/* Dentry of the debugfs entry, kept for removal in l2cap_exit() */
7424 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and, if the Bluetooth
 * debugfs root exists, create the read-only "l2cap" debugfs entry.
 */
7426 int __init l2cap_init(void)
7430 err = l2cap_init_sockets();
7434 if (IS_ERR_OR_NULL(bt_debugfs))
7437 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7438 NULL, &l2cap_debugfs_fops);
/* Module exit: remove the debugfs entry and unregister the socket
 * layer (reverse of l2cap_init).
 */
7443 void l2cap_exit(void)
7445 debugfs_remove(l2cap_debugfs);
7446 l2cap_cleanup_sockets();
/* Writable module parameter to turn off ERTM support at runtime */
7449 module_param(disable_ertm, bool, 0644);
7450 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");