2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/* Locally advertised L2CAP feature mask: fixed channels + unicast
 * connectionless data (UCD). ERTM/streaming bits are OR-ed in elsewhere
 * (see l2cap_mode_supported below).
 */
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
/* Fixed-channel bitmap reported in the info response: signalling +
 * connectionless channels supported by default.
 */
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global list of all L2CAP channels (listening and connected), guarded by
 * chan_list_lock (rwlock; readers use read_lock, mutators write_lock).
 */
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for the signalling-command helpers used before
 * their definitions later in this file.
 */
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
/* Map an HCI LE address type to the exported BDADDR_LE_* constants for the
 * given link. Only the LE branch is visible here; the BR/EDR return path
 * is in lines omitted from this listing — presumably BDADDR_BREDR, TODO
 * confirm against the full source.
 */
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
/* Any non-public LE address is reported as random. */
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
/* Look up a channel on this connection by destination CID. Walks
 * conn->chan_l; caller must hold conn->chan_lock (no locking here).
 */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
/* As above, but matches on the source CID. Caller holds conn->chan_lock. */
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
/* Take chan_lock only around the list walk; the found channel itself is
 * locked (per the comment above) in lines omitted from this listing.
 */
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
/* Look up a channel by the signalling-command identifier currently
 * associated with it. Caller must hold conn->chan_lock.
 */
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
/* Find a channel in the global list bound to the given source address and
 * source PSM (c->sport). Caller must hold chan_list_lock.
 */
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM. If @psm is non-zero it must not already be in
 * use on @src (checked below); if zero, an unused dynamic PSM in the odd
 * range 0x1001..0x10ff is auto-allocated. Returns 0 / -errno (error paths
 * are in lines omitted from this listing).
 */
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
/* Explicit PSM requested: reject if already bound on this address. */
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Auto-allocation: valid dynamic PSMs are odd, hence the step of 2. */
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
/* Bind a channel directly to a fixed source CID (no PSM involved). */
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic source CID on this connection. The LE
 * dynamic range is smaller than the BR/EDR one, hence the two end bounds.
 * Caller must hold conn->chan_lock (uses the unlocked scid lookup).
 */
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
218 if (conn->hcon->type == LE_LINK)
219 dyn_end = L2CAP_CID_LE_DYN_END;
221 dyn_end = L2CAP_CID_DYN_END;
223 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
224 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move a channel to @state with no error, notifying the owner through the
 * ops->state_change callback.
 */
231 static void l2cap_state_change(struct l2cap_chan *chan, int state)
233 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
234 state_to_string(state));
237 chan->ops->state_change(chan, state, 0);
/* State change combined with an error report (err delivered via the same
 * callback; the new-state assignment is in lines omitted here).
 */
240 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
244 chan->ops->state_change(chan, chan->state, err);
/* Report an error on the channel without changing its state. */
247 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
249 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer — but only while the monitor timer is
 * idle (monitor supersedes retrans) and a timeout has been negotiated.
 */
252 static void __set_retrans_timer(struct l2cap_chan *chan)
254 if (!delayed_work_pending(&chan->monitor_timer) &&
255 chan->retrans_timeout) {
256 l2cap_set_timer(chan, &chan->retrans_timer,
257 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer, cancelling any pending retransmission timer
 * first (the two are mutually exclusive).
 */
261 static void __set_monitor_timer(struct l2cap_chan *chan)
263 __clear_retrans_timer(chan);
264 if (chan->monitor_timeout) {
265 l2cap_set_timer(chan, &chan->monitor_timer,
266 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame carrying ERTM txseq == seq. */
270 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
275 skb_queue_walk(head, skb) {
276 if (bt_cb(skb)->control.txseq == seq)
283 /* ---- L2CAP sequence number lists ---- */
285 /* For ERTM, ordered lists of sequence numbers must be tracked for
286 * SREJ requests that are received and for frames that are to be
287 * retransmitted. These seq_list functions implement a singly-linked
288 * list in an array, where membership in the list can also be checked
289 * in constant time. Items can also be added to the tail of the list
290 * and removed from the head in constant time, without further memory
294 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
296 size_t alloc_size, i;
298 /* Allocated size is a power of 2 to map sequence numbers
299 * (which may be up to 14 bits) in to a smaller array that is
300 * sized for the negotiated ERTM transmit windows.
302 alloc_size = roundup_pow_of_two(size);
304 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Power-of-two size lets "seq & mask" replace a modulo. */
308 seq_list->mask = alloc_size - 1;
309 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
310 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
311 for (i = 0; i < alloc_size; i++)
312 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (safe on NULL). */
317 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
319 kfree(seq_list->list);
322 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
325 /* Constant-time check for list membership */
326 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list; O(1) when it is the head, O(n) otherwise.
 * Returns L2CAP_SEQ_LIST_CLEAR if the list is empty or @seq not present.
 */
329 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
331 u16 mask = seq_list->mask;
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
334 /* In case someone tries to pop the head of an empty list */
335 return L2CAP_SEQ_LIST_CLEAR;
336 } else if (seq_list->head == seq) {
337 /* Head can be removed in constant time */
338 seq_list->head = seq_list->list[seq & mask];
339 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removing the last element empties the list entirely. */
341 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
342 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
343 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
346 /* Walk the list to find the sequence number */
347 u16 prev = seq_list->head;
348 while (seq_list->list[prev & mask] != seq) {
349 prev = seq_list->list[prev & mask];
350 if (prev == L2CAP_SEQ_LIST_TAIL)
351 return L2CAP_SEQ_LIST_CLEAR;
354 /* Unlink the number from the list and clear it */
355 seq_list->list[prev & mask] = seq_list->list[seq & mask];
356 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
357 if (seq_list->tail == seq)
358 seq_list->tail = prev;
363 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
365 /* Remove the head in constant time */
366 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset the list to empty; no-op if already empty (avoids the O(n) wipe). */
369 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
373 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
376 for (i = 0; i <= seq_list->mask; i++)
377 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
379 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
380 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
383 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
385 u16 mask = seq_list->mask;
387 /* All appends happen in constant time */
/* Duplicate append is a no-op: @seq is already a member. */
389 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
/* First element also becomes the head. */
392 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
393 seq_list->head = seq;
395 seq_list->list[seq_list->tail & mask] = seq;
397 seq_list->tail = seq;
398 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with a
 * reason derived from its current state, then drops the reference taken
 * when the timer was armed (l2cap_chan_put at the end).
 */
401 static void l2cap_chan_timeout(struct work_struct *work)
403 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
405 struct l2cap_conn *conn = chan->conn;
408 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
/* Lock order: conn->chan_lock, then the channel lock. */
410 mutex_lock(&conn->chan_lock);
411 l2cap_chan_lock(chan);
413 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
414 reason = ECONNREFUSED;
415 else if (chan->state == BT_CONNECT &&
416 chan->sec_level != BT_SECURITY_SDP)
417 reason = ECONNREFUSED;
421 l2cap_chan_close(chan, reason);
423 l2cap_chan_unlock(chan);
/* ops->close is called outside the channel lock but inside chan_lock. */
425 chan->ops->close(chan);
426 mutex_unlock(&conn->chan_lock);
428 l2cap_chan_put(chan);
/* Allocate a new channel, link it into the global list and initialise its
 * lock, timer, refcount and state. Returns NULL on allocation failure
 * (error path in lines omitted from this listing).
 */
431 struct l2cap_chan *l2cap_chan_create(void)
433 struct l2cap_chan *chan;
435 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
439 mutex_init(&chan->lock);
441 write_lock(&chan_list_lock);
442 list_add(&chan->global_l, &chan_list);
443 write_unlock(&chan_list_lock);
445 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
447 chan->state = BT_OPEN;
449 kref_init(&chan->kref);
451 /* This flag is cleared in l2cap_chan_ready() */
452 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
454 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list and free. */
459 static void l2cap_chan_destroy(struct kref *kref)
461 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
463 BT_DBG("chan %p", chan);
465 write_lock(&chan_list_lock);
466 list_del(&chan->global_l);
467 write_unlock(&chan_list_lock);
/* Take a reference on the channel (kref_get in lines omitted here). */
472 void l2cap_chan_hold(struct l2cap_chan *c)
474 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference; last put triggers l2cap_chan_destroy. */
479 void l2cap_chan_put(struct l2cap_chan *c)
481 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
483 kref_put(&c->kref, l2cap_chan_destroy);
/* Reset negotiable parameters (FCS, windows, security) to spec defaults. */
486 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
488 chan->fcs = L2CAP_FCS_CRC16;
489 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
490 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
491 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
492 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
493 chan->sec_level = BT_SECURITY_LOW;
495 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Initialise an LE credit-based flow control channel: no TX credits until
 * the peer grants them; MPS is capped by our own receive MTU.
 */
498 void l2cap_le_flowctl_init(struct l2cap_chan *chan)
500 chan->imtu = L2CAP_DEFAULT_MTU;
501 chan->omtu = L2CAP_LE_MIN_MTU;
502 chan->mode = L2CAP_MODE_LE_FLOWCTL;
503 chan->tx_credits = 0;
504 chan->rx_credits = L2CAP_LE_MAX_CREDITS;
506 if (chan->imtu < L2CAP_LE_DEFAULT_MPS)
507 chan->mps = chan->imtu;
509 chan->mps = L2CAP_LE_DEFAULT_MPS;
/* Attach a channel to a connection: assign CIDs/MTUs per channel type,
 * take channel + hcon references and link it into conn->chan_l.
 * Caller must hold conn->chan_lock (see l2cap_chan_add below).
 */
512 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
514 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
515 __le16_to_cpu(chan->psm), chan->dcid);
517 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
521 switch (chan->chan_type) {
522 case L2CAP_CHAN_CONN_ORIENTED:
523 if (conn->hcon->type == LE_LINK) {
/* ATT uses the fixed ATT CID on both ends; other LE channels get
 * a dynamic CID.
 */
524 if (chan->dcid == L2CAP_CID_ATT) {
525 chan->omtu = L2CAP_DEFAULT_MTU;
526 chan->scid = L2CAP_CID_ATT;
528 chan->scid = l2cap_alloc_cid(conn);
531 /* Alloc CID for connection-oriented socket */
532 chan->scid = l2cap_alloc_cid(conn);
533 chan->omtu = L2CAP_DEFAULT_MTU;
537 case L2CAP_CHAN_CONN_LESS:
538 /* Connectionless socket */
539 chan->scid = L2CAP_CID_CONN_LESS;
540 chan->dcid = L2CAP_CID_CONN_LESS;
541 chan->omtu = L2CAP_DEFAULT_MTU;
544 case L2CAP_CHAN_CONN_FIX_A2MP:
545 chan->scid = L2CAP_CID_A2MP;
546 chan->dcid = L2CAP_CID_A2MP;
547 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
548 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
552 /* Raw socket can send/recv signalling messages only */
553 chan->scid = L2CAP_CID_SIGNALING;
554 chan->dcid = L2CAP_CID_SIGNALING;
555 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended flow spec (best-effort service). */
558 chan->local_id = L2CAP_BESTEFFORT_ID;
559 chan->local_stype = L2CAP_SERV_BESTEFFORT;
560 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
561 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
562 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
563 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
/* The list holds a channel ref and keeps the ACL link alive. */
565 l2cap_chan_hold(chan);
567 hci_conn_hold(conn->hcon);
569 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
572 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
574 mutex_lock(&conn->chan_lock);
575 __l2cap_chan_add(conn, chan);
576 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection and tear it down with @err:
 * unlink, drop the refs taken in __l2cap_chan_add, disconnect any AMP
 * logical link, and purge mode-specific queues/timers.
 */
579 void l2cap_chan_del(struct l2cap_chan *chan, int err)
581 struct l2cap_conn *conn = chan->conn;
583 __clear_chan_timer(chan);
585 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
588 struct amp_mgr *mgr = conn->hcon->amp_mgr;
589 /* Delete from channel list */
590 list_del(&chan->list);
592 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an hcon reference of their own. */
596 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
597 hci_conn_drop(conn->hcon);
599 if (mgr && mgr->bredr_chan == chan)
600 mgr->bredr_chan = NULL;
603 if (chan->hs_hchan) {
604 struct hci_chan *hs_hchan = chan->hs_hchan;
606 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
607 amp_disconnect_logical_link(hs_hchan);
610 chan->ops->teardown(chan, err);
/* Nothing mode-specific to clean up if configuration never completed. */
612 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
616 case L2CAP_MODE_BASIC:
619 case L2CAP_MODE_LE_FLOWCTL:
620 skb_queue_purge(&chan->tx_q);
623 case L2CAP_MODE_ERTM:
624 __clear_retrans_timer(chan);
625 __clear_monitor_timer(chan);
626 __clear_ack_timer(chan);
628 skb_queue_purge(&chan->srej_q);
630 l2cap_seq_list_free(&chan->srej_list);
631 l2cap_seq_list_free(&chan->retrans_list);
635 case L2CAP_MODE_STREAMING:
636 skb_queue_purge(&chan->tx_q);
/* Reject a pending LE credit-based connection request: "authorization
 * refused" if the user had deferred setup, otherwise "bad PSM".
 */
643 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
645 struct l2cap_conn *conn = chan->conn;
646 struct l2cap_le_conn_rsp rsp;
649 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
650 result = L2CAP_CR_AUTHORIZATION;
652 result = L2CAP_CR_BAD_PSM;
654 l2cap_state_change(chan, BT_DISCONN);
656 rsp.dcid = cpu_to_le16(chan->scid);
657 rsp.mtu = cpu_to_le16(chan->imtu);
658 rsp.mps = cpu_to_le16(chan->mps);
659 rsp.credits = cpu_to_le16(chan->rx_credits);
660 rsp.result = cpu_to_le16(result);
662 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart: reject a pending connection request with
 * security-block (deferred setup) or bad-PSM.
 */
666 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
668 struct l2cap_conn *conn = chan->conn;
669 struct l2cap_conn_rsp rsp;
672 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
673 result = L2CAP_CR_SEC_BLOCK;
675 result = L2CAP_CR_BAD_PSM;
677 l2cap_state_change(chan, BT_DISCONN);
/* In the response, scid/dcid are from the remote's point of view. */
679 rsp.scid = cpu_to_le16(chan->dcid);
680 rsp.dcid = cpu_to_le16(chan->scid);
681 rsp.result = cpu_to_le16(result);
682 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
684 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel with @reason, choosing the action by current state:
 * send a disconnect request, send a connect rejection, or just delete.
 */
687 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
689 struct l2cap_conn *conn = chan->conn;
691 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
693 switch (chan->state) {
695 chan->ops->teardown(chan, 0);
700 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
701 * check for chan->psm.
703 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
704 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
705 l2cap_send_disconn_req(chan, reason);
707 l2cap_chan_del(chan, reason);
/* Incoming request still pending: reject it per transport type. */
711 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
712 if (conn->hcon->type == ACL_LINK)
713 l2cap_chan_connect_reject(chan);
714 else if (conn->hcon->type == LE_LINK)
715 l2cap_chan_le_connect_reject(chan);
718 l2cap_chan_del(chan, reason);
723 l2cap_chan_del(chan, reason);
727 chan->ops->teardown(chan, 0);
/* Derive the HCI authentication requirement from channel type, PSM and
 * requested security level. SDP (and the 3DSP connectionless PSM) are
 * special-cased: they are downgraded to BT_SECURITY_SDP and never require
 * bonding.
 */
732 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
734 switch (chan->chan_type) {
736 switch (chan->sec_level) {
737 case BT_SECURITY_HIGH:
738 return HCI_AT_DEDICATED_BONDING_MITM;
739 case BT_SECURITY_MEDIUM:
740 return HCI_AT_DEDICATED_BONDING;
742 return HCI_AT_NO_BONDING;
745 case L2CAP_CHAN_CONN_LESS:
746 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
747 if (chan->sec_level == BT_SECURITY_LOW)
748 chan->sec_level = BT_SECURITY_SDP;
750 if (chan->sec_level == BT_SECURITY_HIGH)
751 return HCI_AT_NO_BONDING_MITM;
753 return HCI_AT_NO_BONDING;
755 case L2CAP_CHAN_CONN_ORIENTED:
756 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
757 if (chan->sec_level == BT_SECURITY_LOW)
758 chan->sec_level = BT_SECURITY_SDP;
760 if (chan->sec_level == BT_SECURITY_HIGH)
761 return HCI_AT_NO_BONDING_MITM;
763 return HCI_AT_NO_BONDING;
/* Default: general bonding scaled by security level. */
767 switch (chan->sec_level) {
768 case BT_SECURITY_HIGH:
769 return HCI_AT_GENERAL_BONDING_MITM;
770 case BT_SECURITY_MEDIUM:
771 return HCI_AT_GENERAL_BONDING;
773 return HCI_AT_NO_BONDING;
779 /* Service level security */
780 int l2cap_chan_check_security(struct l2cap_chan *chan)
782 struct l2cap_conn *conn = chan->conn;
/* LE links are secured via SMP rather than HCI authentication. */
785 if (conn->hcon->type == LE_LINK)
786 return smp_conn_security(conn->hcon, chan->sec_level);
788 auth_type = l2cap_get_auth_type(chan);
790 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection. */
793 static u8 l2cap_get_ident(struct l2cap_conn *conn)
797 /* Get next available identificator.
798 * 1 - 128 are used by kernel.
799 * 129 - 199 are reserved.
800 * 200 - 254 are used by utilities like l2ping, etc.
803 spin_lock(&conn->lock);
/* Wrap within the kernel range; the wrap assignment is in lines
 * omitted from this listing.
 */
805 if (++conn->tx_ident > 128)
810 spin_unlock(&conn->lock);
/* Build and transmit a signalling command on the connection's HCI channel.
 * Signalling frames go out at maximum priority and forced-active power.
 */
815 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
818 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
821 BT_DBG("code 0x%2.2x", code);
/* Mark non-flushable where the controller supports it. */
826 if (lmp_no_flush_capable(conn->hcon->hdev))
827 flags = ACL_START_NO_FLUSH;
831 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
832 skb->priority = HCI_PRIO_MAX;
834 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel-move is in progress (any state other than
 * stable or waiting-to-prepare).
 */
837 static bool __chan_is_moving(struct l2cap_chan *chan)
839 return chan->move_state != L2CAP_MOVE_STABLE &&
840 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data frame for the channel, routing it to the high-speed
 * (AMP) link when one is attached and no move is in flight, otherwise to
 * the BR/EDR ACL link with flushability derived from channel flags.
 */
843 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
845 struct hci_conn *hcon = chan->conn->hcon;
848 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
851 if (chan->hs_hcon && !__chan_is_moving(chan)) {
853 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
860 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
861 lmp_no_flush_capable(hcon->hdev))
862 flags = ACL_START_NO_FLUSH;
866 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
867 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into l2cap_ctrl. S-frames
 * carry poll/supervise bits; I-frames carry SAR and txseq.
 */
870 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
872 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
873 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
875 if (enh & L2CAP_CTRL_FRAME_TYPE) {
878 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
879 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
886 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
887 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended ERTM control field (used when FLAG_EXT_CTRL is
 * negotiated); mirrors __unpack_enhanced_control for the wider layout.
 */
894 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
896 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
897 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
899 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
902 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
903 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
910 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
911 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull the control field off an incoming frame, choosing the extended or
 * enhanced decoder by the channel's EXT_CTRL flag.
 */
918 static inline void __unpack_control(struct l2cap_chan *chan,
921 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
922 __unpack_extended_control(get_unaligned_le32(skb->data),
923 &bt_cb(skb)->control);
924 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
926 __unpack_enhanced_control(get_unaligned_le16(skb->data),
927 &bt_cb(skb)->control);
928 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode l2cap_ctrl into the 32-bit extended control field. */
932 static u32 __pack_extended_control(struct l2cap_ctrl *control)
936 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
937 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
939 if (control->sframe) {
940 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
941 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
942 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
944 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
945 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode l2cap_ctrl into the 16-bit enhanced control field. */
951 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
955 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
956 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
958 if (control->sframe) {
959 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
960 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
961 packed |= L2CAP_CTRL_FRAME_TYPE;
963 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
964 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the control field into an outgoing frame just after the basic
 * L2CAP header, in the width the channel negotiated.
 */
970 static inline void __pack_control(struct l2cap_chan *chan,
971 struct l2cap_ctrl *control,
974 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
975 put_unaligned_le32(__pack_extended_control(control),
976 skb->data + L2CAP_HDR_SIZE);
978 put_unaligned_le16(__pack_enhanced_control(control),
979 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM header: basic header plus 2- or 4-byte control field. */
983 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
985 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
986 return L2CAP_EXT_HDR_SIZE;
988 return L2CAP_ENH_HDR_SIZE;
/* Build an ERTM supervisory frame: basic header + control field (+ FCS
 * when CRC16 is in use). Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
991 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
995 struct l2cap_hdr *lh;
996 int hlen = __ertm_hdr_size(chan);
998 if (chan->fcs == L2CAP_FCS_CRC16)
999 hlen += L2CAP_FCS_SIZE;
1001 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1004 return ERR_PTR(-ENOMEM);
1006 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1007 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1008 lh->cid = cpu_to_le16(chan->dcid);
1010 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1011 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1013 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers everything built so far (header + control). */
1015 if (chan->fcs == L2CAP_FCS_CRC16) {
1016 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1017 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1020 skb->priority = HCI_PRIO_MAX;
/* Send one supervisory frame, maintaining the RNR-sent flag, the F-bit
 * bookkeeping and the delayed-ack state as a side effect.
 */
1024 static void l2cap_send_sframe(struct l2cap_chan *chan,
1025 struct l2cap_ctrl *control)
1027 struct sk_buff *skb;
1030 BT_DBG("chan %p, control %p", chan, control)
1032 if (!control->sframe)
/* Never transmit while an AMP channel move is in progress. */
1035 if (__chan_is_moving(chan))
1038 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1042 if (control->super == L2CAP_SUPER_RR)
1043 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1044 else if (control->super == L2CAP_SUPER_RNR)
1045 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* RR/RNR acknowledge up to reqseq, so a pending ack is no longer
 * needed; SREJ does not acknowledge.
 */
1047 if (control->super != L2CAP_SUPER_SREJ) {
1048 chan->last_acked_seq = control->reqseq;
1049 __clear_ack_timer(chan);
1052 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1053 control->final, control->poll, control->super);
1055 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1056 control_field = __pack_extended_control(control);
1058 control_field = __pack_enhanced_control(control);
1060 skb = l2cap_create_sframe_pdu(chan, control_field);
1062 l2cap_do_send(chan, skb);
/* Send RR (ready) or RNR (not ready, when locally busy) acknowledging
 * buffer_seq, optionally with the poll bit set.
 */
1065 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1067 struct l2cap_ctrl control;
1069 BT_DBG("chan %p, poll %d", chan, poll);
1071 memset(&control, 0, sizeof(control));
1073 control.poll = poll;
1075 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1076 control.super = L2CAP_SUPER_RNR;
1078 control.super = L2CAP_SUPER_RR;
1080 control.reqseq = chan->buffer_seq;
1081 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
1084 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1086 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may be created on / moved to an AMP
 * controller: high-speed must be enabled, the peer must advertise the
 * A2MP fixed channel, at least one non-BR/EDR controller must be up, and
 * the channel policy must prefer AMP.
 */
1089 static bool __amp_capable(struct l2cap_chan *chan)
1091 struct l2cap_conn *conn = chan->conn;
1092 struct hci_dev *hdev;
1093 bool amp_available = false;
1095 if (!conn->hs_enabled)
1098 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1101 read_lock(&hci_dev_list_lock);
1102 list_for_each_entry(hdev, &hci_dev_list, list) {
1103 if (hdev->amp_type != AMP_TYPE_BREDR &&
1104 test_bit(HCI_UP, &hdev->flags)) {
1105 amp_available = true;
1109 read_unlock(&hci_dev_list_lock);
1111 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1112 return amp_available;
/* Validate the channel's extended flow spec; body is in lines omitted
 * from this listing.
 */
1117 static bool l2cap_check_efs(struct l2cap_chan *chan)
1119 /* Check EFS parameters */
/* Send an L2CAP connect request for the channel's PSM/SCID and mark a
 * connect as pending.
 */
1123 void l2cap_send_conn_req(struct l2cap_chan *chan)
1125 struct l2cap_conn *conn = chan->conn;
1126 struct l2cap_conn_req req;
1128 req.scid = cpu_to_le16(chan->scid);
1129 req.psm = chan->psm;
1131 chan->ident = l2cap_get_ident(conn);
1133 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1135 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant of the connect request: includes the target controller id. */
1138 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1140 struct l2cap_create_chan_req req;
1141 req.scid = cpu_to_le16(chan->scid);
1142 req.psm = chan->psm;
1143 req.amp_id = amp_id;
1145 chan->ident = l2cap_get_ident(chan->conn);
1147 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, reset retry
 * counts, mark every queued frame for (re)transmission, and flush all
 * SREJ/retransmission state. No-op for non-ERTM channels.
 */
1151 static void l2cap_move_setup(struct l2cap_chan *chan)
1153 struct sk_buff *skb;
1155 BT_DBG("chan %p", chan);
1157 if (chan->mode != L2CAP_MODE_ERTM)
1160 __clear_retrans_timer(chan);
1161 __clear_monitor_timer(chan);
1162 __clear_ack_timer(chan);
1164 chan->retry_count = 0;
/* Frames already sent once are reset to a single "retry" so they go
 * out again on the new link.
 */
1165 skb_queue_walk(&chan->tx_q, skb) {
1166 if (bt_cb(skb)->control.retries)
1167 bt_cb(skb)->control.retries = 1;
1172 chan->expected_tx_seq = chan->buffer_seq;
1174 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1175 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1176 l2cap_seq_list_clear(&chan->retrans_list);
1177 l2cap_seq_list_clear(&chan->srej_list);
1178 skb_queue_purge(&chan->srej_q);
1180 chan->tx_state = L2CAP_TX_STATE_XMIT;
1181 chan->rx_state = L2CAP_RX_STATE_MOVE;
1183 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to stable state and, for ERTM, enter the
 * F-bit/P-bit wait state that matches the role we played in the move.
 */
1186 static void l2cap_move_done(struct l2cap_chan *chan)
1188 u8 move_role = chan->move_role;
1189 BT_DBG("chan %p", chan);
1191 chan->move_state = L2CAP_MOVE_STABLE;
1192 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1194 if (chan->mode != L2CAP_MODE_ERTM)
1197 switch (move_role) {
1198 case L2CAP_MOVE_ROLE_INITIATOR:
1199 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1200 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1202 case L2CAP_MOVE_ROLE_RESPONDER:
1203 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Start LE credit-based flow control on a newly connected channel: reset
 * reassembly state, recompute MPS from the MTU, and suspend the owner if
 * the peer granted no TX credits yet.
 */
1208 static void l2cap_le_flowctl_start(struct l2cap_chan *chan)
1211 chan->sdu_last_frag = NULL;
1214 if (chan->imtu < L2CAP_LE_DEFAULT_MPS)
1215 chan->mps = chan->imtu;
1217 chan->mps = L2CAP_LE_DEFAULT_MPS;
1219 skb_queue_head_init(&chan->tx_q);
1221 if (!chan->tx_credits)
1222 chan->ops->suspend(chan);
/* Transition a channel to BT_CONNECTED and notify its owner. */
1225 static void l2cap_chan_ready(struct l2cap_chan *chan)
1227 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1228 chan->conf_state = 0;
1229 __clear_chan_timer(chan);
1231 if (chan->mode == L2CAP_MODE_LE_FLOWCTL)
1232 l2cap_le_flowctl_start(chan);
1234 chan->state = BT_CONNECTED;
1236 chan->ops->ready(chan);
/* Send an LE credit-based connection request, at most once per channel
 * (guarded by FLAG_LE_CONN_REQ_SENT).
 */
1239 static void l2cap_le_connect(struct l2cap_chan *chan)
1241 struct l2cap_conn *conn = chan->conn;
1242 struct l2cap_le_conn_req req;
1244 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1247 req.psm = chan->psm;
1248 req.scid = cpu_to_le16(chan->scid);
1249 req.mtu = cpu_to_le16(chan->imtu);
1250 req.mps = cpu_to_le16(chan->mps);
1251 req.credits = cpu_to_le16(chan->rx_credits);
1253 chan->ident = l2cap_get_ident(conn);
1255 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Progress an LE channel once SMP security is satisfied: mark it ready,
 * or issue the LE connect request if we are still in BT_CONNECT.
 */
1259 static void l2cap_le_start(struct l2cap_chan *chan)
1261 struct l2cap_conn *conn = chan->conn;
1263 if (!smp_conn_security(conn->hcon, chan->sec_level))
1267 l2cap_chan_ready(chan);
1271 if (chan->state == BT_CONNECT)
1272 l2cap_le_connect(chan);
/* Kick off channel establishment via the appropriate transport: AMP
 * discovery, LE start, or a plain BR/EDR connect request.
 */
1275 static void l2cap_start_connection(struct l2cap_chan *chan)
1277 if (__amp_capable(chan)) {
1278 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1279 a2mp_discover_amp(chan);
1280 } else if (chan->conn->hcon->type == LE_LINK) {
1281 l2cap_le_start(chan);
1283 l2cap_send_conn_req(chan);
/* Start a channel, first ensuring the connection's feature mask has been
 * queried (sending an info request and arming the info timer if not) and
 * that security requirements are met.
 */
1287 static void l2cap_do_start(struct l2cap_chan *chan)
1289 struct l2cap_conn *conn = chan->conn;
1291 if (conn->hcon->type == LE_LINK) {
1292 l2cap_le_start(chan);
1296 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1297 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1300 if (l2cap_chan_check_security(chan) &&
1301 __l2cap_no_conn_pending(chan)) {
1302 l2cap_start_connection(chan);
1305 struct l2cap_info_req req;
1306 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1308 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1309 conn->info_ident = l2cap_get_ident(conn);
1311 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1313 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check whether @mode is supported both locally (ERTM/streaming are
 * always OR-ed into the local mask here) and by the remote feature mask.
 */
1318 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1320 u32 local_feat_mask = l2cap_feat_mask;
1322 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1325 case L2CAP_MODE_ERTM:
1326 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1327 case L2CAP_MODE_STREAMING:
1328 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request for the channel (A2MP fixed channels just
 * change state locally) and report @err to the owner.
 */
1334 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1336 struct l2cap_conn *conn = chan->conn;
1337 struct l2cap_disconn_req req;
/* ERTM timers must not fire once we are tearing down. */
1342 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1343 __clear_retrans_timer(chan);
1344 __clear_monitor_timer(chan);
1345 __clear_ack_timer(chan);
1348 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1349 l2cap_state_change(chan, BT_DISCONN);
1353 req.dcid = cpu_to_le16(chan->dcid);
1354 req.scid = cpu_to_le16(chan->scid);
1355 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1358 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1361 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward once the
 * connection is usable: start outgoing connects (BT_CONNECT) and answer
 * pending incoming ones (BT_CONNECT2) with success/pend, then issue the
 * first configuration request where appropriate.
 */
1362 static void l2cap_conn_start(struct l2cap_conn *conn)
1364 struct l2cap_chan *chan, *tmp;
1366 BT_DBG("conn %p", conn);
1368 mutex_lock(&conn->chan_lock);
/* _safe: l2cap_chan_close below may unlink the current entry. */
1370 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1371 l2cap_chan_lock(chan);
1373 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1374 l2cap_chan_unlock(chan);
1378 if (chan->state == BT_CONNECT) {
1379 if (!l2cap_chan_check_security(chan) ||
1380 !__l2cap_no_conn_pending(chan)) {
1381 l2cap_chan_unlock(chan);
/* A state-2 device whose requested mode the peer does not
 * support cannot fall back: close it.
 */
1385 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1386 && test_bit(CONF_STATE2_DEVICE,
1387 &chan->conf_state)) {
1388 l2cap_chan_close(chan, ECONNRESET);
1389 l2cap_chan_unlock(chan);
1393 l2cap_start_connection(chan);
1395 } else if (chan->state == BT_CONNECT2) {
1396 struct l2cap_conn_rsp rsp;
1398 rsp.scid = cpu_to_le16(chan->dcid);
1399 rsp.dcid = cpu_to_le16(chan->scid);
1401 if (l2cap_chan_check_security(chan)) {
1402 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1403 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1404 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1405 chan->ops->defer(chan);
1408 l2cap_state_change(chan, BT_CONFIG);
1409 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1410 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1413 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1414 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1417 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a successful, not-yet-configured channel proceeds to the
 * config request below.
 */
1420 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1421 rsp.result != L2CAP_CR_SUCCESS) {
1422 l2cap_chan_unlock(chan);
1426 set_bit(CONF_REQ_SENT, &chan->conf_state);
1427 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1428 l2cap_build_conf_req(chan, buf), buf);
1429 chan->num_conf_req++;
1432 l2cap_chan_unlock(chan);
1435 mutex_unlock(&conn->chan_lock);
1438 /* Find socket with cid and source/destination bdaddr.
1439 * Returns closest match, locked.
/* Look up a channel in the global chan_list by source CID, optionally
 * filtered by state. An exact src/dst address match returns immediately
 * (with chan_list_lock dropped); wildcard (BDADDR_ANY) matches are kept
 * in c1 as the "closest match" fallback.
 */
1441 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1445 struct l2cap_chan *c, *c1 = NULL;
1447 read_lock(&chan_list_lock);
1449 list_for_each_entry(c, &chan_list, global_l) {
/* state == 0 means "any state" */
1450 if (state && c->state != state)
1453 if (c->scid == cid) {
1454 int src_match, dst_match;
1455 int src_any, dst_any;
/* Exact match wins outright */
1458 src_match = !bacmp(&c->src, src);
1459 dst_match = !bacmp(&c->dst, dst);
1460 if (src_match && dst_match) {
1461 read_unlock(&chan_list_lock);
/* Otherwise remember the closest wildcard match */
1466 src_any = !bacmp(&c->src, BDADDR_ANY);
1467 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1468 if ((src_match && dst_any) || (src_any && dst_match) ||
1469 (src_any && dst_any))
1474 read_unlock(&chan_list_lock);
/* An LE link just came up: if a socket is listening on the ATT fixed CID,
 * spawn a child channel for the new connection (unless a client ATT
 * channel already exists, or the remote is blacklisted) and attach it to
 * the l2cap_conn.
 */
1479 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1481 struct hci_conn *hcon = conn->hcon;
1482 struct l2cap_chan *chan, *pchan;
1487 /* Check if we have socket listening on cid */
1488 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1489 &hcon->src, &hcon->dst);
1493 /* Client ATT sockets should override the server one */
1494 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1497 dst_type = bdaddr_type(hcon, hcon->dst_type);
1499 /* If device is blocked, do not create a channel for it */
1500 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1503 l2cap_chan_lock(pchan);
/* Child channel is created by the listening channel's ops (socket
 * accept path); pchan stays the listener.
 */
1505 chan = pchan->ops->new_connection(pchan);
1509 chan->dcid = L2CAP_CID_ATT;
1511 bacpy(&chan->src, &hcon->src);
1512 bacpy(&chan->dst, &hcon->dst);
1513 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1514 chan->dst_type = dst_type;
1516 __l2cap_chan_add(conn, chan);
1519 l2cap_chan_unlock(pchan);
/* Called when the underlying HCI connection is ready: kick SMP for
 * outgoing LE pairing, handle LE fixed channels, then walk all channels
 * and move each one forward (LE start, mark ready, or start BR/EDR
 * connect signalling).
 */
1522 static void l2cap_conn_ready(struct l2cap_conn *conn)
1524 struct l2cap_chan *chan;
1525 struct hci_conn *hcon = conn->hcon;
1527 BT_DBG("conn %p", conn);
1529 /* For outgoing pairing which doesn't necessarily have an
1530 * associated socket (e.g. mgmt_pair_device).
1532 if (hcon->out && hcon->type == LE_LINK)
1533 smp_conn_security(hcon, hcon->pending_sec_level);
1535 mutex_lock(&conn->chan_lock);
1537 if (hcon->type == LE_LINK)
1538 l2cap_le_conn_ready(conn);
1540 list_for_each_entry(chan, &conn->chan_l, list) {
1542 l2cap_chan_lock(chan);
/* A2MP fixed channel has its own setup path */
1544 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1545 l2cap_chan_unlock(chan);
1549 if (hcon->type == LE_LINK) {
1550 l2cap_le_start(chan);
1551 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1552 l2cap_chan_ready(chan);
1554 } else if (chan->state == BT_CONNECT) {
1555 l2cap_do_start(chan);
1558 l2cap_chan_unlock(chan);
1561 mutex_unlock(&conn->chan_lock);
1564 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate a link-reliability failure: set err on every channel that
 * requested forced reliability (FLAG_FORCE_RELIABLE).
 */
1565 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1567 struct l2cap_chan *chan;
1569 BT_DBG("conn %p", conn);
1571 mutex_lock(&conn->chan_lock);
1573 list_for_each_entry(chan, &conn->chan_l, list) {
1574 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1575 l2cap_chan_set_err(chan, err);
1578 mutex_unlock(&conn->chan_lock);
/* Information-request timer expired: give up waiting for the remote's
 * feature mask, mark the exchange done, and proceed with channel setup.
 */
1581 static void l2cap_info_timeout(struct work_struct *work)
1583 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1586 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1587 conn->info_ident = 0;
1589 l2cap_conn_start(conn);
1594 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1595 * callback is called during registration. The ->remove callback is called
1596 * during unregistration.
1597 * An l2cap_user object can either be explicitly unregistered or when the
1598 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1599 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1600 * External modules must own a reference to the l2cap_conn object if they intend
1601 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1602 * any time if they don't.
/* Register an external l2cap_user on a connection; calls user->probe()
 * and links the user into conn->users on success. Serialized against
 * l2cap_conn_del() via the hci_dev lock (see comment below).
 */
1605 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1607 struct hci_dev *hdev = conn->hcon->hdev;
1610 /* We need to check whether l2cap_conn is registered. If it is not, we
1611 * must not register the l2cap_user. l2cap_conn_del() unregisters
1612 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1613 * relies on the parent hci_conn object to be locked. This itself relies
1614 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean the user is already registered */
1619 if (user->list.next || user->list.prev) {
1624 /* conn->hchan is NULL after l2cap_conn_del() was called */
1630 ret = user->probe(conn, user);
1634 list_add(&user->list, &conn->users);
1638 hci_dev_unlock(hdev);
1641 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user: unlink it, reset its
 * list pointers (the "not registered" marker checked by register), and
 * invoke its ->remove() callback. Safe to call on an unregistered user
 * (NULL list pointers cause an early bail-out).
 */
1643 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1645 struct hci_dev *hdev = conn->hcon->hdev;
1649 if (!user->list.next || !user->list.prev)
1652 list_del(&user->list);
1653 user->list.next = NULL;
1654 user->list.prev = NULL;
1655 user->remove(conn, user);
1658 hci_dev_unlock(hdev);
1660 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach every registered l2cap_user from the connection, resetting each
 * user's list pointers and calling its ->remove() callback. Used by
 * l2cap_conn_del() during connection teardown.
 */
1662 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1664 struct l2cap_user *user;
1666 while (!list_empty(&conn->users)) {
1667 user = list_first_entry(&conn->users, struct l2cap_user, list);
1668 list_del(&user->list);
1669 user->list.next = NULL;
1670 user->list.prev = NULL;
1671 user->remove(conn, user);
/* Tear down the l2cap_conn attached to an HCI connection: notify users,
 * delete and close every channel with the given error, drop the HCI
 * channel, cancel pending timers, and release the conn reference.
 * Relies on the caller holding the hci_conn/hci_dev locks (see the
 * locking comment in l2cap_register_user()).
 */
1675 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1677 struct l2cap_conn *conn = hcon->l2cap_data;
1678 struct l2cap_chan *chan, *l;
1683 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1685 kfree_skb(conn->rx_skb);
1687 l2cap_unregister_all_users(conn);
1689 mutex_lock(&conn->chan_lock);
1692 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold a ref across del/close so the channel cannot vanish while
 * we are still using it; ->close() is called with the channel
 * unlocked.
 */
1693 l2cap_chan_hold(chan);
1694 l2cap_chan_lock(chan);
1696 l2cap_chan_del(chan, err);
1698 l2cap_chan_unlock(chan);
1700 chan->ops->close(chan);
1701 l2cap_chan_put(chan);
1704 mutex_unlock(&conn->chan_lock);
1706 hci_chan_del(conn->hchan);
1708 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1709 cancel_delayed_work_sync(&conn->info_timer);
/* LE links with pending SMP also need their security timer stopped
 * and SMP context destroyed.
 */
1711 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1712 cancel_delayed_work_sync(&conn->security_timer);
1713 smp_chan_destroy(conn);
1716 hcon->l2cap_data = NULL;
1718 l2cap_conn_put(conn);
/* SMP security procedure timed out: if SMP was still pending on the
 * link, destroy the SMP context and tear the connection down with
 * ETIMEDOUT.
 */
1721 static void security_timeout(struct work_struct *work)
1723 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1724 security_timer.work);
1726 BT_DBG("conn %p", conn);
1728 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1729 smp_chan_destroy(conn);
1730 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialize the l2cap_conn for an HCI connection (or reuse
 * the existing one — hcon->l2cap_data is checked by the caller/early
 * return not visible here). Creates the HCI channel, sets the MTU from
 * the link type, and arms the per-link timer (security for LE, info for
 * BR/EDR).
 */
1734 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1736 struct l2cap_conn *conn = hcon->l2cap_data;
1737 struct hci_chan *hchan;
1742 hchan = hci_chan_create(hcon);
1746 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan we just created */
1748 hci_chan_del(hchan);
1752 kref_init(&conn->ref);
1753 hcon->l2cap_data = conn;
/* conn holds a reference on its hci_conn; dropped in l2cap_conn_free() */
1755 hci_conn_get(conn->hcon);
1756 conn->hchan = hchan;
1758 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU comes from the controller: LE MTU if advertised, else ACL MTU */
1760 switch (hcon->type) {
1762 if (hcon->hdev->le_mtu) {
1763 conn->mtu = hcon->hdev->le_mtu;
1768 conn->mtu = hcon->hdev->acl_mtu;
1772 conn->feat_mask = 0;
1774 if (hcon->type == ACL_LINK)
1775 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1776 &hcon->hdev->dev_flags);
1778 spin_lock_init(&conn->lock);
1779 mutex_init(&conn->chan_lock);
1781 INIT_LIST_HEAD(&conn->chan_l);
1782 INIT_LIST_HEAD(&conn->users);
1784 if (hcon->type == LE_LINK)
1785 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1787 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1789 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the l2cap_conn.
 */
1794 static void l2cap_conn_free(struct kref *ref)
1796 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1798 hci_conn_put(conn->hcon);
/* Take a reference on the connection (paired with l2cap_conn_put()). */
1802 void l2cap_conn_get(struct l2cap_conn *conn)
1804 kref_get(&conn->ref);
1806 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a connection reference; frees via l2cap_conn_free() on last put. */
1808 void l2cap_conn_put(struct l2cap_conn *conn)
1810 kref_put(&conn->ref, l2cap_conn_free);
1812 EXPORT_SYMBOL(l2cap_conn_put);
1814 /* ---- Socket interface ---- */
1816 /* Find socket with psm and source / destination bdaddr.
1817 * Returns closest match.
/* Look up a channel in the global chan_list by PSM, filtered by state and
 * link type (BR/EDR channels only match ACL links, LE channels only LE
 * links). Exact src/dst match returns immediately; wildcard (BDADDR_ANY)
 * matches are kept in c1 as the closest-match fallback — mirrors
 * l2cap_global_chan_by_scid().
 */
1819 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1824 struct l2cap_chan *c, *c1 = NULL;
1826 read_lock(&chan_list_lock);
1828 list_for_each_entry(c, &chan_list, global_l) {
1829 if (state && c->state != state)
/* Filter out channels whose source address type does not match the
 * link transport.
 */
1832 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1835 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1838 if (c->psm == psm) {
1839 int src_match, dst_match;
1840 int src_any, dst_any;
1843 src_match = !bacmp(&c->src, src);
1844 dst_match = !bacmp(&c->dst, dst);
1845 if (src_match && dst_match) {
1846 read_unlock(&chan_list_lock);
1851 src_any = !bacmp(&c->src, BDADDR_ANY);
1852 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1853 if ((src_match && dst_any) || (src_any && dst_match) ||
1854 (src_any && dst_any))
1859 read_unlock(&chan_list_lock);
/* Validate a PSM for the destination address type: LE destinations use
 * the small LE PSM range, BR/EDR PSMs must be odd with the least
 * significant bit of the upper byte clear (per the L2CAP spec).
 */
1864 static bool is_valid_psm(u16 psm, u8 dst_type)
1869 if (bdaddr_type_is_le(dst_type))
1870 return (psm < 0x00ff);
1872 /* PSM must be odd and lsb of upper byte must be 0 */
1873 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection on a channel: validate PSM/CID
 * and channel mode, create (or reuse) the HCI link to dst — LE or ACL
 * depending on the destination address type — attach the channel to the
 * resulting l2cap_conn, and start connect signalling if the link is
 * already up.
 * Returns 0 on success or a negative errno.
 * NOTE(review): excerpt has elided lines (error paths, gotos) — consult
 * the full source before modifying.
 */
1876 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1877 bdaddr_t *dst, u8 dst_type)
1879 struct l2cap_conn *conn;
1880 struct hci_conn *hcon;
1881 struct hci_dev *hdev;
1885 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1886 dst_type, __le16_to_cpu(psm));
/* Route selection picks the local adapter for this destination */
1888 hdev = hci_get_route(dst, &chan->src);
1890 return -EHOSTUNREACH;
1894 l2cap_chan_lock(chan);
/* Invalid PSM is only acceptable for fixed-CID or raw channels */
1896 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
1897 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID */
1902 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1907 switch (chan->mode) {
1908 case L2CAP_MODE_BASIC:
1909 case L2CAP_MODE_LE_FLOWCTL:
1911 case L2CAP_MODE_ERTM:
1912 case L2CAP_MODE_STREAMING:
1921 switch (chan->state) {
1925 /* Already connecting */
1930 /* Already connected */
1944 /* Set destination address and psm */
1945 bacpy(&chan->dst, dst);
1946 chan->dst_type = dst_type;
1951 auth_type = l2cap_get_auth_type(chan);
1953 if (bdaddr_type_is_le(dst_type))
1954 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1955 chan->sec_level, auth_type);
1957 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1958 chan->sec_level, auth_type);
1961 err = PTR_ERR(hcon);
1965 conn = l2cap_conn_add(hcon);
1967 hci_conn_drop(hcon);
/* A channel with this destination CID already exists on the link */
1972 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1973 hci_conn_drop(hcon);
1978 /* Update source addr of the socket */
1979 bacpy(&chan->src, &hcon->src);
1980 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* chan must be unlocked while taking conn->chan_lock (lock ordering) */
1982 l2cap_chan_unlock(chan);
1983 l2cap_chan_add(conn, chan);
1984 l2cap_chan_lock(chan);
1986 /* l2cap_chan_add takes its own ref so we can drop this one */
1987 hci_conn_drop(hcon);
1989 l2cap_state_change(chan, BT_CONNECT);
1990 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Link already up: proceed immediately instead of waiting for the
 * connect-complete event.
 */
1992 if (hcon->state == BT_CONNECTED) {
1993 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1994 __clear_chan_timer(chan);
1995 if (l2cap_chan_check_security(chan))
1996 l2cap_state_change(chan, BT_CONNECTED);
1998 l2cap_do_start(chan);
2004 l2cap_chan_unlock(chan);
2005 hci_dev_unlock(hdev);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine (the early unlock/put path handles a channel that is no longer
 * in a state where the event applies).
 */
2010 static void l2cap_monitor_timeout(struct work_struct *work)
2012 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2013 monitor_timer.work);
2015 BT_DBG("chan %p", chan);
2017 l2cap_chan_lock(chan);
2020 l2cap_chan_unlock(chan);
2021 l2cap_chan_put(chan);
2025 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2027 l2cap_chan_unlock(chan);
/* Drop the reference held by the armed timer */
2028 l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * TX state machine. Mirrors l2cap_monitor_timeout().
 */
2031 static void l2cap_retrans_timeout(struct work_struct *work)
2033 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2034 retrans_timer.work);
2036 BT_DBG("chan %p", chan);
2038 l2cap_chan_lock(chan);
2041 l2cap_chan_unlock(chan);
2042 l2cap_chan_put(chan);
2046 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2047 l2cap_chan_unlock(chan);
/* Drop the reference held by the armed timer */
2048 l2cap_chan_put(chan);
/* Streaming-mode transmit: append the segmented PDUs to tx_q and send
 * them all immediately (no retransmission/acking in streaming mode),
 * stamping each frame with the next TX sequence number and an FCS when
 * CRC16 is configured.
 */
2051 static void l2cap_streaming_send(struct l2cap_chan *chan,
2052 struct sk_buff_head *skbs)
2054 struct sk_buff *skb;
2055 struct l2cap_ctrl *control;
2057 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress */
2059 if (__chan_is_moving(chan))
2062 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2064 while (!skb_queue_empty(&chan->tx_q)) {
2066 skb = skb_dequeue(&chan->tx_q);
2068 bt_cb(skb)->control.retries = 1;
2069 control = &bt_cb(skb)->control;
2071 control->reqseq = 0;
2072 control->txseq = chan->next_tx_seq;
2074 __pack_control(chan, control, skb);
/* FCS is appended after the control field has been written */
2076 if (chan->fcs == L2CAP_FCS_CRC16) {
2077 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2078 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2081 l2cap_do_send(chan, skb);
2083 BT_DBG("Sent txseq %u", control->txseq);
2085 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2086 chan->frames_sent++;
/* ERTM transmit: send queued I-frames from tx_send_head while the remote
 * TX window has room and the TX state machine is in XMIT. Each frame is
 * sequence-stamped, optionally FCS-protected, and transmitted as a clone
 * so the original stays queued for possible retransmission.
 * Returns the number of frames sent (via 'sent', declared in an elided
 * line).
 */
2090 static int l2cap_ertm_send(struct l2cap_chan *chan)
2092 struct sk_buff *skb, *tx_skb;
2093 struct l2cap_ctrl *control;
2096 BT_DBG("chan %p", chan);
2098 if (chan->state != BT_CONNECTED)
/* Remote signalled receiver-busy (RNR): hold transmission */
2101 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2104 if (__chan_is_moving(chan))
2107 while (chan->tx_send_head &&
2108 chan->unacked_frames < chan->remote_tx_win &&
2109 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2111 skb = chan->tx_send_head;
2113 bt_cb(skb)->control.retries = 1;
2114 control = &bt_cb(skb)->control;
2116 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggy-back an ack: reqseq acknowledges everything up to buffer_seq */
2119 control->reqseq = chan->buffer_seq;
2120 chan->last_acked_seq = chan->buffer_seq;
2121 control->txseq = chan->next_tx_seq;
2123 __pack_control(chan, control, skb);
2125 if (chan->fcs == L2CAP_FCS_CRC16) {
2126 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2127 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2130 /* Clone after data has been modified. Data is assumed to be
2131 read-only (for locking purposes) on cloned sk_buffs.
2133 tx_skb = skb_clone(skb, GFP_KERNEL);
2138 __set_retrans_timer(chan);
2140 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2141 chan->unacked_frames++;
2142 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, or NULL at queue end */
2145 if (skb_queue_is_last(&chan->tx_q, skb))
2146 chan->tx_send_head = NULL;
2148 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2150 l2cap_do_send(chan, tx_skb);
2151 BT_DBG("Sent txseq %u", control->txseq);
2154 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2155 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number currently on retrans_list: locate the
 * original frame in tx_q, bump its retry count (disconnecting if max_tx
 * is exceeded), refresh the control field and FCS in a private copy, and
 * send it.
 */
2160 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2162 struct l2cap_ctrl control;
2163 struct sk_buff *skb;
2164 struct sk_buff *tx_skb;
2167 BT_DBG("chan %p", chan);
2169 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2172 if (__chan_is_moving(chan))
2175 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2176 seq = l2cap_seq_list_pop(&chan->retrans_list);
2178 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2180 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2185 bt_cb(skb)->control.retries++;
2186 control = bt_cb(skb)->control;
/* max_tx == 0 means "retry forever" */
2188 if (chan->max_tx != 0 &&
2189 bt_cb(skb)->control.retries > chan->max_tx) {
2190 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2191 l2cap_send_disconn_req(chan, ECONNRESET);
2192 l2cap_seq_list_clear(&chan->retrans_list);
2196 control.reqseq = chan->buffer_seq;
2197 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2202 if (skb_cloned(skb)) {
2203 /* Cloned sk_buffs are read-only, so we need a
/* ...writable copy; otherwise a cheap clone suffices */
2206 tx_skb = skb_copy(skb, GFP_KERNEL);
2208 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon this resend round */
2212 l2cap_seq_list_clear(&chan->retrans_list);
2216 /* Update skb contents */
2217 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2218 put_unaligned_le32(__pack_extended_control(&control),
2219 tx_skb->data + L2CAP_HDR_SIZE);
2221 put_unaligned_le16(__pack_enhanced_control(&control),
2222 tx_skb->data + L2CAP_HDR_SIZE);
/* FCS must be recomputed after the control field was rewritten */
2225 if (chan->fcs == L2CAP_FCS_CRC16) {
2226 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2227 put_unaligned_le16(fcs, skb_put(tx_skb,
2231 l2cap_do_send(chan, tx_skb);
2233 BT_DBG("Resent txseq %d", control.txseq);
2235 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit a single frame: queue the requested sequence number
 * (control->reqseq) and run the resend machinery.
 */
2239 static void l2cap_retransmit(struct l2cap_chan *chan,
2240 struct l2cap_ctrl *control)
2242 BT_DBG("chan %p, control %p", chan, control);
2244 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2245 l2cap_ertm_resend(chan);
/* Retransmit all unacked frames starting at control->reqseq: rebuild
 * retrans_list from the tx_q entries between reqseq and tx_send_head,
 * then run the resend machinery (skipped while the remote is busy).
 */
2248 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2249 struct l2cap_ctrl *control)
2251 struct sk_buff *skb;
2253 BT_DBG("chan %p, control %p", chan, control);
2256 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2258 l2cap_seq_list_clear(&chan->retrans_list);
2260 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2263 if (chan->unacked_frames) {
/* First walk finds the starting frame (reqseq) or stops at the
 * first not-yet-sent frame.
 */
2264 skb_queue_walk(&chan->tx_q, skb) {
2265 if (bt_cb(skb)->control.txseq == control->reqseq ||
2266 skb == chan->tx_send_head)
/* Second walk appends every already-sent frame from there on */
2270 skb_queue_walk_from(&chan->tx_q, skb) {
2271 if (skb == chan->tx_send_head)
2274 l2cap_seq_list_append(&chan->retrans_list,
2275 bt_cb(skb)->control.txseq);
2278 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, piggy-back
 * the ack on pending I-frames when possible, send an explicit RR once
 * the unacked window reaches ~3/4 of ack_win, and otherwise (re)arm the
 * ack timer to batch future acks.
 */
2282 static void l2cap_send_ack(struct l2cap_chan *chan)
2284 struct l2cap_ctrl control;
2285 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2286 chan->last_acked_seq);
2289 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2290 chan, chan->last_acked_seq, chan->buffer_seq);
2292 memset(&control, 0, sizeof(control));
/* Locally busy in RECV state: tell the remote to stop with RNR */
2295 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2296 chan->rx_state == L2CAP_RX_STATE_RECV) {
2297 __clear_ack_timer(chan);
2298 control.super = L2CAP_SUPER_RNR;
2299 control.reqseq = chan->buffer_seq;
2300 l2cap_send_sframe(chan, &control);
2302 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2303 l2cap_ertm_send(chan);
2304 /* If any i-frames were sent, they included an ack */
2305 if (chan->buffer_seq == chan->last_acked_seq)
2309 /* Ack now if the window is 3/4ths full.
2310 * Calculate without mul or div
/* threshold = ack_win + 2*ack_win, later shifted — i.e. 3/4 * ack_win */
2312 threshold = chan->ack_win;
2313 threshold += threshold << 1;
2316 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2319 if (frames_to_ack >= threshold) {
2320 __clear_ack_timer(chan);
2321 control.super = L2CAP_SUPER_RR;
2322 control.reqseq = chan->buffer_seq;
2323 l2cap_send_sframe(chan, &control);
/* Not enough frames yet: defer the ack via the ack timer */
2328 __set_ack_timer(chan);
/* Copy 'len' bytes of user data from the msghdr iovec into skb: the
 * first 'count' bytes go into skb's linear area, the remainder is split
 * into continuation fragments (each at most conn->mtu) chained on
 * skb_shinfo(skb)->frag_list. Returns 0 on success or a negative errno.
 */
2332 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2333 struct msghdr *msg, int len,
2334 int count, struct sk_buff *skb)
2336 struct l2cap_conn *conn = chan->conn;
2337 struct sk_buff **frag;
2340 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2346 /* Continuation fragments (no L2CAP header) */
2347 frag = &skb_shinfo(skb)->frag_list;
2349 struct sk_buff *tmp;
2351 count = min_t(unsigned int, conn->mtu, len);
/* Fragment allocation goes through the channel ops so the socket
 * layer can honor MSG_DONTWAIT / memory accounting.
 */
2353 tmp = chan->ops->alloc_skb(chan, count,
2354 msg->msg_flags & MSG_DONTWAIT);
2356 return PTR_ERR(tmp);
2360 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2363 (*frag)->priority = skb->priority;
/* Keep the parent skb's bookkeeping in sync with its fragments */
2368 skb->len += (*frag)->len;
2369 skb->data_len += (*frag)->len;
2371 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM +
 * payload copied from the msghdr. Returns the skb or an ERR_PTR.
 */
2377 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2378 struct msghdr *msg, size_t len,
2381 struct l2cap_conn *conn = chan->conn;
2382 struct sk_buff *skb;
2383 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2384 struct l2cap_hdr *lh;
2386 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2387 __le16_to_cpu(chan->psm), len, priority);
/* Linear part is capped by the HCI MTU; the rest becomes fragments */
2389 count = min_t(unsigned int, (conn->mtu - hlen), len);
2391 skb = chan->ops->alloc_skb(chan, count + hlen,
2392 msg->msg_flags & MSG_DONTWAIT);
2396 skb->priority = priority;
2398 /* Create L2CAP header */
2399 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2400 lh->cid = cpu_to_le16(chan->dcid);
2401 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2402 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2404 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2405 if (unlikely(err < 0)) {
2407 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload copied
 * from the msghdr. Returns the skb or an ERR_PTR.
 */
2412 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2413 struct msghdr *msg, size_t len,
2416 struct l2cap_conn *conn = chan->conn;
2417 struct sk_buff *skb;
2419 struct l2cap_hdr *lh;
2421 BT_DBG("chan %p len %zu", chan, len);
2423 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2425 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2426 msg->msg_flags & MSG_DONTWAIT);
2430 skb->priority = priority;
2432 /* Create L2CAP header */
2433 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2434 lh->cid = cpu_to_le16(chan->dcid);
2435 lh->len = cpu_to_le16(len);
2437 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2438 if (unlikely(err < 0)) {
2440 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header + (zeroed) control
 * field + optional SDU-length field, payload from the msghdr. The
 * control field is filled in at send time; FCS space is accounted for in
 * the header length when CRC16 is enabled. Returns the skb or ERR_PTR.
 */
2445 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2446 struct msghdr *msg, size_t len,
2449 struct l2cap_conn *conn = chan->conn;
2450 struct sk_buff *skb;
2451 int err, count, hlen;
2452 struct l2cap_hdr *lh;
2454 BT_DBG("chan %p len %zu", chan, len);
2457 return ERR_PTR(-ENOTCONN);
/* Header size depends on extended vs enhanced control field */
2459 hlen = __ertm_hdr_size(chan);
/* sdulen != 0 marks a SAR start frame carrying the total SDU length */
2462 hlen += L2CAP_SDULEN_SIZE;
2464 if (chan->fcs == L2CAP_FCS_CRC16)
2465 hlen += L2CAP_FCS_SIZE;
2467 count = min_t(unsigned int, (conn->mtu - hlen), len);
2469 skb = chan->ops->alloc_skb(chan, count + hlen,
2470 msg->msg_flags & MSG_DONTWAIT);
2474 /* Create L2CAP header */
2475 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2476 lh->cid = cpu_to_le16(chan->dcid);
2477 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2479 /* Control header is populated later */
2480 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2481 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2483 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2486 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2488 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2489 if (unlikely(err < 0)) {
2491 return ERR_PTR(err);
2494 bt_cb(skb)->control.fcs = chan->fcs;
2495 bt_cb(skb)->control.retries = 0;
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs on seg_queue.
 * PDU size is bounded by the HCI MTU (so each PDU fits one HCI fragment),
 * the BR/EDR payload cap, L2CAP overhead, and the remote's MPS. SAR
 * markers are UNSEGMENTED for a single PDU, otherwise START/CONTINUE/END.
 * Returns 0 on success or a negative errno (seg_queue purged on error).
 */
2499 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2500 struct sk_buff_head *seg_queue,
2501 struct msghdr *msg, size_t len)
2503 struct sk_buff *skb;
2508 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2510 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2511 * so fragmented skbs are not used. The HCI layer's handling
2512 * of fragmented skbs is not compatible with ERTM's queueing.
2515 /* PDU size is derived from the HCI MTU */
2516 pdu_len = chan->conn->mtu;
2518 /* Constrain PDU size for BR/EDR connections */
2520 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2522 /* Adjust for largest possible L2CAP overhead. */
2524 pdu_len -= L2CAP_FCS_SIZE;
2526 pdu_len -= __ertm_hdr_size(chan);
2528 /* Remote device may have requested smaller PDUs */
2529 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2531 if (len <= pdu_len) {
2532 sar = L2CAP_SAR_UNSEGMENTED;
/* Multi-PDU SDU: the START frame additionally carries the SDU length */
2536 sar = L2CAP_SAR_START;
2538 pdu_len -= L2CAP_SDULEN_SIZE;
2542 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2545 __skb_queue_purge(seg_queue);
2546 return PTR_ERR(skb);
2549 bt_cb(skb)->control.sar = sar;
2550 __skb_queue_tail(seg_queue, skb);
/* After the START frame, subsequent PDUs regain the SDU-length bytes */
2555 pdu_len += L2CAP_SDULEN_SIZE;
2558 if (len <= pdu_len) {
2559 sar = L2CAP_SAR_END;
2562 sar = L2CAP_SAR_CONTINUE;
/* Build an LE flow-control (credit-based) PDU: L2CAP header + optional
 * SDU-length field (first PDU of an SDU only) + payload from the
 * msghdr. Returns the skb or an ERR_PTR.
 */
2569 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2571 size_t len, u16 sdulen)
2573 struct l2cap_conn *conn = chan->conn;
2574 struct sk_buff *skb;
2575 int err, count, hlen;
2576 struct l2cap_hdr *lh;
2578 BT_DBG("chan %p len %zu", chan, len);
2581 return ERR_PTR(-ENOTCONN);
2583 hlen = L2CAP_HDR_SIZE;
/* sdulen != 0 marks the first PDU, which carries the SDU length */
2586 hlen += L2CAP_SDULEN_SIZE;
2588 count = min_t(unsigned int, (conn->mtu - hlen), len);
2590 skb = chan->ops->alloc_skb(chan, count + hlen,
2591 msg->msg_flags & MSG_DONTWAIT);
2595 /* Create L2CAP header */
2596 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2597 lh->cid = cpu_to_le16(chan->dcid);
2598 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2601 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2603 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2604 if (unlikely(err < 0)) {
2606 return ERR_PTR(err);
/* Segment an outgoing SDU into LE flow-control PDUs on seg_queue. PDU
 * size is the HCI MTU minus the L2CAP header, capped by the remote MPS;
 * only the first PDU carries the SDU length. Returns 0 or a negative
 * errno (seg_queue purged on error).
 */
2612 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2613 struct sk_buff_head *seg_queue,
2614 struct msghdr *msg, size_t len)
2616 struct sk_buff *skb;
2620 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2622 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2624 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* First PDU loses room to the SDU-length field */
2627 pdu_len -= L2CAP_SDULEN_SIZE;
2633 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2635 __skb_queue_purge(seg_queue);
2636 return PTR_ERR(skb);
2639 __skb_queue_tail(seg_queue, skb);
/* Subsequent PDUs regain the SDU-length bytes */
2645 pdu_len += L2CAP_SDULEN_SIZE;
/* Main transmit entry point: dispatch an outgoing message according to
 * the channel type/mode — connectionless, LE flow-control (credit
 * based), basic, or ERTM/streaming (segmented first, then handed to the
 * TX state machine or streamed directly). Returns bytes sent or a
 * negative errno.
 */
2652 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2655 struct sk_buff *skb;
2657 struct sk_buff_head seg_queue;
2662 /* Connectionless channel */
2663 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2664 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2666 return PTR_ERR(skb);
2668 l2cap_do_send(chan, skb);
2672 switch (chan->mode) {
2673 case L2CAP_MODE_LE_FLOWCTL:
2674 /* Check outgoing MTU */
2675 if (len > chan->omtu)
/* No credits left: cannot send anything right now */
2678 if (!chan->tx_credits)
2681 __skb_queue_head_init(&seg_queue);
2683 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
/* Channel may have closed while segmenting (can block on memory) */
2685 if (chan->state != BT_CONNECTED) {
2686 __skb_queue_purge(&seg_queue);
2693 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2695 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2696 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Out of credits: ask the channel owner to stop feeding us data */
2700 if (!chan->tx_credits)
2701 chan->ops->suspend(chan);
2707 case L2CAP_MODE_BASIC:
2708 /* Check outgoing MTU */
2709 if (len > chan->omtu)
2712 /* Create a basic PDU */
2713 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2715 return PTR_ERR(skb);
2717 l2cap_do_send(chan, skb);
2721 case L2CAP_MODE_ERTM:
2722 case L2CAP_MODE_STREAMING:
2723 /* Check outgoing MTU */
2724 if (len > chan->omtu) {
2729 __skb_queue_head_init(&seg_queue);
2731 /* Do segmentation before calling in to the state machine,
2732 * since it's possible to block while waiting for memory
2735 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2737 /* The channel could have been closed while segmenting,
2738 * check that it is still connected.
2740 if (chan->state != BT_CONNECTED) {
2741 __skb_queue_purge(&seg_queue);
2748 if (chan->mode == L2CAP_MODE_ERTM)
2749 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2751 l2cap_streaming_send(chan, &seg_queue);
2755 /* If the skbs were not queued for sending, they'll still be in
2756 * seg_queue and need to be purged.
2758 __skb_queue_purge(&seg_queue);
2762 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every sequence number from expected_tx_seq up
 * to (but not including) txseq that is not already buffered in srej_q,
 * recording each on srej_list, then advance expected_tx_seq past txseq.
 */
2769 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2771 struct l2cap_ctrl control;
2774 BT_DBG("chan %p, txseq %u", chan, txseq);
2776 memset(&control, 0, sizeof(control));
2778 control.super = L2CAP_SUPER_SREJ;
2780 for (seq = chan->expected_tx_seq; seq != txseq;
2781 seq = __next_seq(chan, seq)) {
/* Skip frames that were already received out of order */
2782 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2783 control.reqseq = seq;
2784 l2cap_send_sframe(chan, &control);
2785 l2cap_seq_list_append(&chan->srej_list, seq);
2789 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send a SREJ for the most recent entry on srej_list (no-op when the
 * list is empty/cleared).
 */
2792 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2794 struct l2cap_ctrl control;
2796 BT_DBG("chan %p", chan);
2798 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2801 memset(&control, 0, sizeof(control));
2803 control.super = L2CAP_SUPER_SREJ;
2804 control.reqseq = chan->srej_list.tail;
2805 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding entry on srej_list except txseq,
 * re-appending each popped entry so the list is preserved; the captured
 * initial head bounds the loop to a single pass.
 */
2808 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2810 struct l2cap_ctrl control;
2814 BT_DBG("chan %p, txseq %u", chan, txseq);
2816 memset(&control, 0, sizeof(control));
2818 control.super = L2CAP_SUPER_SREJ;
2820 /* Capture initial list head to allow only one pass through the list. */
2821 initial_head = chan->srej_list.head;
2824 seq = l2cap_seq_list_pop(&chan->srej_list);
2825 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2828 control.reqseq = seq;
2829 l2cap_send_sframe(chan, &control);
2830 l2cap_seq_list_append(&chan->srej_list, seq);
2831 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement (reqseq): free every acked frame
 * between expected_ack_seq and reqseq from tx_q, update the counters,
 * and stop the retransmission timer once nothing is left unacked.
 */
2834 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2836 struct sk_buff *acked_skb;
2839 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or duplicate ack: nothing to do */
2841 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2844 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2845 chan->expected_ack_seq, chan->unacked_frames);
2847 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2848 ackseq = __next_seq(chan, ackseq)) {
2850 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2852 skb_unlink(acked_skb, &chan->tx_q);
2853 kfree_skb(acked_skb);
2854 chan->unacked_frames--;
2858 chan->expected_ack_seq = reqseq;
2860 if (chan->unacked_frames == 0)
2861 __clear_retrans_timer(chan);
2863 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT receive state: discard buffered out-of-order
 * frames and pending SREJs, reset expected_tx_seq, and drop back to the
 * plain RECV state.
 */
2866 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2868 BT_DBG("chan %p", chan);
2870 chan->expected_tx_seq = chan->buffer_seq;
2871 l2cap_seq_list_clear(&chan->srej_list);
2872 skb_queue_purge(&chan->srej_q);
2873 chan->rx_state = L2CAP_RX_STATE_RECV;
/* TX state machine, XMIT state: handle outgoing-data requests, local
 * busy transitions, acks, explicit polls and retransmission timeouts.
 * Poll events arm the monitor timer and move the machine to WAIT_F.
 */
2876 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2877 struct l2cap_ctrl *control,
2878 struct sk_buff_head *skbs, u8 event)
2880 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2884 case L2CAP_EV_DATA_REQUEST:
/* New data: queue it and (in this state) transmit immediately */
2885 if (chan->tx_send_head == NULL)
2886 chan->tx_send_head = skb_peek(skbs);
2888 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2889 l2cap_ertm_send(chan);
2891 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2892 BT_DBG("Enter LOCAL_BUSY");
2893 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2895 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2896 /* The SREJ_SENT state must be aborted if we are to
2897 * enter the LOCAL_BUSY state.
2899 l2cap_abort_rx_srej_sent(chan);
2902 l2cap_send_ack(chan);
2905 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2906 BT_DBG("Exit LOCAL_BUSY");
2907 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we sent RNR, poll the remote with RR to resume the flow */
2909 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2910 struct l2cap_ctrl local_control;
2912 memset(&local_control, 0, sizeof(local_control));
2913 local_control.sframe = 1;
2914 local_control.super = L2CAP_SUPER_RR;
2915 local_control.poll = 1;
2916 local_control.reqseq = chan->buffer_seq;
2917 l2cap_send_sframe(chan, &local_control);
2919 chan->retry_count = 1;
2920 __set_monitor_timer(chan);
2921 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2924 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2925 l2cap_process_reqseq(chan, control->reqseq);
2927 case L2CAP_EV_EXPLICIT_POLL:
2928 l2cap_send_rr_or_rnr(chan, 1);
2929 chan->retry_count = 1;
2930 __set_monitor_timer(chan);
2931 __clear_ack_timer(chan);
2932 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2934 case L2CAP_EV_RETRANS_TO:
2935 l2cap_send_rr_or_rnr(chan, 1);
2936 chan->retry_count = 1;
2937 __set_monitor_timer(chan);
2938 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2940 case L2CAP_EV_RECV_FBIT:
2941 /* Nothing to process */
/* TX state machine, WAIT_F state (awaiting a frame with the F-bit after
 * a poll): queue new data without sending, handle busy transitions and
 * acks, return to XMIT on receiving the F-bit, and on monitor timeout
 * either re-poll (bounded by max_tx) or disconnect.
 */
2948 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2949 struct l2cap_ctrl *control,
2950 struct sk_buff_head *skbs, u8 event)
2952 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2956 case L2CAP_EV_DATA_REQUEST:
2957 if (chan->tx_send_head == NULL)
2958 chan->tx_send_head = skb_peek(skbs);
2959 /* Queue data, but don't send. */
2960 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2962 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2963 BT_DBG("Enter LOCAL_BUSY");
2964 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2966 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2967 /* The SREJ_SENT state must be aborted if we are to
2968 * enter the LOCAL_BUSY state.
2970 l2cap_abort_rx_srej_sent(chan);
2973 l2cap_send_ack(chan);
2976 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2977 BT_DBG("Exit LOCAL_BUSY");
2978 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2980 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2981 struct l2cap_ctrl local_control;
2982 memset(&local_control, 0, sizeof(local_control));
2983 local_control.sframe = 1;
2984 local_control.super = L2CAP_SUPER_RR;
2985 local_control.poll = 1;
2986 local_control.reqseq = chan->buffer_seq;
2987 l2cap_send_sframe(chan, &local_control);
2989 chan->retry_count = 1;
2990 __set_monitor_timer(chan);
2991 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2994 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2995 l2cap_process_reqseq(chan, control->reqseq);
2999 case L2CAP_EV_RECV_FBIT:
/* F-bit answers our poll: stop the monitor timer, restart the
 * retransmission timer if frames are still unacked, back to XMIT.
 */
3000 if (control && control->final) {
3001 __clear_monitor_timer(chan);
3002 if (chan->unacked_frames > 0)
3003 __set_retrans_timer(chan);
3004 chan->retry_count = 0;
3005 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format string "0x2.2%x" looks malformed — presumably
 * "0x%2.2x" was intended; debug-only output, verify upstream.
 */
3006 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3009 case L2CAP_EV_EXPLICIT_POLL:
3012 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means "retry forever" */
3013 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3014 l2cap_send_rr_or_rnr(chan, 1);
3015 __set_monitor_timer(chan);
3016 chan->retry_count++;
3018 l2cap_send_disconn_req(chan, ECONNABORTED);
3026 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3027 struct sk_buff_head *skbs, u8 event)
3029 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3030 chan, control, skbs, event, chan->tx_state);
3032 switch (chan->tx_state) {
3033 case L2CAP_TX_STATE_XMIT:
3034 l2cap_tx_state_xmit(chan, control, skbs, event);
3036 case L2CAP_TX_STATE_WAIT_F:
3037 l2cap_tx_state_wait_f(chan, control, skbs, event);
3045 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3046 struct l2cap_ctrl *control)
3048 BT_DBG("chan %p, control %p", chan, control);
3049 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3052 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3053 struct l2cap_ctrl *control)
3055 BT_DBG("chan %p, control %p", chan, control);
3056 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3059 /* Copy frame to all raw sockets on that connection */
3060 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3062 struct sk_buff *nskb;
3063 struct l2cap_chan *chan;
3065 BT_DBG("conn %p", conn);
3067 mutex_lock(&conn->chan_lock);
3069 list_for_each_entry(chan, &conn->chan_l, list) {
3070 if (chan->chan_type != L2CAP_CHAN_RAW)
3073 /* Don't send frame to the channel it came from */
3074 if (bt_cb(skb)->chan == chan)
3077 nskb = skb_clone(skb, GFP_KERNEL);
3080 if (chan->ops->recv(chan, nskb))
3084 mutex_unlock(&conn->chan_lock);
3087 /* ---- L2CAP signalling commands ---- */
/* Allocate and build a signalling command PDU: L2CAP header + command
 * header + dlen bytes of payload.  Payload beyond one connection MTU is
 * carried in continuation skbs chained on frag_list.
 * NOTE(review): brace lines and error/return paths (e.g. on allocation
 * failure) are elided in this extract; refer to the full source.
 */
3088 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3089 u8 ident, u16 dlen, void *data)
3091 struct sk_buff *skb, **frag;
3092 struct l2cap_cmd_hdr *cmd;
3093 struct l2cap_hdr *lh;
3096 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3097 conn, code, ident, dlen);
/* The MTU must at least hold both fixed headers */
3099 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3102 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
/* First skb carries at most one MTU of the PDU */
3103 count = min_t(unsigned int, conn->mtu, len);
3105 skb = bt_skb_alloc(count, GFP_KERNEL);
3109 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
3110 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links */
3112 if (conn->hcon->type == LE_LINK)
3113 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3115 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
3117 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
3120 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits after the headers in the first skb */
3123 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3124 memcpy(skb_put(skb, count), data, count);
3130 /* Continuation fragments (no L2CAP header) */
3131 frag = &skb_shinfo(skb)->frag_list;
3133 count = min_t(unsigned int, conn->mtu, len);
3135 *frag = bt_skb_alloc(count, GFP_KERNEL);
3139 memcpy(skb_put(*frag, count), data, count);
3144 frag = &(*frag)->next;
3154 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3157 struct l2cap_conf_opt *opt = *ptr;
3160 len = L2CAP_CONF_OPT_SIZE + opt->len;
3168 *val = *((u8 *) opt->val);
3172 *val = get_unaligned_le16(opt->val);
3176 *val = get_unaligned_le32(opt->val);
3180 *val = (unsigned long) opt->val;
3184 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3188 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3190 struct l2cap_conf_opt *opt = *ptr;
3192 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3199 *((u8 *) opt->val) = val;
3203 put_unaligned_le16(val, opt->val);
3207 put_unaligned_le32(val, opt->val);
3211 memcpy(opt->val, (void *) val, len);
3215 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option for the channel's mode.
 * ERTM uses the channel's negotiated local parameters with the default
 * access latency and EFS flush timeout; streaming mode advertises a
 * best-effort service.
 * NOTE(review): the remaining streaming-case field assignments and the
 * default (no-EFS) return path appear elided in this extract.
 */
3218 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3220 struct l2cap_conf_efs efs;
3222 switch (chan->mode) {
3223 case L2CAP_MODE_ERTM:
3224 efs.id = chan->local_id;
3225 efs.stype = chan->local_stype;
3226 efs.msdu = cpu_to_le16(chan->local_msdu);
3227 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3228 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3229 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3232 case L2CAP_MODE_STREAMING:
3234 efs.stype = L2CAP_SERV_BESTEFFORT;
3235 efs.msdu = cpu_to_le16(chan->local_msdu);
3236 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
/* Serialize the EFS structure as a configuration option */
3245 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3246 (unsigned long) &efs);
/* Delayed-work handler for the ERTM acknowledgment timer: if frames have
 * been received but not yet acknowledged, send an RR (or RNR when busy)
 * to acknowledge them.  Drops the channel reference taken when the timer
 * was armed.
 * NOTE(review): the conditional guarding the send (only when
 * frames_to_ack is non-zero) appears elided in this extract.
 */
3249 static void l2cap_ack_timeout(struct work_struct *work)
3251 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3255 BT_DBG("chan %p", chan);
3257 l2cap_chan_lock(chan);
/* Number of received-but-unacked frames since the last ack we sent */
3259 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3260 chan->last_acked_seq);
3263 l2cap_send_rr_or_rnr(chan, 0);
3265 l2cap_chan_unlock(chan);
/* Balances the reference held by the armed timer */
3266 l2cap_chan_put(chan);
/* Initialize per-channel ERTM/streaming state: sequence counters, queues,
 * AMP move state and, for ERTM only, the state machines, retransmission
 * timers and sequence lists.  Returns 0 on success or a negative errno
 * from sequence-list allocation.
 * NOTE(review): the early return for non-ERTM modes and the final error
 * returns appear elided in this extract.
 */
3269 int l2cap_ertm_init(struct l2cap_chan *chan)
/* Reset all transmit/receive sequence bookkeeping */
3273 chan->next_tx_seq = 0;
3274 chan->expected_tx_seq = 0;
3275 chan->expected_ack_seq = 0;
3276 chan->unacked_frames = 0;
3277 chan->buffer_seq = 0;
3278 chan->frames_sent = 0;
3279 chan->last_acked_seq = 0;
3281 chan->sdu_last_frag = NULL;
3284 skb_queue_head_init(&chan->tx_q);
/* Channel starts on the BR/EDR controller with no AMP move in progress */
3286 chan->local_amp_id = AMP_ID_BREDR;
3287 chan->move_id = AMP_ID_BREDR;
3288 chan->move_state = L2CAP_MOVE_STABLE;
3289 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Everything below applies to ERTM only */
3291 if (chan->mode != L2CAP_MODE_ERTM)
3294 chan->rx_state = L2CAP_RX_STATE_RECV;
3295 chan->tx_state = L2CAP_TX_STATE_XMIT;
3297 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3298 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3299 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3301 skb_queue_head_init(&chan->srej_q);
3303 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3307 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* On failure of the second list, undo the first */
3309 l2cap_seq_list_free(&chan->srej_list);
3314 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3317 case L2CAP_MODE_STREAMING:
3318 case L2CAP_MODE_ERTM:
3319 if (l2cap_mode_supported(mode, remote_feat_mask))
3323 return L2CAP_MODE_BASIC;
3327 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3329 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3332 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3334 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the ERTM retransmission and monitor timeouts in *rfc.  On an
 * AMP link the timeouts are derived from the controller's best-effort
 * flush timeout (clamped to 16 bits); on BR/EDR the spec defaults are
 * used.
 * NOTE(review): the else-branch structure and the 0xffff clamp
 * assignment appear elided in this extract.
 */
3337 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3338 struct l2cap_conf_rfc *rfc)
3340 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3341 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3343 /* Class 1 devices have must have ERTM timeouts
3344 * exceeding the Link Supervision Timeout. The
3345 * default Link Supervision Timeout for AMP
3346 * controllers is 10 seconds.
3348 * Class 1 devices use 0xffffffff for their
3349 * best-effort flush timeout, so the clamping logic
3350 * will result in a timeout that meets the above
3351 * requirement. ERTM timeouts are 16-bit values, so
3352 * the maximum timeout is 65.535 seconds.
3355 /* Convert timeout to milliseconds and round */
3356 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3358 /* This is the recommended formula for class 2 devices
3359 * that start ERTM timers when packets are sent to the
3362 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field in the RFC option */
3364 if (ertm_to > 0xffff)
3367 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3368 rfc->monitor_timeout = rfc->retrans_timeout;
/* BR/EDR: use the specification default timeouts */
3370 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3371 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3375 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3377 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3378 __l2cap_ews_supported(chan->conn)) {
3379 /* use extended control field */
3380 set_bit(FLAG_EXT_CTRL, &chan->flags);
3381 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3383 chan->tx_win = min_t(u16, chan->tx_win,
3384 L2CAP_DEFAULT_TX_WINDOW);
3385 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3387 chan->ack_win = chan->tx_win;
/* Build an outgoing Configure Request into *data for the channel's mode:
 * MTU, RFC (mode-specific parameters), and optionally EFS, EWS and FCS
 * options.  Returns the length of the request written.
 * NOTE(review): several break/return lines, brace lines and some option
 * arguments appear elided in this extract.
 */
3390 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3392 struct l2cap_conf_req *req = data;
3393 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3394 void *ptr = req->data;
3397 BT_DBG("chan %p", chan);
/* Mode selection only happens on the first request of the exchange */
3399 if (chan->num_conf_req || chan->num_conf_rsp)
3402 switch (chan->mode) {
3403 case L2CAP_MODE_STREAMING:
3404 case L2CAP_MODE_ERTM:
/* State-2 devices keep their configured mode as-is */
3405 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3408 if (__l2cap_efs_supported(chan->conn))
3409 set_bit(FLAG_EFS_ENABLE, &chan->flags)
/* Fall back to a mode the remote's feature mask supports */
3413 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise a non-default MTU */
3418 if (chan->imtu != L2CAP_DEFAULT_MTU)
3419 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3421 switch (chan->mode) {
3422 case L2CAP_MODE_BASIC:
/* No RFC option needed if the remote knows neither ERTM nor streaming */
3423 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3424 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3427 rfc.mode = L2CAP_MODE_BASIC;
3429 rfc.max_transmit = 0;
3430 rfc.retrans_timeout = 0;
3431 rfc.monitor_timeout = 0;
3432 rfc.max_pdu_size = 0;
3434 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3435 (unsigned long) &rfc);
3438 case L2CAP_MODE_ERTM:
3439 rfc.mode = L2CAP_MODE_ERTM;
3440 rfc.max_transmit = chan->max_tx;
3442 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size limited by connection MTU minus worst-case headers */
3444 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3445 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3447 rfc.max_pdu_size = cpu_to_le16(size);
3449 l2cap_txwin_setup(chan);
3451 rfc.txwin_size = min_t(u16, chan->tx_win,
3452 L2CAP_DEFAULT_TX_WINDOW);
3454 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3455 (unsigned long) &rfc);
3457 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3458 l2cap_add_opt_efs(&ptr, chan);
/* Extended window size goes in its own option */
3460 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3461 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Request no FCS if allowed and either side prefers it off */
3464 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3465 if (chan->fcs == L2CAP_FCS_NONE ||
3466 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3467 chan->fcs = L2CAP_FCS_NONE;
3468 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3473 case L2CAP_MODE_STREAMING:
3474 l2cap_txwin_setup(chan);
3475 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming has no retransmission: zero the ERTM-only fields */
3477 rfc.max_transmit = 0;
3478 rfc.retrans_timeout = 0;
3479 rfc.monitor_timeout = 0;
3481 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3482 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3484 rfc.max_pdu_size = cpu_to_le16(size);
3486 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3487 (unsigned long) &rfc);
3489 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3490 l2cap_add_opt_efs(&ptr, chan);
3492 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3493 if (chan->fcs == L2CAP_FCS_NONE ||
3494 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3495 chan->fcs = L2CAP_FCS_NONE;
3496 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
/* Finish the request header; length is how far ptr advanced */
3502 req->dcid = cpu_to_le16(chan->dcid);
3503 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated Configure Request in chan->conf_req and build
 * the Configure Response into *data.  Negotiates MTU, mode (RFC), FCS,
 * EFS and EWS; sets CONF_* state bits as each parameter is settled.
 * Returns the response length, or -ECONNREFUSED when negotiation fails.
 * NOTE(review): many brace/break/return lines are elided in this
 * extract.
 * NOTE(review): options are appended to the response via ptr without an
 * explicit bound check against the caller's buffer; later upstream
 * kernels pass an explicit size to guard this path (cf.
 * CVE-2017-1000251) - verify against this tree's callers.
 */
3508 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3510 struct l2cap_conf_rsp *rsp = data;
3511 void *ptr = rsp->data;
3512 void *req = chan->conf_req;
3513 int len = chan->conf_len;
3514 int type, hint, olen;
3516 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3517 struct l2cap_conf_efs efs;
3519 u16 mtu = L2CAP_DEFAULT_MTU;
3520 u16 result = L2CAP_CONF_SUCCESS;
3523 BT_DBG("chan %p", chan);
/* First pass: collect every option the remote sent */
3525 while (len >= L2CAP_CONF_OPT_SIZE) {
3526 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit: unknown hinted options are ignored, not rejected */
3528 hint = type & L2CAP_CONF_HINT;
3529 type &= L2CAP_CONF_MASK;
3532 case L2CAP_CONF_MTU:
3536 case L2CAP_CONF_FLUSH_TO:
3537 chan->flush_to = val;
3540 case L2CAP_CONF_QOS:
3543 case L2CAP_CONF_RFC:
3544 if (olen == sizeof(rfc))
3545 memcpy(&rfc, (void *) val, olen);
3548 case L2CAP_CONF_FCS:
3549 if (val == L2CAP_FCS_NONE)
3550 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3553 case L2CAP_CONF_EFS:
3555 if (olen == sizeof(efs))
3556 memcpy(&efs, (void *) val, olen);
3559 case L2CAP_CONF_EWS:
/* Extended window requires high-speed support */
3560 if (!chan->conn->hs_enabled)
3561 return -ECONNREFUSED;
3563 set_bit(FLAG_EXT_CTRL, &chan->flags);
3564 set_bit(CONF_EWS_RECV, &chan->conf_state);
3565 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3566 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as unknown */
3573 result = L2CAP_CONF_UNKNOWN;
3574 *((u8 *) ptr++) = type;
/* Mode can only be (re)selected early in the exchange */
3579 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3582 switch (chan->mode) {
3583 case L2CAP_MODE_STREAMING:
3584 case L2CAP_MODE_ERTM:
3585 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3586 chan->mode = l2cap_select_mode(rfc.mode,
3587 chan->conn->feat_mask);
3592 if (__l2cap_efs_supported(chan->conn))
3593 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3595 return -ECONNREFUSED;
3598 if (chan->mode != rfc.mode)
3599 return -ECONNREFUSED;
/* Remote proposed a different mode: counter-propose ours */
3605 if (chan->mode != rfc.mode) {
3606 result = L2CAP_CONF_UNACCEPT;
3607 rfc.mode = chan->mode;
/* Give up after one failed counter-proposal */
3609 if (chan->num_conf_rsp == 1)
3610 return -ECONNREFUSED;
3612 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3613 (unsigned long) &rfc);
3616 if (result == L2CAP_CONF_SUCCESS) {
3617 /* Configure output options and let the other side know
3618 * which ones we don't like. */
3620 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3621 result = L2CAP_CONF_UNACCEPT;
3624 set_bit(CONF_MTU_DONE, &chan->conf_state);
3626 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must be compatible with our local one */
3629 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3630 efs.stype != L2CAP_SERV_NOTRAFIC &&
3631 efs.stype != chan->local_stype) {
3633 result = L2CAP_CONF_UNACCEPT;
3635 if (chan->num_conf_req >= 1)
3636 return -ECONNREFUSED;
3638 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3640 (unsigned long) &efs);
3642 /* Send PENDING Conf Rsp */
3643 result = L2CAP_CONF_PENDING;
3644 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3649 case L2CAP_MODE_BASIC:
3650 chan->fcs = L2CAP_FCS_NONE;
3651 set_bit(CONF_MODE_DONE, &chan->conf_state);
3654 case L2CAP_MODE_ERTM:
/* EWS option overrides the RFC txwin field when present */
3655 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3656 chan->remote_tx_win = rfc.txwin_size;
3658 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3660 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote PDU size to what fits our connection MTU */
3662 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3663 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3664 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3665 rfc.max_pdu_size = cpu_to_le16(size);
3666 chan->remote_mps = size;
3668 __l2cap_set_ertm_timeouts(chan, &rfc);
3670 set_bit(CONF_MODE_DONE, &chan->conf_state);
3672 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3673 sizeof(rfc), (unsigned long) &rfc);
3675 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3676 chan->remote_id = efs.id;
3677 chan->remote_stype = efs.stype;
3678 chan->remote_msdu = le16_to_cpu(efs.msdu);
3679 chan->remote_flush_to =
3680 le32_to_cpu(efs.flush_to);
3681 chan->remote_acc_lat =
3682 le32_to_cpu(efs.acc_lat);
3683 chan->remote_sdu_itime =
3684 le32_to_cpu(efs.sdu_itime);
3685 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3687 (unsigned long) &efs);
3691 case L2CAP_MODE_STREAMING:
3692 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3693 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3694 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3695 rfc.max_pdu_size = cpu_to_le16(size);
3696 chan->remote_mps = size;
3698 set_bit(CONF_MODE_DONE, &chan->conf_state);
3700 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3701 (unsigned long) &rfc);
/* Unsupported mode requested: reject with our mode */
3706 result = L2CAP_CONF_UNACCEPT;
3708 memset(&rfc, 0, sizeof(rfc));
3709 rfc.mode = chan->mode;
3712 if (result == L2CAP_CONF_SUCCESS)
3713 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3715 rsp->scid = cpu_to_le16(chan->dcid);
3716 rsp->result = cpu_to_le16(result);
3717 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the remote and build the follow-up
 * Configure Request into *data, adopting or re-proposing each option.
 * On success the channel's ERTM/streaming parameters are updated from
 * the negotiated values.  Returns the new request length or
 * -ECONNREFUSED when the response is unacceptable.
 * NOTE(review): options are appended via ptr with no bound check on the
 * caller's req buffer; this is the pattern fixed upstream for
 * CVE-2017-1000251 (BlueBorne) by threading a buffer size through -
 * verify this tree carries that fix.
 */
3722 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3723 void *data, u16 *result)
3725 struct l2cap_conf_req *req = data;
3726 void *ptr = req->data;
3729 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3730 struct l2cap_conf_efs efs;
3732 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3734 while (len >= L2CAP_CONF_OPT_SIZE) {
3735 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3738 case L2CAP_CONF_MTU:
/* Remote rejected our MTU as too small: fall back to minimum */
3739 if (val < L2CAP_DEFAULT_MIN_MTU) {
3740 *result = L2CAP_CONF_UNACCEPT;
3741 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3744 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3747 case L2CAP_CONF_FLUSH_TO:
3748 chan->flush_to = val;
3749 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3753 case L2CAP_CONF_RFC:
3754 if (olen == sizeof(rfc))
3755 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot change mode mid-negotiation */
3757 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3758 rfc.mode != chan->mode)
3759 return -ECONNREFUSED;
3763 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3764 sizeof(rfc), (unsigned long) &rfc);
3767 case L2CAP_CONF_EWS:
3768 chan->ack_win = min_t(u16, val, chan->ack_win);
3769 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3773 case L2CAP_CONF_EFS:
3774 if (olen == sizeof(efs))
3775 memcpy(&efs, (void *)val, olen);
/* Remote's EFS service type must match ours */
3777 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3778 efs.stype != L2CAP_SERV_NOTRAFIC &&
3779 efs.stype != chan->local_stype)
3780 return -ECONNREFUSED;
3782 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3783 (unsigned long) &efs);
3786 case L2CAP_CONF_FCS:
3787 if (*result == L2CAP_CONF_PENDING)
3788 if (val == L2CAP_FCS_NONE)
3789 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be switched to another mode */
3795 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3796 return -ECONNREFUSED;
3798 chan->mode = rfc.mode;
3800 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3802 case L2CAP_MODE_ERTM:
3803 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3804 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3805 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3806 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3807 chan->ack_win = min_t(u16, chan->ack_win,
3810 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3811 chan->local_msdu = le16_to_cpu(efs.msdu);
3812 chan->local_sdu_itime =
3813 le32_to_cpu(efs.sdu_itime);
3814 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3815 chan->local_flush_to =
3816 le32_to_cpu(efs.flush_to);
3820 case L2CAP_MODE_STREAMING:
3821 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3825 req->dcid = cpu_to_le16(chan->dcid);
3826 req->flags = __constant_cpu_to_le16(0);
3831 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3832 u16 result, u16 flags)
3834 struct l2cap_conf_rsp *rsp = data;
3835 void *ptr = rsp->data;
3837 BT_DBG("chan %p", chan);
3839 rsp->scid = cpu_to_le16(chan->dcid);
3840 rsp->result = cpu_to_le16(result);
3841 rsp->flags = cpu_to_le16(flags);
3846 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3848 struct l2cap_le_conn_rsp rsp;
3849 struct l2cap_conn *conn = chan->conn;
3851 BT_DBG("chan %p", chan);
3853 rsp.dcid = cpu_to_le16(chan->scid);
3854 rsp.mtu = cpu_to_le16(chan->imtu);
3855 rsp.mps = cpu_to_le16(chan->mps);
3856 rsp.credits = cpu_to_le16(chan->rx_credits);
3857 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3859 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3863 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3865 struct l2cap_conn_rsp rsp;
3866 struct l2cap_conn *conn = chan->conn;
3870 rsp.scid = cpu_to_le16(chan->dcid);
3871 rsp.dcid = cpu_to_le16(chan->scid);
3872 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3873 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3876 rsp_code = L2CAP_CREATE_CHAN_RSP;
3878 rsp_code = L2CAP_CONN_RSP;
3880 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3882 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3884 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3887 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3888 l2cap_build_conf_req(chan, buf), buf);
3889 chan->num_conf_req++;
/* Extract the RFC (and extended window) parameters from a successful
 * Configure Response and apply them to the channel.  Sane defaults are
 * pre-loaded so a remote that omits the options still yields a working
 * configuration.  No-op for basic-mode channels.
 * NOTE(review): the EWS value capture and the non-ext-ctrl ack_win
 * clamp's second argument appear elided in this extract.
 */
3892 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3896 /* Use sane default values in case a misbehaving remote device
3897 * did not send an RFC or extended window size option.
3899 u16 txwin_ext = chan->ack_win;
3900 struct l2cap_conf_rfc rfc = {
3902 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3903 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3904 .max_pdu_size = cpu_to_le16(chan->imtu),
3905 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3908 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming carry RFC parameters */
3910 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3913 while (len >= L2CAP_CONF_OPT_SIZE) {
3914 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3917 case L2CAP_CONF_RFC:
3918 if (olen == sizeof(rfc))
3919 memcpy(&rfc, (void *)val, olen);
3921 case L2CAP_CONF_EWS:
3928 case L2CAP_MODE_ERTM:
3929 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3930 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3931 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control: window comes from EWS, else from the RFC field */
3932 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3933 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3935 chan->ack_win = min_t(u16, chan->ack_win,
3938 case L2CAP_MODE_STREAMING:
3939 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3943 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3944 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3947 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3949 if (cmd_len < sizeof(*rej))
3952 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3955 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3956 cmd->ident == conn->info_ident) {
3957 cancel_delayed_work(&conn->info_timer);
3959 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3960 conn->info_ident = 0;
3962 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, run security checks, create the new channel, send the
 * Connection Response, and start the information/configuration exchange
 * as appropriate.  Returns the new channel (or NULL on rejection).
 * NOTE(review): numerous brace/goto/label lines and the dcid assignment
 * are elided in this extract.
 */
3968 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3969 struct l2cap_cmd_hdr *cmd,
3970 u8 *data, u8 rsp_code, u8 amp_id)
3972 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3973 struct l2cap_conn_rsp rsp;
3974 struct l2cap_chan *chan = NULL, *pchan;
3975 int result, status = L2CAP_CS_NO_INFO;
3977 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3978 __le16 psm = req->psm;
3980 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3982 /* Check if we have socket listening on psm */
3983 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3984 &conn->hcon->dst, ACL_LINK);
3986 result = L2CAP_CR_BAD_PSM;
3990 mutex_lock(&conn->chan_lock);
3991 l2cap_chan_lock(pchan);
3993 /* Check if the ACL is secure enough (if not SDP) */
3994 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3995 !hci_conn_check_link_mode(conn->hcon)) {
3996 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3997 result = L2CAP_CR_SEC_BLOCK;
4001 result = L2CAP_CR_NO_MEM;
4003 /* Check if we already have channel with that dcid */
4004 if (__l2cap_get_chan_by_dcid(conn, scid))
4007 chan = pchan->ops->new_connection(pchan);
4011 /* For certain devices (ex: HID mouse), support for authentication,
4012 * pairing and bonding is optional. For such devices, inorder to avoid
4013 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4014 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4016 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
/* Bind the new channel to the addresses of this ACL link */
4018 bacpy(&chan->src, &conn->hcon->src);
4019 bacpy(&chan->dst, &conn->hcon->dst);
4020 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
4021 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
4024 chan->local_amp_id = amp_id;
4026 __l2cap_chan_add(conn, chan);
4030 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4032 chan->ident = cmd->ident;
/* Can only decide once the remote's feature mask is known */
4034 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4035 if (l2cap_chan_check_security(chan)) {
4036 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4037 l2cap_state_change(chan, BT_CONNECT2);
4038 result = L2CAP_CR_PEND;
4039 status = L2CAP_CS_AUTHOR_PEND;
4040 chan->ops->defer(chan);
4042 /* Force pending result for AMP controllers.
4043 * The connection will succeed after the
4044 * physical link is up.
4046 if (amp_id == AMP_ID_BREDR) {
4047 l2cap_state_change(chan, BT_CONFIG);
4048 result = L2CAP_CR_SUCCESS;
4050 l2cap_state_change(chan, BT_CONNECT2);
4051 result = L2CAP_CR_PEND;
4053 status = L2CAP_CS_NO_INFO;
/* Security procedure still running */
4056 l2cap_state_change(chan, BT_CONNECT2);
4057 result = L2CAP_CR_PEND;
4058 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not yet known: keep the connection pending */
4061 l2cap_state_change(chan, BT_CONNECT2);
4062 result = L2CAP_CR_PEND;
4063 status = L2CAP_CS_NO_INFO;
4067 l2cap_chan_unlock(pchan);
4068 mutex_unlock(&conn->chan_lock);
4071 rsp.scid = cpu_to_le16(scid);
4072 rsp.dcid = cpu_to_le16(dcid);
4073 rsp.result = cpu_to_le16(result);
4074 rsp.status = cpu_to_le16(status);
4075 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off the feature-mask query if we have not run it yet */
4077 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4078 struct l2cap_info_req info;
4079 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4081 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4082 conn->info_ident = l2cap_get_ident(conn);
4084 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4086 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4087 sizeof(info), &info);
/* Success: immediately start configuration */
4090 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4091 result == L2CAP_CR_SUCCESS) {
4093 set_bit(CONF_REQ_SENT, &chan->conf_state);
4094 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4095 l2cap_build_conf_req(chan, buf), buf);
4096 chan->num_conf_req++;
/* Signalling handler for a BR/EDR Connection Request: notify the
 * management interface of the (first) connection, then delegate channel
 * creation to l2cap_connect().
 * NOTE(review): the length-check return, hci_dev_lock() and the final
 * mgmt_device_connected() argument appear elided in this extract.
 */
4102 static int l2cap_connect_req(struct l2cap_conn *conn,
4103 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4105 struct hci_dev *hdev = conn->hcon->hdev;
4106 struct hci_conn *hcon = conn->hcon;
/* Malformed request: too short to contain a Connection Request */
4108 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report the device as connected to mgmt exactly once per ACL */
4112 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
4113 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4114 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
4115 hcon->dst_type, 0, NULL, 0,
4117 hci_dev_unlock(hdev);
4119 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or AMP Create Channel Response): locate
 * the matching channel by scid (or by ident while still unassigned) and
 * advance it to BT_CONFIG on success, mark it pending, or tear it down
 * on refusal.
 * NOTE(review): the dcid assignment on success, break statements and
 * error-path labels appear elided in this extract.
 */
4123 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4124 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4127 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4128 u16 scid, dcid, result, status;
4129 struct l2cap_chan *chan;
4133 if (cmd_len < sizeof(*rsp))
4136 scid = __le16_to_cpu(rsp->scid);
4137 dcid = __le16_to_cpu(rsp->dcid);
4138 result = __le16_to_cpu(rsp->result);
4139 status = __le16_to_cpu(rsp->status);
4141 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4142 dcid, scid, result, status);
4144 mutex_lock(&conn->chan_lock);
/* Prefer lookup by our source CID; fall back to the command ident */
4147 chan = __l2cap_get_chan_by_scid(conn, scid);
4153 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4162 l2cap_chan_lock(chan);
4165 case L2CAP_CR_SUCCESS:
4166 l2cap_state_change(chan, BT_CONFIG);
4169 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send our Configure Request only once */
4171 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4174 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4175 l2cap_build_conf_req(chan, req), req);
4176 chan->num_conf_req++;
/* Pending: remember we are still waiting for the final response */
4180 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: connection refused, drop the channel */
4184 l2cap_chan_del(chan, ECONNREFUSED);
4188 l2cap_chan_unlock(chan);
4191 mutex_unlock(&conn->chan_lock);
4196 static inline void set_default_fcs(struct l2cap_chan *chan)
4198 /* FCS is enabled only in ERTM or streaming mode, if one or both
4201 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4202 chan->fcs = L2CAP_FCS_NONE;
4203 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4204 chan->fcs = L2CAP_FCS_CRC16;
4207 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4208 u8 ident, u16 flags)
4210 struct l2cap_conn *conn = chan->conn;
4212 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4215 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4216 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4218 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4219 l2cap_build_conf_rsp(chan, data,
4220 L2CAP_CONF_SUCCESS, flags), data);
4223 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4226 struct l2cap_cmd_rej_cid rej;
4228 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4229 rej.scid = __cpu_to_le16(scid);
4230 rej.dcid = __cpu_to_le16(dcid);
4232 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle a Configure Request: accumulate (possibly fragmented) options
 * in chan->conf_req, and once complete, parse them and send the
 * Configure Response.  When both directions are done, finish channel
 * setup (FCS selection, ERTM init, channel-ready).
 * NOTE(review): brace/goto/return lines and the conf_len reset are
 * elided in this extract.
 */
4235 static inline int l2cap_config_req(struct l2cap_conn *conn,
4236 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4239 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4242 struct l2cap_chan *chan;
4245 if (cmd_len < sizeof(*req))
4248 dcid = __le16_to_cpu(req->dcid);
4249 flags = __le16_to_cpu(req->flags);
4251 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4253 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Unknown CID: reject per spec */
4255 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Configuration only valid in BT_CONFIG/BT_CONNECT2 */
4259 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4260 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4265 /* Reject if config buffer is too small. */
4266 len = cmd_len - sizeof(*req);
4267 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4268 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4269 l2cap_build_conf_rsp(chan, rsp,
4270 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the request */
4275 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4276 chan->conf_len += len;
4278 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4279 /* Incomplete config. Send empty response. */
4280 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4281 l2cap_build_conf_rsp(chan, rsp,
4282 L2CAP_CONF_SUCCESS, flags), rsp);
4286 /* Complete config. */
4287 len = l2cap_parse_conf_req(chan, rsp);
/* Negotiation failure: drop the connection */
4289 l2cap_send_disconn_req(chan, ECONNRESET);
4293 chan->ident = cmd->ident;
4294 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4295 chan->num_conf_rsp++;
4297 /* Reset config buffer. */
4300 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalize the channel */
4303 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4304 set_default_fcs(chan);
4306 if (chan->mode == L2CAP_MODE_ERTM ||
4307 chan->mode == L2CAP_MODE_STREAMING)
4308 err = l2cap_ertm_init(chan);
4311 l2cap_send_disconn_req(chan, -err);
4313 l2cap_chan_ready(chan);
/* We have not sent our own Configure Request yet: do so now */
4318 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4320 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4321 l2cap_build_conf_req(chan, buf), buf);
4322 chan->num_conf_req++;
4325 /* Got Conf Rsp PENDING from remote side and asume we sent
4326 Conf Rsp PENDING in the code above */
4327 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4328 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4330 /* check compatibility */
4332 /* Send rsp for BR/EDR channel */
4334 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4336 chan->ident = cmd->ident;
4340 l2cap_chan_unlock(chan);
/* Handle a Configure Response: apply the remote's verdict on our
 * Configure Request.  SUCCESS applies the negotiated RFC parameters,
 * PENDING may trigger an EFS response or AMP logical link creation,
 * UNACCEPT re-negotiates up to a retry limit, and anything else tears
 * the channel down.  Completes channel setup when both directions are
 * done.
 * NOTE(review): brace/goto/break lines and some buffer declarations are
 * elided in this extract.
 */
4344 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4345 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4348 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4349 u16 scid, flags, result;
4350 struct l2cap_chan *chan;
4351 int len = cmd_len - sizeof(*rsp);
4354 if (cmd_len < sizeof(*rsp))
4357 scid = __le16_to_cpu(rsp->scid);
4358 flags = __le16_to_cpu(rsp->flags);
4359 result = __le16_to_cpu(rsp->result);
4361 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4364 chan = l2cap_get_chan_by_scid(conn, scid);
4369 case L2CAP_CONF_SUCCESS:
/* Remote accepted: record the negotiated RFC parameters */
4370 l2cap_conf_rfc_get(chan, rsp->data, len);
4371 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4374 case L2CAP_CONF_PENDING:
4375 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4377 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4380 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4383 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR answers immediately; AMP waits for the logical link */
4387 if (!chan->hs_hcon) {
4388 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4391 if (l2cap_check_efs(chan)) {
4392 amp_create_logical_link(chan);
4393 chan->ident = cmd->ident;
4399 case L2CAP_CONF_UNACCEPT:
/* Re-negotiate, bounded by the maximum response count */
4400 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against the re-built request overflowing req[] */
4403 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4404 l2cap_send_disconn_req(chan, ECONNRESET);
4408 /* throw out any old stored conf requests */
4409 result = L2CAP_CONF_SUCCESS;
4410 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4413 l2cap_send_disconn_req(chan, ECONNRESET);
4417 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4418 L2CAP_CONF_REQ, len, req);
4419 chan->num_conf_req++;
4420 if (result != L2CAP_CONF_SUCCESS)
/* Rejected or unknown result: give up on the channel */
4426 l2cap_chan_set_err(chan, ECONNRESET);
4428 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4429 l2cap_send_disconn_req(chan, ECONNRESET);
/* More response fragments to come: wait for them */
4433 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4436 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: finalize the channel */
4438 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4439 set_default_fcs(chan);
4441 if (chan->mode == L2CAP_MODE_ERTM ||
4442 chan->mode == L2CAP_MODE_STREAMING)
4443 err = l2cap_ertm_init(chan);
4446 l2cap_send_disconn_req(chan, -err);
4448 l2cap_chan_ready(chan);
4452 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnect Request: acknowledge with a
 * Disconnect Response, then delete the channel (ECONNRESET to the owner).
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
4456 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4457 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4460 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4461 struct l2cap_disconn_rsp rsp;
4463 struct l2cap_chan *chan;
/* Disconnect request has a fixed size; reject anything else. */
4465 if (cmd_len != sizeof(*req))
4468 scid = __le16_to_cpu(req->scid);
4469 dcid = __le16_to_cpu(req->dcid);
4471 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4473 mutex_lock(&conn->chan_lock);
/* The peer addresses us by our scid, which is its dcid. */
4475 chan = __l2cap_get_chan_by_scid(conn, dcid);
4477 mutex_unlock(&conn->chan_lock);
/* Unknown CID: reply with a Command Reject per spec. */
4478 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4482 l2cap_chan_lock(chan);
4484 rsp.dcid = cpu_to_le16(chan->scid);
4485 rsp.scid = cpu_to_le16(chan->dcid);
4486 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4488 chan->ops->set_shutdown(chan);
/* Hold a reference so chan survives until ops->close() below. */
4490 l2cap_chan_hold(chan);
4491 l2cap_chan_del(chan, ECONNRESET);
4493 l2cap_chan_unlock(chan);
4495 chan->ops->close(chan);
4496 l2cap_chan_put(chan);
4498 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnect Response: the peer confirmed our
 * disconnect, so delete the channel with no error.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
4503 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4504 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4507 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4509 struct l2cap_chan *chan;
4511 if (cmd_len != sizeof(*rsp))
4514 scid = __le16_to_cpu(rsp->scid);
4515 dcid = __le16_to_cpu(rsp->dcid);
4517 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4519 mutex_lock(&conn->chan_lock);
4521 chan = __l2cap_get_chan_by_scid(conn, scid);
4523 mutex_unlock(&conn->chan_lock);
4527 l2cap_chan_lock(chan);
/* Hold a reference so chan survives until ops->close() below. */
4529 l2cap_chan_hold(chan);
4530 l2cap_chan_del(chan, 0);
4532 l2cap_chan_unlock(chan);
4534 chan->ops->close(chan);
4535 l2cap_chan_put(chan);
4537 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request. Answers three cases:
 * feature mask, fixed-channel map, or "not supported" for anything else.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
4542 static inline int l2cap_information_req(struct l2cap_conn *conn,
4543 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4546 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4549 if (cmd_len != sizeof(*req))
4552 type = __le16_to_cpu(req->type);
4554 BT_DBG("type 0x%4.4x", type);
/* Case 1: report our supported feature mask. */
4556 if (type == L2CAP_IT_FEAT_MASK) {
4558 u32 feat_mask = l2cap_feat_mask;
4559 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4560 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4561 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4563 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only advertised with high speed. */
4565 if (conn->hs_enabled)
4566 feat_mask |= L2CAP_FEAT_EXT_FLOW
4567 | L2CAP_FEAT_EXT_WINDOW;
4569 put_unaligned_le32(feat_mask, rsp->data);
4570 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Case 2: report the fixed-channel bitmap. */
4572 } else if (type == L2CAP_IT_FIXED_CHAN) {
4574 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP fixed channel is only present when high speed is on. */
4576 if (conn->hs_enabled)
4577 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4579 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4581 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4582 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4583 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4584 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Case 3: any other info type is not supported. */
4587 struct l2cap_info_rsp rsp;
4588 rsp.type = cpu_to_le16(type);
4589 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4590 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response. Records the peer's
 * feature mask / fixed channels and may start pending connections.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
4597 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4598 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4601 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4604 if (cmd_len < sizeof(*rsp))
4607 type = __le16_to_cpu(rsp->type);
4608 result = __le16_to_cpu(rsp->result);
4610 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4612 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4613 if (cmd->ident != conn->info_ident ||
4614 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4617 cancel_delayed_work(&conn->info_timer);
/* Peer failed the request: mark discovery done and proceed anyway. */
4619 if (result != L2CAP_IR_SUCCESS) {
4620 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4621 conn->info_ident = 0;
4623 l2cap_conn_start(conn);
4629 case L2CAP_IT_FEAT_MASK:
4630 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, query those next;
 * otherwise information gathering is complete. */
4632 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4633 struct l2cap_info_req req;
4634 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4636 conn->info_ident = l2cap_get_ident(conn);
4638 l2cap_send_cmd(conn, conn->info_ident,
4639 L2CAP_INFO_REQ, sizeof(req), &req);
4641 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4642 conn->info_ident = 0;
4644 l2cap_conn_start(conn);
4648 case L2CAP_IT_FIXED_CHAN:
4649 conn->fixed_chan_mask = rsp->data[0];
4650 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4651 conn->info_ident = 0;
4653 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP). Validates the
 * requested controller id and either falls back to a normal BR/EDR
 * connect (amp_id 0) or binds the new channel to an AMP logical link.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
4660 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4661 struct l2cap_cmd_hdr *cmd,
4662 u16 cmd_len, void *data)
4664 struct l2cap_create_chan_req *req = data;
4665 struct l2cap_create_chan_rsp rsp;
4666 struct l2cap_chan *chan;
4667 struct hci_dev *hdev;
4670 if (cmd_len != sizeof(*req))
/* Create Channel only makes sense when high speed is enabled. */
4673 if (!conn->hs_enabled)
4676 psm = le16_to_cpu(req->psm);
4677 scid = le16_to_cpu(req->scid);
4679 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4681 /* For controller id 0 make BR/EDR connection */
4682 if (req->amp_id == AMP_ID_BREDR) {
4683 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4688 /* Validate AMP controller id */
4689 hdev = hci_dev_get(req->amp_id);
/* The id must refer to an AMP controller that is actually up. */
4693 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4698 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4701 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4702 struct hci_conn *hs_hcon;
4704 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No AMP link to the peer: reject with invalid CID. */
4708 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4713 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
/* Bind the channel to the high-speed link; AMP uses no FCS. */
4715 mgr->bredr_chan = chan;
4716 chan->hs_hcon = hs_hcon;
4717 chan->fcs = L2CAP_FCS_NONE;
4718 conn->mtu = hdev->block_mtu;
/* Error path: report a bad AMP controller id to the requester. */
4727 rsp.scid = cpu_to_le16(scid);
4728 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4729 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4731 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send an L2CAP Move Channel Request for chan towards dest_amp_id and
 * arm the move (response) timer. */
4737 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4739 struct l2cap_move_chan_req req;
4742 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
/* Remember the signaling ident so the response can be matched. */
4744 ident = l2cap_get_ident(chan->conn);
4745 chan->ident = ident;
4747 req.icid = cpu_to_le16(chan->scid);
4748 req.dest_amp_id = dest_amp_id;
4750 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4753 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an L2CAP Move Channel Response with the given result, reusing the
 * ident stored from the corresponding request. */
4756 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4758 struct l2cap_move_chan_rsp rsp;
4760 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4762 rsp.icid = cpu_to_le16(chan->dcid);
4763 rsp.result = cpu_to_le16(result);
4765 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send an L2CAP Move Channel Confirm with the given result and arm the
 * move timer while waiting for the Confirm Response. */
4769 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4771 struct l2cap_move_chan_cfm cfm;
4773 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* Fresh ident: confirm is a new signaling exchange. */
4775 chan->ident = l2cap_get_ident(chan->conn);
4777 cfm.icid = cpu_to_le16(chan->scid);
4778 cfm.result = cpu_to_le16(result);
4780 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4783 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare icid — used when no
 * channel object could be located for the move. */
4786 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4788 struct l2cap_move_chan_cfm cfm;
4790 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4792 cfm.icid = cpu_to_le16(icid);
4793 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4795 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirm Response for icid using the requester's
 * ident. */
4799 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4802 struct l2cap_move_chan_cfm_rsp rsp;
4804 BT_DBG("icid 0x%4.4x", icid);
4806 rsp.icid = cpu_to_le16(icid);
4807 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link. Actual
 * link teardown is still a placeholder (see comment below). */
4810 static void __release_logical_link(struct l2cap_chan *chan)
4812 chan->hs_hchan = NULL;
4813 chan->hs_hcon = NULL;
4815 /* Placeholder - release the logical link */
/* React to a failed AMP logical-link setup: disconnect a channel that was
 * still being created, or unwind an in-progress move depending on role.
 * NOTE(review): chunk is truncated; some breaks/braces are not visible.
 */
4818 static void l2cap_logical_fail(struct l2cap_chan *chan)
4820 /* Logical link setup failed */
4821 if (chan->state != BT_CONNECTED) {
4822 /* Create channel failure, disconnect */
4823 l2cap_send_disconn_req(chan, ECONNRESET);
4827 switch (chan->move_role) {
4828 case L2CAP_MOVE_ROLE_RESPONDER:
4829 l2cap_move_done(chan);
/* Tell the initiator we cannot support the move. */
4830 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4832 case L2CAP_MOVE_ROLE_INITIATOR:
4833 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4834 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4835 /* Remote has only sent pending or
4836 * success responses, clean up
4838 l2cap_move_done(chan);
4841 /* Other amp move states imply that the move
4842 * has already aborted
4844 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach the
 * hci_chan, answer the pending EFS config exchange, and finish ERTM init
 * if configuration is already done in the inbound direction.
 * NOTE(review): chunk is truncated; some braces are not visible.
 */
4849 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4850 struct hci_chan *hchan)
4852 struct l2cap_conf_rsp rsp;
4854 chan->hs_hchan = hchan;
4855 chan->hs_hcon->l2cap_data = chan->conn;
4857 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4859 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4862 set_default_fcs(chan);
4864 err = l2cap_ertm_init(chan);
4866 l2cap_send_disconn_req(chan, -err);
4868 l2cap_chan_ready(chan);
/* Advance an in-progress channel move now that the AMP logical link is
 * up, based on move_state and move_role.
 * NOTE(review): chunk is truncated; some breaks/braces are not visible.
 */
4872 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4873 struct hci_chan *hchan)
4875 chan->hs_hcon = hchan->conn;
4876 chan->hs_hcon->l2cap_data = chan->conn;
4878 BT_DBG("move_state %d", chan->move_state);
4880 switch (chan->move_state) {
4881 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4882 /* Move confirm will be sent after a success
4883 * response is received
4885 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4887 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Defer if the local receiver is busy; otherwise confirm
 * (initiator) or respond success (responder). */
4888 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4889 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4890 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4891 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4892 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4893 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4894 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4895 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4899 /* Move was not in expected state, free the channel */
4900 __release_logical_link(chan);
4902 chan->move_state = L2CAP_MOVE_STABLE;
4906 /* Call with chan locked */
/* Logical-link confirmation callback: on failure clean up; on success
 * finish either channel creation or a channel move, depending on state.
 * NOTE(review): chunk is truncated; the status check and some braces are
 * not visible here. */
4907 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4910 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4913 l2cap_logical_fail(chan);
4914 __release_logical_link(chan);
4918 if (chan->state != BT_CONNECTED) {
4919 /* Ignore logical link if channel is on BR/EDR */
4920 if (chan->local_amp_id != AMP_ID_BREDR)
4921 l2cap_logical_finish_create(chan, hchan);
4923 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel between BR/EDR and AMP as the initiator. From
 * BR/EDR this waits for physical link preparation; from AMP it sends the
 * Move Channel Request (destination 0 = back to BR/EDR).
 * NOTE(review): chunk is truncated; a return and braces are not visible.
 */
4927 void l2cap_move_start(struct l2cap_chan *chan)
4929 BT_DBG("chan %p", chan);
4931 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Only move off BR/EDR when policy prefers AMP. */
4932 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4934 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4935 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4936 /* Placeholder - start physical link setup */
4938 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4939 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4941 l2cap_move_setup(chan);
4942 l2cap_send_move_chan_req(chan, 0);
/* Finish creating a channel over AMP once the physical link attempt has a
 * result. Outgoing: send Create Channel Request or fall back to BR/EDR.
 * Incoming: answer with success or L2CAP_CR_NO_MEM and, on success, start
 * configuration.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
4946 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4947 u8 local_amp_id, u8 remote_amp_id)
4949 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4950 local_amp_id, remote_amp_id);
/* AMP channels carry no L2CAP FCS. */
4952 chan->fcs = L2CAP_FCS_NONE;
4954 /* Outgoing channel on AMP */
4955 if (chan->state == BT_CONNECT) {
4956 if (result == L2CAP_CR_SUCCESS) {
4957 chan->local_amp_id = local_amp_id;
4958 l2cap_send_create_chan_req(chan, remote_amp_id);
4960 /* Revert to BR/EDR connect */
4961 l2cap_send_conn_req(chan);
4967 /* Incoming channel on AMP */
4968 if (__l2cap_no_conn_pending(chan)) {
4969 struct l2cap_conn_rsp rsp;
4971 rsp.scid = cpu_to_le16(chan->dcid);
4972 rsp.dcid = cpu_to_le16(chan->scid);
4974 if (result == L2CAP_CR_SUCCESS) {
4975 /* Send successful response */
4976 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4977 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4979 /* Send negative response */
4980 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4981 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4984 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* On success kick off the configuration exchange. */
4987 if (result == L2CAP_CR_SUCCESS) {
4988 l2cap_state_change(chan, BT_CONFIG);
4989 set_bit(CONF_REQ_SENT, &chan->conf_state);
4990 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4992 l2cap_build_conf_req(chan, buf), buf);
4993 chan->num_conf_req++;
/* Initiator side: prepare the channel for moving and send the Move
 * Channel Request towards remote_amp_id. */
4998 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5001 l2cap_move_setup(chan);
5002 chan->move_id = local_amp_id;
5003 chan->move_state = L2CAP_MOVE_WAIT_RSP;
5005 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer the move depending on whether the logical link
 * is already connected, still coming up, or unavailable.
 * NOTE(review): chunk is truncated (e.g. the hchan lookup is still a
 * placeholder); some braces are not visible. */
5008 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5010 struct hci_chan *hchan = NULL;
5012 /* Placeholder - get hci_chan for logical link */
5015 if (hchan->state == BT_CONNECTED) {
5016 /* Logical link is ready to go */
5017 chan->hs_hcon = hchan->conn;
5018 chan->hs_hcon->l2cap_data = chan->conn;
5019 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5020 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5022 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5024 /* Wait for logical link to be ready */
5025 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5028 /* Logical link not available */
5029 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a channel move: as responder, report BAD_ID or NOT_ALLOWED to the
 * peer; then return the channel to a stable state and resume ERTM
 * transmission.
 * NOTE(review): chunk is truncated; some declarations/braces are missing.
 */
5033 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5035 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5037 if (result == -EINVAL)
5038 rsp_result = L2CAP_MR_BAD_ID;
5040 rsp_result = L2CAP_MR_NOT_ALLOWED;
5042 l2cap_send_move_chan_rsp(chan, rsp_result);
5045 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5046 chan->move_state = L2CAP_MOVE_STABLE;
5048 /* Restart data transmission */
5049 l2cap_ertm_send(chan);
5052 /* Invoke with locked chan */
/* Physical-link confirmation: route to channel creation (not yet
 * connected), move cancellation (failure), or the role-specific move
 * continuation.
 * NOTE(review): chunk is truncated; some breaks/braces are not visible.
 */
5053 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5055 u8 local_amp_id = chan->local_amp_id;
5056 u8 remote_amp_id = chan->remote_amp_id;
5058 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5059 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away: nothing to do. */
5061 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
5062 l2cap_chan_unlock(chan);
5066 if (chan->state != BT_CONNECTED) {
5067 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5068 } else if (result != L2CAP_MR_SUCCESS) {
5069 l2cap_do_move_cancel(chan, result);
5071 switch (chan->move_role) {
5072 case L2CAP_MOVE_ROLE_INITIATOR:
5073 l2cap_do_move_initiate(chan, local_amp_id,
5076 case L2CAP_MOVE_ROLE_RESPONDER:
5077 l2cap_do_move_respond(chan, result);
5080 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request. Validates the channel
 * and destination controller, resolves move collisions by bd_addr, then
 * becomes the move responder and replies with an appropriate result.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
5086 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5087 struct l2cap_cmd_hdr *cmd,
5088 u16 cmd_len, void *data)
5090 struct l2cap_move_chan_req *req = data;
5091 struct l2cap_move_chan_rsp rsp;
5092 struct l2cap_chan *chan;
5094 u16 result = L2CAP_MR_NOT_ALLOWED;
5096 if (cmd_len != sizeof(*req))
5099 icid = le16_to_cpu(req->icid);
5101 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves require high speed support on this connection. */
5103 if (!conn->hs_enabled)
5106 chan = l2cap_get_chan_by_dcid(conn, icid);
/* No such channel: reply NOT_ALLOWED directly. */
5108 rsp.icid = cpu_to_le16(icid);
5109 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5110 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5115 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels without a BR/EDR-only
 * policy may be moved. */
5117 if (chan->scid < L2CAP_CID_DYN_START ||
5118 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5119 (chan->mode != L2CAP_MODE_ERTM &&
5120 chan->mode != L2CAP_MODE_STREAMING)) {
5121 result = L2CAP_MR_NOT_ALLOWED;
5122 goto send_move_response;
/* Moving to the controller we are already on is an error. */
5125 if (chan->local_amp_id == req->dest_amp_id) {
5126 result = L2CAP_MR_SAME_ID;
5127 goto send_move_response;
/* Destination AMP controller must exist, be an AMP, and be up. */
5130 if (req->dest_amp_id != AMP_ID_BREDR) {
5131 struct hci_dev *hdev;
5132 hdev = hci_dev_get(req->dest_amp_id);
5133 if (!hdev || hdev->dev_type != HCI_AMP ||
5134 !test_bit(HCI_UP, &hdev->flags)) {
5138 result = L2CAP_MR_BAD_ID;
5139 goto send_move_response;
5144 /* Detect a move collision. Only send a collision response
5145 * if this side has "lost", otherwise proceed with the move.
5146 * The winner has the larger bd_addr.
5148 if ((__chan_is_moving(chan) ||
5149 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5150 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5151 result = L2CAP_MR_COLLISION;
5152 goto send_move_response;
5155 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5156 l2cap_move_setup(chan);
5157 chan->move_id = req->dest_amp_id;
5160 if (req->dest_amp_id == AMP_ID_BREDR) {
5161 /* Moving to BR/EDR */
5162 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5163 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5164 result = L2CAP_MR_PEND;
5166 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5167 result = L2CAP_MR_SUCCESS;
5170 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5171 /* Placeholder - uncomment when amp functions are available */
5172 /*amp_accept_physical(chan, req->dest_amp_id);*/
5173 result = L2CAP_MR_PEND;
5177 l2cap_send_move_chan_rsp(chan, result);
5179 l2cap_chan_unlock(chan);
/* Continue the initiator's move state machine after a SUCCESS or PEND
 * Move Channel Response from the peer.
 * NOTE(review): chunk is truncated; some breaks/braces are not visible.
 */
5184 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5186 struct l2cap_chan *chan;
5187 struct hci_chan *hchan = NULL;
5189 chan = l2cap_get_chan_by_scid(conn, icid);
/* Channel gone: still confirm (unconfirmed) per spec. */
5191 l2cap_send_move_chan_cfm_icid(conn, icid);
/* A PEND response extends the move timeout (ERTX). */
5195 __clear_chan_timer(chan);
5196 if (result == L2CAP_MR_PEND)
5197 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5199 switch (chan->move_state) {
5200 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5201 /* Move confirm will be sent when logical link
5204 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5206 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5207 if (result == L2CAP_MR_PEND) {
5209 } else if (test_bit(CONN_LOCAL_BUSY,
5210 &chan->conn_state)) {
5211 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5213 /* Logical link is up or moving to BR/EDR,
5216 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5217 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5220 case L2CAP_MOVE_WAIT_RSP:
5222 if (result == L2CAP_MR_SUCCESS) {
5223 /* Remote is ready, send confirm immediately
5224 * after logical link is ready
5226 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5228 /* Both logical link and move success
5229 * are required to confirm
5231 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5234 /* Placeholder - get hci_chan for logical link */
5236 /* Logical link not available */
5237 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5241 /* If the logical link is not yet connected, do not
5242 * send confirmation.
5244 if (hchan->state != BT_CONNECTED)
5247 /* Logical link is already ready to go */
5249 chan->hs_hcon = hchan->conn;
5250 chan->hs_hcon->l2cap_data = chan->conn;
5252 if (result == L2CAP_MR_SUCCESS) {
5253 /* Can confirm now */
5254 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5256 /* Now only need move success
5259 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5262 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5265 /* Any other amp move state means the move failed. */
5266 chan->move_id = chan->local_amp_id;
5267 l2cap_move_done(chan);
5268 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5271 l2cap_chan_unlock(chan);
/* Handle a failing Move Channel Response: on collision the loser becomes
 * responder; otherwise the move is cancelled and an UNCONFIRMED confirm
 * is sent.
 * NOTE(review): chunk is truncated; some braces are not visible.
 */
5274 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5277 struct l2cap_chan *chan;
5279 chan = l2cap_get_chan_by_ident(conn, ident);
5281 /* Could not locate channel, icid is best guess */
5282 l2cap_send_move_chan_cfm_icid(conn, icid);
5286 __clear_chan_timer(chan);
5288 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5289 if (result == L2CAP_MR_COLLISION) {
5290 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5292 /* Cleanup - cancel move */
5293 chan->move_id = chan->local_amp_id;
5294 l2cap_move_done(chan);
5298 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5300 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Response: SUCCESS/PEND continue the
 * move, anything else fails it.
 * NOTE(review): chunk is truncated; the return is not visible. */
5303 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5304 struct l2cap_cmd_hdr *cmd,
5305 u16 cmd_len, void *data)
5307 struct l2cap_move_chan_rsp *rsp = data;
5310 if (cmd_len != sizeof(*rsp))
5313 icid = le16_to_cpu(rsp->icid);
5314 result = le16_to_cpu(rsp->result);
5316 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5318 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5319 l2cap_move_continue(conn, icid, result);
5321 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm: commit or roll back the
 * controller switch and always answer with a Confirm Response.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
5326 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5327 struct l2cap_cmd_hdr *cmd,
5328 u16 cmd_len, void *data)
5330 struct l2cap_move_chan_cfm *cfm = data;
5331 struct l2cap_chan *chan;
5334 if (cmd_len != sizeof(*cfm))
5337 icid = le16_to_cpu(cfm->icid);
5338 result = le16_to_cpu(cfm->result);
5340 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5342 chan = l2cap_get_chan_by_dcid(conn, icid);
5344 /* Spec requires a response even if the icid was not found */
5345 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5349 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5350 if (result == L2CAP_MC_CONFIRMED) {
/* Confirmed: adopt the new controller id; if we ended up
 * back on BR/EDR, drop the logical link. */
5351 chan->local_amp_id = chan->move_id;
5352 if (chan->local_amp_id == AMP_ID_BREDR)
5353 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller. */
5355 chan->move_id = chan->local_amp_id;
5358 l2cap_move_done(chan);
5361 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5363 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirm Response: the final step of a
 * move; commit the new controller id and finish.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
5368 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5369 struct l2cap_cmd_hdr *cmd,
5370 u16 cmd_len, void *data)
5372 struct l2cap_move_chan_cfm_rsp *rsp = data;
5373 struct l2cap_chan *chan;
5376 if (cmd_len != sizeof(*rsp))
5379 icid = le16_to_cpu(rsp->icid);
5381 BT_DBG("icid 0x%4.4x", icid);
5383 chan = l2cap_get_chan_by_scid(conn, icid);
5387 __clear_chan_timer(chan);
5389 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5390 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR with a logical link still held: release it. */
5392 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5393 __release_logical_link(chan);
5395 l2cap_move_done(chan);
5398 l2cap_chan_unlock(chan);
/* Validate LE connection-parameter-update values against the ranges in
 * the Bluetooth Core Specification (LE Connection Update):
 *   - interval: 6..3200 units (7.5 ms .. 4 s), min <= max
 *   - supervision timeout multiplier: 10..3200 (100 ms .. 32 s)
 *   - the supervision timeout must exceed max interval (8x unit ratio)
 *   - slave latency: at most 499 and small enough that
 *     (latency + 1) * max interval still fits inside the timeout
 * Returns 0 if the parameters are acceptable, -EINVAL otherwise.
 *
 * NOTE(review): the visible text of this function was truncated (the
 * to_multiplier parameter and the return statements were missing); this
 * restores the complete upstream form.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed parameters, answer accept/reject, and on accept
 * push the update to the controller.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
5424 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5425 struct l2cap_cmd_hdr *cmd,
5426 u16 cmd_len, u8 *data)
5428 struct hci_conn *hcon = conn->hcon;
5429 struct l2cap_conn_param_update_req *req;
5430 struct l2cap_conn_param_update_rsp rsp;
5431 u16 min, max, latency, to_multiplier;
/* Only the connection master may process this request. */
5434 if (!(hcon->link_mode & HCI_LM_MASTER))
5437 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5440 req = (struct l2cap_conn_param_update_req *) data;
5441 min = __le16_to_cpu(req->min);
5442 max = __le16_to_cpu(req->max);
5443 latency = __le16_to_cpu(req->latency);
5444 to_multiplier = __le16_to_cpu(req->to_multiplier);
5446 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5447 min, max, latency, to_multiplier);
5449 memset(&rsp, 0, sizeof(rsp));
5451 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5453 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5455 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5457 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: apply the new parameters on the LE link. */
5461 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an LE Credit Based Connection Response: on success store the
 * peer's channel parameters and mark the channel ready, otherwise delete
 * it with ECONNREFUSED.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
5466 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5467 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5470 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5471 u16 dcid, mtu, mps, credits, result;
5472 struct l2cap_chan *chan;
5475 if (cmd_len < sizeof(*rsp))
5478 dcid = __le16_to_cpu(rsp->dcid);
5479 mtu = __le16_to_cpu(rsp->mtu);
5480 mps = __le16_to_cpu(rsp->mps);
5481 credits = __le16_to_cpu(rsp->credits);
5482 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum legal LE MTU/MPS; reject smaller values. */
5484 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5487 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5488 dcid, mtu, mps, credits, result);
5490 mutex_lock(&conn->chan_lock);
/* The response is matched by signaling ident, not CID. */
5492 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5500 l2cap_chan_lock(chan);
5503 case L2CAP_CR_SUCCESS:
5507 chan->remote_mps = mps;
5508 chan->tx_credits = credits;
5509 l2cap_chan_ready(chan);
5513 l2cap_chan_del(chan, ECONNREFUSED);
5517 l2cap_chan_unlock(chan);
5520 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler. Echo requests are
 * answered inline; unknown opcodes are logged as errors.
 * NOTE(review): chunk is truncated; break statements are not visible.
 */
5525 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5526 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5531 switch (cmd->code) {
5532 case L2CAP_COMMAND_REJ:
5533 l2cap_command_rej(conn, cmd, cmd_len, data);
5536 case L2CAP_CONN_REQ:
5537 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share a handler. */
5540 case L2CAP_CONN_RSP:
5541 case L2CAP_CREATE_CHAN_RSP:
5542 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5545 case L2CAP_CONF_REQ:
5546 err = l2cap_config_req(conn, cmd, cmd_len, data);
5549 case L2CAP_CONF_RSP:
5550 l2cap_config_rsp(conn, cmd, cmd_len, data);
5553 case L2CAP_DISCONN_REQ:
5554 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5557 case L2CAP_DISCONN_RSP:
5558 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo the request payload straight back. */
5561 case L2CAP_ECHO_REQ:
5562 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5565 case L2CAP_ECHO_RSP:
5568 case L2CAP_INFO_REQ:
5569 err = l2cap_information_req(conn, cmd, cmd_len, data);
5572 case L2CAP_INFO_RSP:
5573 l2cap_information_rsp(conn, cmd, cmd_len, data);
5576 case L2CAP_CREATE_CHAN_REQ:
5577 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5580 case L2CAP_MOVE_CHAN_REQ:
5581 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5584 case L2CAP_MOVE_CHAN_RSP:
5585 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5588 case L2CAP_MOVE_CHAN_CFM:
5589 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5592 case L2CAP_MOVE_CHAN_CFM_RSP:
5593 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5597 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request: find a listener for the
 * PSM, perform security and duplicate-CID checks, create the channel and
 * answer with a connection response (possibly PEND if setup is deferred).
 * NOTE(review): chunk is truncated; some returns/labels are not visible.
 */
5605 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5606 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5609 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5610 struct l2cap_le_conn_rsp rsp;
5611 struct l2cap_chan *chan, *pchan;
5612 u16 dcid, scid, credits, mtu, mps;
5616 if (cmd_len != sizeof(*req))
5619 scid = __le16_to_cpu(req->scid);
5620 mtu = __le16_to_cpu(req->mtu);
5621 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum legal LE MTU/MPS. */
5626 if (mtu < 23 || mps < 23)
5629 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5632 /* Check if we have socket listening on psm */
5633 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5634 &conn->hcon->dst, LE_LINK);
5636 result = L2CAP_CR_BAD_PSM;
5641 mutex_lock(&conn->chan_lock);
5642 l2cap_chan_lock(pchan);
/* The link must meet the listener's security level. */
5644 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5645 result = L2CAP_CR_AUTHENTICATION;
5647 goto response_unlock;
5650 /* Check if we already have channel with that dcid */
5651 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5652 result = L2CAP_CR_NO_MEM;
5654 goto response_unlock;
5657 chan = pchan->ops->new_connection(pchan);
5659 result = L2CAP_CR_NO_MEM;
5660 goto response_unlock;
/* Copy addressing info from the underlying LE link. */
5663 bacpy(&chan->src, &conn->hcon->src);
5664 bacpy(&chan->dst, &conn->hcon->dst);
5665 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5666 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5670 chan->remote_mps = mps;
5671 chan->tx_credits = __le16_to_cpu(req->credits);
5673 __l2cap_chan_add(conn, chan);
5675 credits = chan->rx_credits;
5677 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5679 chan->ident = cmd->ident;
/* Deferred setup keeps the response PENDing until userspace accepts. */
5681 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5682 l2cap_state_change(chan, BT_CONNECT2);
5683 result = L2CAP_CR_PEND;
5684 chan->ops->defer(chan);
5686 l2cap_chan_ready(chan);
5687 result = L2CAP_CR_SUCCESS;
5691 l2cap_chan_unlock(pchan);
5692 mutex_unlock(&conn->chan_lock);
5694 if (result == L2CAP_CR_PEND)
5699 rsp.mtu = cpu_to_le16(chan->imtu);
5700 rsp.mps = cpu_to_le16(chan->mps);
5706 rsp.dcid = cpu_to_le16(dcid);
5707 rsp.credits = cpu_to_le16(credits);
5708 rsp.result = cpu_to_le16(result);
5710 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer's credits and
 * flush as much of the queued TX data as the new credit total allows.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
5715 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5716 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5719 struct l2cap_le_credits *pkt;
5720 struct l2cap_chan *chan;
5723 if (cmd_len != sizeof(*pkt))
5726 pkt = (struct l2cap_le_credits *) data;
5727 cid = __le16_to_cpu(pkt->cid);
5728 credits = __le16_to_cpu(pkt->credits);
5730 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5732 chan = l2cap_get_chan_by_dcid(conn, cid);
5736 chan->tx_credits += credits;
/* Drain queued frames while credits remain. */
5738 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5739 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Credits left over: let the owner resume sending. */
5743 if (chan->tx_credits)
5744 chan->ops->resume(chan);
5746 l2cap_chan_unlock(chan);
/* Dispatch one LE signaling command. When LE CoC support is disabled the
 * credit-based opcodes are filtered out first; unknown opcodes are
 * logged.
 * NOTE(review): chunk is truncated; break statements are not visible.
 */
5751 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5752 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
/* With LE CoC disabled, reject the connection-oriented opcodes. */
5757 if (!enable_lecoc) {
5758 switch (cmd->code) {
5759 case L2CAP_LE_CONN_REQ:
5760 case L2CAP_LE_CONN_RSP:
5761 case L2CAP_LE_CREDITS:
5762 case L2CAP_DISCONN_REQ:
5763 case L2CAP_DISCONN_RSP:
5768 switch (cmd->code) {
5769 case L2CAP_COMMAND_REJ:
5772 case L2CAP_CONN_PARAM_UPDATE_REQ:
5773 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5776 case L2CAP_CONN_PARAM_UPDATE_RSP:
5779 case L2CAP_LE_CONN_RSP:
5780 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5783 case L2CAP_LE_CONN_REQ:
5784 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5787 case L2CAP_LE_CREDITS:
5788 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5791 case L2CAP_DISCONN_REQ:
5792 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5795 case L2CAP_DISCONN_RSP:
5796 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5800 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the LE signaling channel: validate the single command header in
 * skb, dispatch it, and send a Command Reject if the handler failed.
 * NOTE(review): chunk is truncated; some returns/braces are not visible.
 */
5808 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5809 struct sk_buff *skb)
5811 struct hci_conn *hcon = conn->hcon;
5812 struct l2cap_cmd_hdr *cmd;
/* Only valid on LE links. */
5816 if (hcon->type != LE_LINK)
5819 if (skb->len < L2CAP_CMD_HDR_SIZE)
5822 cmd = (void *) skb->data;
5823 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5825 len = le16_to_cpu(cmd->len);
5827 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* The declared length must match the payload and ident be nonzero. */
5829 if (len != skb->len || !cmd->ident) {
5830 BT_DBG("corrupted command");
5834 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5836 struct l2cap_cmd_rej_unk rej;
5838 BT_ERR("Wrong link type (%d)", err);
5840 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5841 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Parse the BR/EDR signaling channel: a single PDU may contain several
 * commands, so walk the buffer header-by-header, dispatching each to
 * l2cap_bredr_sig_cmd() and replying with a Command Reject on error.
 * Raw sockets get a copy of the whole PDU first via l2cap_raw_recv().
 */
5849 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5850 struct sk_buff *skb)
5852 struct hci_conn *hcon = conn->hcon;
5853 u8 *data = skb->data;
5855 struct l2cap_cmd_hdr cmd;
5858 l2cap_raw_recv(conn, skb);
/* BR/EDR signaling only on an ACL link. */
5860 if (hcon->type != ACL_LINK)
5863 while (len >= L2CAP_CMD_HDR_SIZE) {
5865 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5866 data += L2CAP_CMD_HDR_SIZE;
5867 len -= L2CAP_CMD_HDR_SIZE;
5869 cmd_len = le16_to_cpu(cmd.len);
5871 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* A command cannot claim more bytes than remain, and ident 0 is
 * invalid; stop parsing on corruption. */
5874 if (cmd_len > len || !cmd.ident) {
5875 BT_DBG("corrupted command");
5879 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5881 struct l2cap_cmd_rej_unk rej;
5883 BT_ERR("Wrong link type (%d)", err);
5885 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5886 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 Frame Check Sequence of an ERTM/streaming frame.
 * The FCS covers the L2CAP header (which has already been pulled, hence
 * the skb->data - hdr_size backtrack) plus the payload. Trims the FCS
 * bytes off the skb so callers see only payload. Error return on
 * mismatch is elided from this listing.
 */
5898 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5900 u16 our_fcs, rcv_fcs;
/* Extended control fields imply the larger extended header. */
5903 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5904 hdr_size = L2CAP_EXT_HDR_SIZE;
5906 hdr_size = L2CAP_ENH_HDR_SIZE;
5908 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After the trim, skb->data + skb->len points at the received FCS. */
5909 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5910 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5911 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5913 if (our_fcs != rcv_fcs)
/* Reply to a poll (P=1) from the peer: send an RNR if we are locally
 * busy, otherwise flush pending I-frames; if none of those carried the
 * required F-bit, fall back to an RR S-frame so the poll is answered.
 */
5919 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5921 struct l2cap_ctrl control;
5923 BT_DBG("chan %p", chan);
5925 memset(&control, 0, sizeof(control));
5928 control.reqseq = chan->buffer_seq;
/* Mark that the final bit must be sent in the next frame. */
5929 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5931 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5932 control.super = L2CAP_SUPER_RNR;
5933 l2cap_send_sframe(chan, &control);
/* Remote just left busy state; restart retransmission timing if we
 * still have unacked frames outstanding. */
5936 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5937 chan->unacked_frames > 0)
5938 __set_retrans_timer(chan);
5940 /* Send pending iframes */
5941 l2cap_ertm_send(chan);
5943 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5944 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5945 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5948 control.super = L2CAP_SUPER_RR;
5949 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the list tail through
 * *last_frag so appends stay O(1), and keep the parent skb's length
 * accounting (len/data_len/truesize) consistent.
 */
5953 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5954 struct sk_buff **last_frag)
5956 /* skb->len reflects data in skb as well as all fragments
5957 * skb->data_len reflects only data in fragments
/* First fragment starts the list; otherwise chain after the tail. */
5959 if (!skb_has_frag_list(skb))
5960 skb_shinfo(skb)->frag_list = new_frag;
5962 new_frag->next = NULL;
5964 (*last_frag)->next = new_frag;
5965 *last_frag = new_frag;
5967 skb->len += new_frag->len;
5968 skb->data_len += new_frag->len;
5969 skb->truesize += new_frag->truesize;
/* Reassemble segmented SDUs from ERTM/streaming I-frames according to
 * the SAR bits: unsegmented frames go straight to the channel's recv
 * callback; START/CONTINUE/END frames are accumulated in chan->sdu via
 * append_skb_frag() and delivered once chan->sdu_len bytes arrived.
 * Error/cleanup paths (kfree_skb of a partial SDU) are partly elided.
 */
5972 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5973 struct l2cap_ctrl *control)
5977 switch (control->sar) {
5978 case L2CAP_SAR_UNSEGMENTED:
5982 err = chan->ops->recv(chan, skb);
5985 case L2CAP_SAR_START:
/* A START frame carries the total SDU length as a leading LE16. */
5989 chan->sdu_len = get_unaligned_le16(skb->data);
5990 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU is a protocol violation. */
5992 if (chan->sdu_len > chan->imtu) {
5997 if (skb->len >= chan->sdu_len)
6001 chan->sdu_last_frag = skb;
6007 case L2CAP_SAR_CONTINUE:
6011 append_skb_frag(chan->sdu, skb,
6012 &chan->sdu_last_frag);
/* A CONTINUE frame must not reach the full SDU length yet. */
6015 if (chan->sdu->len >= chan->sdu_len)
6025 append_skb_frag(chan->sdu, skb,
6026 &chan->sdu_last_frag);
/* The END frame must complete the SDU exactly. */
6029 if (chan->sdu->len != chan->sdu_len)
6032 err = chan->ops->recv(chan, chan->sdu);
6035 /* Reassembly complete */
6037 chan->sdu_last_frag = NULL;
/* Error path: drop the partially assembled SDU. */
6045 kfree_skb(chan->sdu);
6047 chan->sdu_last_frag = NULL;
/* Re-segment pending outbound data after the channel's MTU changed
 * (called from the channel-move paths). Body elided from this listing;
 * presumably a stub/placeholder — TODO confirm in full source. */
6054 static int l2cap_resegment(struct l2cap_chan *chan)
/* Inform the ERTM TX state machine that the local receive side entered
 * or left the busy state. No-op for non-ERTM channels. */
6060 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6064 if (chan->mode != L2CAP_MODE_ERTM)
6067 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6068 l2cap_tx(chan, NULL, NULL, event);
/* After an SREJ hole is filled, deliver I-frames queued in srej_q in
 * sequence order until the next gap. When the queue empties, leave the
 * SREJ_SENT state and ack what was consumed.
 */
6071 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6074 /* Pass sequential frames to l2cap_reassemble_sdu()
6075 * until a gap is encountered.
6078 BT_DBG("chan %p", chan);
/* Stop delivering if the local side becomes busy. */
6080 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6081 struct sk_buff *skb;
6082 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6083 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6085 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6090 skb_unlink(skb, &chan->srej_q);
6091 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6092 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
6097 if (skb_queue_empty(&chan->srej_q)) {
6098 chan->rx_state = L2CAP_RX_STATE_RECV;
6099 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, enforce the per-frame
 * retry limit, and retransmit the single requested frame, honoring the
 * P/F bit rules (an SREJ with P=1 demands an F=1 response; an SREJ with
 * F=1 may complete a WAIT_F exchange).
 */
6105 static void l2cap_handle_srej(struct l2cap_chan *chan,
6106 struct l2cap_ctrl *control)
6108 struct sk_buff *skb;
6110 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq requests a frame never sent — fatal. */
6112 if (control->reqseq == chan->next_tx_seq) {
6113 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6114 l2cap_send_disconn_req(chan, ECONNRESET);
6118 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6121 BT_DBG("Seq %d not available for retransmission",
/* Too many retransmissions of this frame: give up on the link. */
6126 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
6127 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6128 l2cap_send_disconn_req(chan, ECONNRESET);
6132 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6134 if (control->poll) {
6135 l2cap_pass_to_tx(chan, control);
/* P=1: the retransmitted frame must carry the F bit. */
6137 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6138 l2cap_retransmit(chan, control);
6139 l2cap_ertm_send(chan);
6141 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6142 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6143 chan->srej_save_reqseq = control->reqseq;
6146 l2cap_pass_to_tx_fbit(chan, control);
6148 if (control->final) {
/* F=1 that does not match the saved SREJ exchange still triggers
 * a retransmit of the requested frame. */
6149 if (chan->srej_save_reqseq != control->reqseq ||
6150 !test_and_clear_bit(CONN_SREJ_ACT,
6152 l2cap_retransmit(chan, control);
6154 l2cap_retransmit(chan, control);
6155 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6156 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6157 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq and the retry limit,
 * then retransmit all unacked frames from reqseq onward. With F=1 the
 * retransmit is suppressed if this REJ was already acted on (REJ_ACT).
 */
6163 static void l2cap_handle_rej(struct l2cap_chan *chan,
6164 struct l2cap_ctrl *control)
6166 struct sk_buff *skb;
6168 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq rejects a frame never sent — fatal. */
6170 if (control->reqseq == chan->next_tx_seq) {
6171 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6172 l2cap_send_disconn_req(chan, ECONNRESET);
6176 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6178 if (chan->max_tx && skb &&
6179 bt_cb(skb)->control.retries >= chan->max_tx) {
6180 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6181 l2cap_send_disconn_req(chan, ECONNRESET);
6185 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6187 l2cap_pass_to_tx(chan, control);
6189 if (control->final) {
/* F=1: only retransmit if this REJ was not already handled. */
6190 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6191 l2cap_retransmit_all(chan, control);
6193 l2cap_retransmit_all(chan, control);
6194 l2cap_ertm_send(chan);
6195 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6196 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's TxSeq relative to the receive window:
 * EXPECTED, EXPECTED_SREJ (fills the head of the SREJ list), DUPLICATE,
 * DUPLICATE_SREJ, UNEXPECTED (gap → send SREJs), UNEXPECTED_SREJ,
 * INVALID, or INVALID_IGNORE. Modular sequence arithmetic is done with
 * __seq_offset() against last_acked_seq.
 */
6200 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6202 BT_DBG("chan %p, txseq %d", chan, txseq);
6204 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6205 chan->expected_tx_seq);
/* While in SREJ_SENT, first weed out frames outside the TX window. */
6207 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6208 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6210 /* See notes below regarding "double poll" and
6213 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6214 BT_DBG("Invalid/Ignore - after SREJ");
6215 return L2CAP_TXSEQ_INVALID_IGNORE;
6217 BT_DBG("Invalid - in window after SREJ sent");
6218 return L2CAP_TXSEQ_INVALID;
/* Frame at the head of the SREJ list is the one we asked for. */
6222 if (chan->srej_list.head == txseq) {
6223 BT_DBG("Expected SREJ");
6224 return L2CAP_TXSEQ_EXPECTED_SREJ;
6227 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6228 BT_DBG("Duplicate SREJ - txseq already stored");
6229 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6232 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6233 BT_DBG("Unexpected SREJ - not requested");
6234 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6238 if (chan->expected_tx_seq == txseq) {
6239 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6241 BT_DBG("Invalid - txseq outside tx window");
6242 return L2CAP_TXSEQ_INVALID;
6245 return L2CAP_TXSEQ_EXPECTED;
/* Anything before expected_tx_seq was already received. */
6249 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6250 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6251 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6252 return L2CAP_TXSEQ_DUPLICATE;
6255 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6256 /* A source of invalid packets is a "double poll" condition,
6257 * where delays cause us to send multiple poll packets. If
6258 * the remote stack receives and processes both polls,
6259 * sequence numbers can wrap around in such a way that a
6260 * resent frame has a sequence number that looks like new data
6261 * with a sequence gap. This would trigger an erroneous SREJ
6264 * Fortunately, this is impossible with a tx window that's
6265 * less than half of the maximum sequence number, which allows
6266 * invalid frames to be safely ignored.
6268 * With tx window sizes greater than half of the tx window
6269 * maximum, the frame is invalid and cannot be ignored. This
6270 * causes a disconnect.
6273 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6274 BT_DBG("Invalid/Ignore - txseq outside tx window");
6275 return L2CAP_TXSEQ_INVALID_IGNORE;
6277 BT_DBG("Invalid - txseq outside tx window");
6278 return L2CAP_TXSEQ_INVALID;
6281 BT_DBG("Unexpected - txseq indicates missing frames");
6282 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, RECV state: process I-frames by TxSeq class
 * (deliver expected, queue+SREJ on a gap, drop duplicates/invalid) and
 * S-frames (RR/REJ/RNR/SREJ). Frames that were not queued or consumed
 * are freed at the end (skb_in_use tracks ownership transfer; the
 * elided lines set it to true on the queueing paths).
 */
6286 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6287 struct l2cap_ctrl *control,
6288 struct sk_buff *skb, u8 event)
6291 bool skb_in_use = false;
6293 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6297 case L2CAP_EV_RECV_IFRAME:
6298 switch (l2cap_classify_txseq(chan, control->txseq)) {
6299 case L2CAP_TXSEQ_EXPECTED:
6300 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop; it will be recovered as missing later. */
6302 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6303 BT_DBG("Busy, discarding expected seq %d",
6308 chan->expected_tx_seq = __next_seq(chan,
6311 chan->buffer_seq = chan->expected_tx_seq;
6314 err = l2cap_reassemble_sdu(chan, skb, control);
/* F=1 acks an outstanding REJ exchange; otherwise retransmit. */
6318 if (control->final) {
6319 if (!test_and_clear_bit(CONN_REJ_ACT,
6320 &chan->conn_state)) {
6322 l2cap_retransmit_all(chan, control);
6323 l2cap_ertm_send(chan);
6327 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6328 l2cap_send_ack(chan);
6330 case L2CAP_TXSEQ_UNEXPECTED:
6331 l2cap_pass_to_tx(chan, control);
6333 /* Can't issue SREJ frames in the local busy state.
6334 * Drop this frame, it will be seen as missing
6335 * when local busy is exited.
6337 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6338 BT_DBG("Busy, discarding unexpected seq %d",
6343 /* There was a gap in the sequence, so an SREJ
6344 * must be sent for each missing frame. The
6345 * current frame is stored for later use.
6347 skb_queue_tail(&chan->srej_q, skb);
6349 BT_DBG("Queued %p (queue len %d)", skb,
6350 skb_queue_len(&chan->srej_q));
6352 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6353 l2cap_seq_list_clear(&chan->srej_list);
6354 l2cap_send_srej(chan, control->txseq);
6356 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6358 case L2CAP_TXSEQ_DUPLICATE:
6359 l2cap_pass_to_tx(chan, control);
6361 case L2CAP_TXSEQ_INVALID_IGNORE:
6363 case L2CAP_TXSEQ_INVALID:
6365 l2cap_send_disconn_req(chan, ECONNRESET);
6369 case L2CAP_EV_RECV_RR:
6370 l2cap_pass_to_tx(chan, control);
6371 if (control->final) {
6372 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Do not retransmit while an AMP channel move is in progress. */
6374 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6375 !__chan_is_moving(chan)) {
6377 l2cap_retransmit_all(chan, control);
6380 l2cap_ertm_send(chan);
6381 } else if (control->poll) {
6382 l2cap_send_i_or_rr_or_rnr(chan);
6384 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6385 &chan->conn_state) &&
6386 chan->unacked_frames)
6387 __set_retrans_timer(chan);
6389 l2cap_ertm_send(chan);
6392 case L2CAP_EV_RECV_RNR:
6393 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6394 l2cap_pass_to_tx(chan, control);
6395 if (control && control->poll) {
6396 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6397 l2cap_send_rr_or_rnr(chan, 0);
6399 __clear_retrans_timer(chan);
6400 l2cap_seq_list_clear(&chan->retrans_list);
6402 case L2CAP_EV_RECV_REJ:
6403 l2cap_handle_rej(chan, control);
6405 case L2CAP_EV_RECV_SREJ:
6406 l2cap_handle_srej(chan, control);
/* Free any frame whose ownership was not transferred above. */
6412 if (skb && !skb_in_use) {
6413 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state: SREJs are outstanding, so
 * in-sequence and requested frames are queued in srej_q for ordered
 * delivery by l2cap_rx_queued_iframes(); new gaps generate further
 * SREJs. S-frames are handled analogously to the RECV state but keep
 * the SREJ recovery going (l2cap_send_srej_tail on polls).
 */
6420 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6421 struct l2cap_ctrl *control,
6422 struct sk_buff *skb, u8 event)
6425 u16 txseq = control->txseq;
6426 bool skb_in_use = false;
6428 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6432 case L2CAP_EV_RECV_IFRAME:
6433 switch (l2cap_classify_txseq(chan, txseq)) {
6434 case L2CAP_TXSEQ_EXPECTED:
6435 /* Keep frame for reassembly later */
6436 l2cap_pass_to_tx(chan, control);
6437 skb_queue_tail(&chan->srej_q, skb);
6439 BT_DBG("Queued %p (queue len %d)", skb,
6440 skb_queue_len(&chan->srej_q));
6442 chan->expected_tx_seq = __next_seq(chan, txseq);
/* The frame we SREJ'd for arrived: pop it off the request list
 * and try to flush the ordered backlog. */
6444 case L2CAP_TXSEQ_EXPECTED_SREJ:
6445 l2cap_seq_list_pop(&chan->srej_list);
6447 l2cap_pass_to_tx(chan, control);
6448 skb_queue_tail(&chan->srej_q, skb);
6450 BT_DBG("Queued %p (queue len %d)", skb,
6451 skb_queue_len(&chan->srej_q));
6453 err = l2cap_rx_queued_iframes(chan);
6458 case L2CAP_TXSEQ_UNEXPECTED:
6459 /* Got a frame that can't be reassembled yet.
6460 * Save it for later, and send SREJs to cover
6461 * the missing frames.
6463 skb_queue_tail(&chan->srej_q, skb);
6465 BT_DBG("Queued %p (queue len %d)", skb,
6466 skb_queue_len(&chan->srej_q));
6468 l2cap_pass_to_tx(chan, control);
6469 l2cap_send_srej(chan, control->txseq);
6471 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6472 /* This frame was requested with an SREJ, but
6473 * some expected retransmitted frames are
6474 * missing. Request retransmission of missing
6477 skb_queue_tail(&chan->srej_q, skb);
6479 BT_DBG("Queued %p (queue len %d)", skb,
6480 skb_queue_len(&chan->srej_q));
6482 l2cap_pass_to_tx(chan, control);
6483 l2cap_send_srej_list(chan, control->txseq);
6485 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6486 /* We've already queued this frame. Drop this copy. */
6487 l2cap_pass_to_tx(chan, control);
6489 case L2CAP_TXSEQ_DUPLICATE:
6490 /* Expecting a later sequence number, so this frame
6491 * was already received. Ignore it completely.
6494 case L2CAP_TXSEQ_INVALID_IGNORE:
6496 case L2CAP_TXSEQ_INVALID:
6498 l2cap_send_disconn_req(chan, ECONNRESET);
6502 case L2CAP_EV_RECV_RR:
6503 l2cap_pass_to_tx(chan, control);
6504 if (control->final) {
6505 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6507 if (!test_and_clear_bit(CONN_REJ_ACT,
6508 &chan->conn_state)) {
6510 l2cap_retransmit_all(chan, control);
6513 l2cap_ertm_send(chan);
6514 } else if (control->poll) {
6515 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6516 &chan->conn_state) &&
6517 chan->unacked_frames) {
6518 __set_retrans_timer(chan);
/* Poll while in SREJ recovery: answer with the tail SREJ (F=1). */
6521 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6522 l2cap_send_srej_tail(chan);
6524 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6525 &chan->conn_state) &&
6526 chan->unacked_frames)
6527 __set_retrans_timer(chan);
6529 l2cap_send_ack(chan);
6532 case L2CAP_EV_RECV_RNR:
6533 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6534 l2cap_pass_to_tx(chan, control);
6535 if (control->poll) {
6536 l2cap_send_srej_tail(chan);
/* RNR without P: just acknowledge with a plain RR. */
6538 struct l2cap_ctrl rr_control;
6539 memset(&rr_control, 0, sizeof(rr_control));
6540 rr_control.sframe = 1;
6541 rr_control.super = L2CAP_SUPER_RR;
6542 rr_control.reqseq = chan->buffer_seq;
6543 l2cap_send_sframe(chan, &rr_control);
6547 case L2CAP_EV_RECV_REJ:
6548 l2cap_handle_rej(chan, control);
6550 case L2CAP_EV_RECV_SREJ:
6551 l2cap_handle_srej(chan, control);
/* Free any frame whose ownership was not transferred above. */
6555 if (skb && !skb_in_use) {
6556 BT_DBG("Freeing %p", skb);
/* Finalize an AMP channel move: return to RECV state, pick the MTU of
 * the link now carrying the channel (AMP block MTU vs BR/EDR ACL MTU;
 * the selecting condition is elided) and re-segment pending data. */
6563 static int l2cap_finish_move(struct l2cap_chan *chan)
6565 BT_DBG("chan %p", chan);
6567 chan->rx_state = L2CAP_RX_STATE_RECV;
6570 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6572 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6574 return l2cap_resegment(chan);
/* RX state machine, WAIT_P (channel move): wait for a frame with P=1
 * from the peer, then rewind the TX side to the peer's reqseq, finish
 * the move, and answer the poll with F=1. The received frame is then
 * reprocessed through the RECV state handler.
 */
6577 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6578 struct l2cap_ctrl *control,
6579 struct sk_buff *skb, u8 event)
6583 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6589 l2cap_process_reqseq(chan, control->reqseq);
6591 if (!skb_queue_empty(&chan->tx_q))
6592 chan->tx_send_head = skb_peek(&chan->tx_q)/* restart from queue head */;
6594 chan->tx_send_head = NULL;
6596 /* Rewind next_tx_seq to the point expected
6599 chan->next_tx_seq = control->reqseq;
6600 chan->unacked_frames = 0;
6602 err = l2cap_finish_move(chan);
6606 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6607 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frames are re-run through the normal RECV handling; S-frames
 * are forwarded without the skb. */
6609 if (event == L2CAP_EV_RECV_IFRAME)
6612 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state machine, WAIT_F (channel move): ignore everything until a
 * frame with F=1 arrives, then rewind TX to the peer's reqseq, adopt
 * the new link's MTU, re-segment, and process the frame normally.
 */
6615 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6616 struct l2cap_ctrl *control,
6617 struct sk_buff *skb, u8 event)
6621 if (!control->final)
6624 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state)/* peer answered our poll */;
6626 chan->rx_state = L2CAP_RX_STATE_RECV;
6627 l2cap_process_reqseq(chan, control->reqseq);
6629 if (!skb_queue_empty(&chan->tx_q))
6630 chan->tx_send_head = skb_peek(&chan->tx_q);
6632 chan->tx_send_head = NULL;
6634 /* Rewind next_tx_seq to the point expected
6637 chan->next_tx_seq = control->reqseq;
6638 chan->unacked_frames = 0;
/* MTU of the link now carrying the channel (condition elided). */
6641 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6643 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6645 err = l2cap_resegment(chan);
6648 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if reqseq acknowledges a frame that is currently in
 * flight, i.e. lies in (expected_ack_seq, next_tx_seq] modulo the
 * sequence space. */
6653 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6655 /* Make sure reqseq is for a packet that has been sent but not acked */
6658 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6659 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* ERTM RX entry point: sanity-check the frame's reqseq against the
 * unacked window, then dispatch to the current RX state handler; an
 * out-of-window reqseq is a fatal protocol error (disconnect).
 */
6662 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6663 struct sk_buff *skb, u8 event)
6667 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6668 control, skb, event, chan->rx_state);
6670 if (__valid_reqseq(chan, control->reqseq)) {
6671 switch (chan->rx_state) {
6672 case L2CAP_RX_STATE_RECV:
6673 err = l2cap_rx_state_recv(chan, control, skb, event);
6675 case L2CAP_RX_STATE_SREJ_SENT:
6676 err = l2cap_rx_state_srej_sent(chan, control, skb,
6679 case L2CAP_RX_STATE_WAIT_P:
6680 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6682 case L2CAP_RX_STATE_WAIT_F:
6683 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* Bad reqseq: the peer acked something we never sent. */
6690 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6691 control->reqseq, chan->next_tx_seq,
6692 chan->expected_ack_seq);
6693 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode RX: deliver only frames whose TxSeq is exactly the
 * expected one; anything else is dropped (streaming has no
 * retransmission) and the expected sequence simply resynchronizes to
 * the received TxSeq + 1. A failed reassembly discards the partial SDU.
 */
6699 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6700 struct sk_buff *skb)
6704 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6707 if (l2cap_classify_txseq(chan, control->txseq) ==
6708 L2CAP_TXSEQ_EXPECTED) {
6709 l2cap_pass_to_tx(chan, control);
6711 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6712 __next_seq(chan, chan->buffer_seq));
6714 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6716 l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly error path: drop any partially built SDU. */
6719 kfree_skb(chan->sdu);
6722 chan->sdu_last_frag = NULL;
6726 BT_DBG("Freeing %p", skb);
/* Resynchronize: treat the received TxSeq as the new baseline. */
6731 chan->last_acked_seq = control->txseq;
6732 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Common RX path for ERTM and streaming channels: unpack the control
 * field, verify the FCS, validate the payload length against MPS, then
 * route I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames to
 * l2cap_rx() via the super-field → event table. Invalid F/P bit
 * combinations or trailing S-frame bytes force a disconnect.
 */
6737 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6739 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6743 __unpack_control(chan, skb);
6748 * We can just drop the corrupted I-frame here.
6749 * Receiver will miss it and start proper recovery
6750 * procedures and ask for retransmission.
6752 if (l2cap_check_fcs(chan, skb))
/* Compute the pure payload length: strip SDU-length and FCS bytes. */
6755 if (!control->sframe && control->sar == L2CAP_SAR_START)
6756 len -= L2CAP_SDULEN_SIZE;
6758 if (chan->fcs == L2CAP_FCS_CRC16)
6759 len -= L2CAP_FCS_SIZE;
6761 if (len > chan->mps) {
6762 l2cap_send_disconn_req(chan, ECONNRESET);
6766 if (!control->sframe) {
6769 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6770 control->sar, control->reqseq, control->final,
6773 /* Validate F-bit - F=0 always valid, F=1 only
6774 * valid in TX WAIT_F
6776 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6779 if (chan->mode != L2CAP_MODE_STREAMING) {
6780 event = L2CAP_EV_RECV_IFRAME;
6781 err = l2cap_rx(chan, control, skb, event);
6783 err = l2cap_stream_rx(chan, control, skb);
6787 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit super field onto RX state-machine events. */
6789 const u8 rx_func_to_event[4] = {
6790 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6791 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6794 /* Only I-frames are expected in streaming mode */
6795 if (chan->mode == L2CAP_MODE_STREAMING)
6798 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6799 control->reqseq, control->final, control->poll,
/* S-frames carry no payload: any leftover bytes are an error. */
6803 BT_ERR("Trailing bytes: %d in sframe", len);
6804 l2cap_send_disconn_req(chan, ECONNRESET);
6808 /* Validate F and P bits */
6809 if (control->final && (control->poll ||
6810 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6813 event = rx_func_to_event[control->super];
6814 if (l2cap_rx(chan, control, skb, event))
6815 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return receive credits to the LE CoC peer, but only once our credit
 * count has dropped below half of the initial allotment, topping the
 * peer back up to L2CAP_LE_MAX_CREDITS. */
6825 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6827 struct l2cap_conn *conn = chan->conn;
6828 struct l2cap_le_credits pkt;
6831 /* We return more credits to the sender only after the amount of
6832 * credits falls below half of the initial amount.
6834 if (chan->rx_credits >= (L2CAP_LE_MAX_CREDITS + 1) / 2)
6837 return_credits = L2CAP_LE_MAX_CREDITS - chan->rx_credits;
6839 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
/* Account locally for the credits we are about to grant. */
6841 chan->rx_credits += return_credits;
6843 pkt.cid = cpu_to_le16(chan->scid);
6844 pkt.credits = cpu_to_le16(return_credits);
6846 chan->ident = l2cap_get_ident(conn);
6848 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* LE CoC data RX: consume one receive credit per PDU, possibly return
 * credits to the peer, then reassemble SDUs (first PDU of an SDU
 * carries the total length as a leading LE16). Oversized or excess data
 * aborts the SDU. Errors that this function handled internally (freeing
 * the skb itself) must not propagate — see the comment near the end.
 */
6851 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* A PDU arriving with zero credits outstanding is a protocol error. */
6855 if (!chan->rx_credits) {
6856 BT_ERR("No credits to receive LE L2CAP data");
6860 if (chan->imtu < skb->len) {
6861 BT_ERR("Too big LE L2CAP PDU");
6866 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6868 l2cap_chan_le_send_credits(chan);
/* Start of a new SDU: read and strip the SDU length prefix. */
6875 sdu_len = get_unaligned_le16(skb->data);
6876 skb_pull(skb, L2CAP_SDULEN_SIZE);
6878 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6879 sdu_len, skb->len, chan->imtu);
6881 if (sdu_len > chan->imtu) {
6882 BT_ERR("Too big LE L2CAP SDU length received");
6887 if (skb->len > sdu_len) {
6888 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU in one PDU: deliver immediately. */
6893 if (skb->len == sdu_len)
6894 return chan->ops->recv(chan, skb);
6897 chan->sdu_len = sdu_len;
6898 chan->sdu_last_frag = skb;
/* Continuation PDU: append to the SDU under assembly. */
6903 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6904 chan->sdu->len, skb->len, chan->sdu_len);
6906 if (chan->sdu->len + skb->len > chan->sdu_len) {
6907 BT_ERR("Too much LE L2CAP data received");
6912 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6915 if (chan->sdu->len == chan->sdu_len) {
6916 err = chan->ops->recv(chan, chan->sdu);
6919 chan->sdu_last_frag = NULL;
/* Error path: drop the partially assembled SDU. */
6927 kfree_skb(chan->sdu);
6929 chan->sdu_last_frag = NULL;
6933 /* We can't return an error here since we took care of the skb
6934 * freeing internally. An error return would cause the caller to
6935 * do a double-free of the skb.
/* Deliver a data PDU to the channel registered for this source CID,
 * handling A2MP channel creation for its fixed CID, and dispatching by
 * channel mode (LE flow control, basic, ERTM/streaming). Unknown CIDs
 * and unconnected channels drop the packet.
 */
6940 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6941 struct sk_buff *skb)
6943 struct l2cap_chan *chan;
6945 chan = l2cap_get_chan_by_scid(conn, cid);
/* The A2MP fixed channel is created on first use. */
6947 if (cid == L2CAP_CID_A2MP) {
6948 chan = a2mp_channel_create(conn, skb);
6954 l2cap_chan_lock(chan);
6956 BT_DBG("unknown cid 0x%4.4x", cid);
6957 /* Drop packet and return */
6963 BT_DBG("chan %p, len %d", chan, skb->len);
6965 if (chan->state != BT_CONNECTED)
6968 switch (chan->mode) {
6969 case L2CAP_MODE_LE_FLOWCTL:
/* A negative return means the skb was already freed internally. */
6970 if (l2cap_le_data_rcv(chan, skb) < 0)
6975 case L2CAP_MODE_BASIC:
6976 /* If socket recv buffers overflows we drop data here
6977 * which is *bad* because L2CAP has to be reliable.
6978 * But we don't have any other choice. L2CAP doesn't
6979 * provide flow control mechanism. */
6981 if (chan->imtu < skb->len)
6984 if (!chan->ops->recv(chan, skb))
6988 case L2CAP_MODE_ERTM:
6989 case L2CAP_MODE_STREAMING:
6990 l2cap_data_rcv(chan, skb);
6994 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7002 l2cap_chan_unlock(chan);
/* Deliver a connectionless (UCD) PDU to the global channel listening on
 * the given PSM, stashing the remote address and PSM in the skb control
 * block so recvmsg() can report them. ACL links only. */
7005 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7006 struct sk_buff *skb)
7008 struct hci_conn *hcon = conn->hcon;
7009 struct l2cap_chan *chan;
7011 if (hcon->type != ACL_LINK)
7014 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7019 BT_DBG("chan %p, len %d", chan, skb->len);
7021 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7024 if (chan->imtu < skb->len)
7027 /* Store remote BD_ADDR and PSM for msg_name */
7028 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
7029 bt_cb(skb)->psm = psm;
7031 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel PDU to the globally registered ATT
 * channel, dropping it for blacklisted remote devices. LE links only. */
7038 static void l2cap_att_channel(struct l2cap_conn *conn,
7039 struct sk_buff *skb)
7041 struct hci_conn *hcon = conn->hcon;
7042 struct l2cap_chan *chan;
7044 if (hcon->type != LE_LINK)
7047 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
7048 &hcon->src, &hcon->dst);
7052 BT_DBG("chan %p, len %d", chan, skb->len);
/* Silently drop traffic from blacklisted devices. */
7054 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
7057 if (chan->imtu < skb->len)
7060 if (!chan->ops->recv(chan, skb))
/* Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header, check the declared length, and route by CID to the signaling,
 * connectionless, ATT, LE-signaling, SMP or per-channel data handlers.
 */
7067 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7069 struct l2cap_hdr *lh = (void *) skb->data;
7073 skb_pull(skb, L2CAP_HDR_SIZE);
7074 cid = __le16_to_cpu(lh->cid);
7075 len = __le16_to_cpu(lh->len);
/* Declared length must match the payload; drop path elided. */
7077 if (len != skb->len) {
7082 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7085 case L2CAP_CID_SIGNALING:
7086 l2cap_sig_channel(conn, skb);
7089 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM right after the header. */
7090 psm = get_unaligned((__le16 *) skb->data);
7091 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7092 l2cap_conless_channel(conn, psm, skb);
7096 l2cap_att_channel(conn, skb);
7099 case L2CAP_CID_LE_SIGNALING:
7100 l2cap_le_sig_channel(conn, skb);
/* SMP rejection tears the whole connection down. */
7104 if (smp_sig_channel(conn, skb))
7105 l2cap_conn_del(conn->hcon, EACCES);
7109 l2cap_data_channel(conn, cid, skb);
7114 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI connect indication: scan listening channels and report whether an
 * incoming ACL connection should be accepted, preferring link-mode bits
 * from sockets bound exactly to this adapter (lm1/exact) over wildcard
 * BDADDR_ANY listeners (lm2). NOTE(review): the line setting `exact`
 * is elided from this listing.
 */
7116 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7118 int exact = 0, lm1 = 0, lm2 = 0;
7119 struct l2cap_chan *c;
7121 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7123 /* Find listening sockets and check their link_mode */
7124 read_lock(&chan_list_lock);
7125 list_for_each_entry(c, &chan_list, global_l) {
7126 if (c->state != BT_LISTEN)
7129 if (!bacmp(&c->src, &hdev->bdaddr)) {
7130 lm1 |= HCI_LM_ACCEPT;
7131 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7132 lm1 |= HCI_LM_MASTER;
7134 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7135 lm2 |= HCI_LM_ACCEPT;
7136 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7137 lm2 |= HCI_LM_MASTER;
7140 read_unlock(&chan_list_lock);
7142 return exact ? lm1 : lm2;
/* HCI connect confirmation: on success create/attach the L2CAP
 * connection and mark it ready; on failure tear it down with the HCI
 * status mapped to an errno. */
7145 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7147 struct l2cap_conn *conn;
7149 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7152 conn = l2cap_conn_add(hcon);
7154 l2cap_conn_ready(conn);
7156 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI disconnect indication: report the reason L2CAP wants to use for
 * the disconnect; defaults to "remote user terminated" when no L2CAP
 * connection state exists (guard condition elided). */
7160 int l2cap_disconn_ind(struct hci_conn *hcon)
7162 struct l2cap_conn *conn = hcon->l2cap_data;
7164 BT_DBG("hcon %p", hcon);
7167 return HCI_ERROR_REMOTE_USER_TERM;
7168 return conn->disc_reason;
/* HCI disconnect confirmation: tear down the L2CAP connection, mapping
 * the HCI reason code to an errno. */
7171 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7173 BT_DBG("hcon %p reason %d", hcon, reason);
7175 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a timeout (MEDIUM security) or closes the
 * channel outright (HIGH security); regaining it clears the timer. */
7178 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7180 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7183 if (encrypt == 0x00) {
7184 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7185 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7186 } else if (chan->sec_level == BT_SECURITY_HIGH)
7187 l2cap_chan_close(chan, ECONNREFUSED);
7189 if (chan->sec_level == BT_SECURITY_MEDIUM)
7190 __clear_chan_timer(chan);
/* HCI security/encryption change callback: for LE links, distribute SMP
 * keys and cancel the security timer; for BR/EDR, walk every channel on
 * the connection and advance its state machine — mark ATT channels
 * ready, resume connected channels, (re)start pending outgoing
 * connections, and answer deferred incoming connection requests with
 * success or security block depending on status.
 */
7194 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7196 struct l2cap_conn *conn = hcon->l2cap_data;
7197 struct l2cap_chan *chan;
7202 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7204 if (hcon->type == LE_LINK) {
7205 if (!status && encrypt)
7206 smp_distribute_keys(conn, 0);
7207 cancel_delayed_work(&conn->security_timer);
7210 mutex_lock(&conn->chan_lock);
7212 list_for_each_entry(chan, &conn->chan_l, list) {
7213 l2cap_chan_lock(chan);
7215 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7216 state_to_string(chan->state));
/* A2MP fixed channels are not subject to this state machine. */
7218 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
7219 l2cap_chan_unlock(chan);
7223 if (chan->scid == L2CAP_CID_ATT) {
7224 if (!status && encrypt) {
7225 chan->sec_level = hcon->sec_level;
7226 l2cap_chan_ready(chan);
7229 l2cap_chan_unlock(chan);
/* Channels without a pending security-triggered operation are
 * skipped. */
7233 if (!__l2cap_no_conn_pending(chan)) {
7234 l2cap_chan_unlock(chan);
7238 if (!status && (chan->state == BT_CONNECTED ||
7239 chan->state == BT_CONFIG)) {
7240 chan->ops->resume(chan);
7241 l2cap_check_encryption(chan, encrypt);
7242 l2cap_chan_unlock(chan);
/* Outgoing connect waiting on security: start it now, or arm a
 * disconnect timer on failure. */
7246 if (chan->state == BT_CONNECT) {
7248 l2cap_start_connection(chan);
7250 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7251 } else if (chan->state == BT_CONNECT2) {
7252 struct l2cap_conn_rsp rsp;
7256 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7257 res = L2CAP_CR_PEND;
7258 stat = L2CAP_CS_AUTHOR_PEND;
7259 chan->ops->defer(chan);
7261 l2cap_state_change(chan, BT_CONFIG);
7262 res = L2CAP_CR_SUCCESS;
7263 stat = L2CAP_CS_NO_INFO;
/* Security failed for a deferred incoming connect: reject. */
7266 l2cap_state_change(chan, BT_DISCONN);
7267 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7268 res = L2CAP_CR_SEC_BLOCK;
7269 stat = L2CAP_CS_NO_INFO;
7272 rsp.scid = cpu_to_le16(chan->dcid);
7273 rsp.dcid = cpu_to_le16(chan->scid);
7274 rsp.result = cpu_to_le16(res);
7275 rsp.status = cpu_to_le16(stat);
7276 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Kick off configuration right away on success. */
7279 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7280 res == L2CAP_CR_SUCCESS) {
7282 set_bit(CONF_REQ_SENT, &chan->conf_state);
7283 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7285 l2cap_build_conf_req(chan, buf),
7287 chan->num_conf_req++;
7291 l2cap_chan_unlock(chan);
7294 mutex_unlock(&conn->chan_lock);
/* HCI ACL data entry point: reassemble possibly fragmented ACL packets
 * into complete L2CAP frames. A start fragment supplies the total frame
 * length from the basic header; continuation fragments are copied into
 * conn->rx_skb until rx_len reaches zero, then the complete frame is
 * handed to l2cap_recv_frame(). Malformed fragments mark the connection
 * unreliable (ECOMM).
 */
7299 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7301 struct l2cap_conn *conn = hcon->l2cap_data;
7302 struct l2cap_hdr *hdr;
7305 /* For AMP controller do not create l2cap conn */
7306 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7310 conn = l2cap_conn_add(hcon);
7315 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7319 case ACL_START_NO_FLUSH:
/* A new start while reassembly is in progress discards the old
 * partial frame. */
7322 BT_ERR("Unexpected start frame (len %d)", skb->len);
7323 kfree_skb(conn->rx_skb);
7324 conn->rx_skb = NULL;
7326 l2cap_conn_unreliable(conn, ECOMM);
7329 /* Start fragment always begin with Basic L2CAP header */
7330 if (skb->len < L2CAP_HDR_SIZE) {
7331 BT_ERR("Frame is too short (len %d)", skb->len);
7332 l2cap_conn_unreliable(conn, ECOMM);
7336 hdr = (struct l2cap_hdr *) skb->data;
7337 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7339 if (len == skb->len) {
7340 /* Complete frame received */
7341 l2cap_recv_frame(conn, skb);
7345 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7347 if (skb->len > len) {
7348 BT_ERR("Frame is too long (len %d, expected len %d)",
7350 l2cap_conn_unreliable(conn, ECOMM);
7354 /* Allocate skb for the complete frame (with header) */
7355 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7359 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still expected. */
7361 conn->rx_len = len - skb->len;
7365 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7367 if (!conn->rx_len) {
7368 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7369 l2cap_conn_unreliable(conn, ECOMM);
7373 if (skb->len > conn->rx_len) {
7374 BT_ERR("Fragment is too long (len %d, expected %d)",
7375 skb->len, conn->rx_len);
7376 kfree_skb(conn->rx_skb);
7377 conn->rx_skb = NULL;
7379 l2cap_conn_unreliable(conn, ECOMM);
7383 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7385 conn->rx_len -= skb->len;
7387 if (!conn->rx_len) {
7388 /* Complete frame received. l2cap_recv_frame
7389 * takes ownership of the skb so set the global
7390 * rx_skb pointer to NULL first.
7392 struct sk_buff *rx_skb = conn->rx_skb;
7393 conn->rx_skb = NULL;
7394 l2cap_recv_frame(conn, rx_skb);
/* debugfs seq_file show callback: dump one line per registered L2CAP
 * channel (addresses, state, PSM, CIDs, MTUs, security level, mode).
 * Walks the global chan_list under the chan_list_lock read lock.
 * NOTE(review): the trailing `return 0;` and closing brace were dropped
 * by extraction (line gap after 7418) — restore from upstream.
 */
7404 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7406 struct l2cap_chan *c;
7408 read_lock(&chan_list_lock);
7410 list_for_each_entry(c, &chan_list, global_l) {
/* PSM is stored little-endian on the channel; convert for display. */
7411 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7413 c->state, __le16_to_cpu(c->psm),
7414 c->scid, c->dcid, c->imtu, c->omtu,
7415 c->sec_level, c->mode);
7418 read_unlock(&chan_list_lock);
/* debugfs open callback: standard single_open() wrapper binding the
 * file to l2cap_debugfs_show.  (Braces dropped by extraction.)
 */
7423 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7425 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file single-record
 * pattern).  NOTE(review): the `.read = seq_read,` member and the closing
 * `};` appear to have been dropped by extraction (gap 7429 -> 7431) —
 * restore from upstream.
 */
7428 static const struct file_operations l2cap_debugfs_fops = {
7429 .open = l2cap_debugfs_open,
7431 .llseek = seq_lseek,
7432 .release = single_release,
/* Dentry for the "l2cap" debugfs file; created in l2cap_init() and
 * removed in l2cap_exit().
 */
7435 static struct dentry *l2cap_debugfs;
/* Module init for the L2CAP layer: register the socket interface, then
 * (if the bluetooth debugfs root exists) create the read-only "l2cap"
 * debugfs file.  NOTE(review): `int err;`, the error-return path after
 * l2cap_init_sockets(), and the final `return 0;` were dropped by
 * extraction — restore from upstream before editing.
 */
7437 int __init l2cap_init(void)
7441 err = l2cap_init_sockets();
/* bt_debugfs may be missing (debugfs disabled); skip the file then. */
7445 if (IS_ERR_OR_NULL(bt_debugfs))
7448 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7449 NULL, &l2cap_debugfs_fops);
/* Module teardown: remove the debugfs entry (debugfs_remove() accepts
 * NULL/error pointers safely) and unregister the L2CAP sockets.
 * Mirrors l2cap_init() in reverse order.
 */
7454 void l2cap_exit(void)
7456 debugfs_remove(l2cap_debugfs)
7457 l2cap_cleanup_sockets();
/* Runtime-writable (0644) module parameter to disable ERTM
 * (enhanced retransmission mode); the bool itself is defined earlier
 * in the file, outside this chunk.
 */
7460 module_param(disable_ertm, bool, 0644);
7461 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");