2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
/* NOTE(review): this listing is a truncated extraction (blank and
 * brace-only lines are missing); code is kept byte-identical and only
 * comments are added.
 */
/* Translate an HCI LE address type into a bdaddr type: public -> LE
 * public, anything else -> LE random.  The non-LE branch is not visible
 * in this excerpt -- presumably it returns a BR/EDR type; verify.
 */
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Scan conn->chan_l for a channel matching the destination CID.
 * Caller must hold conn->chan_lock (lock acquisition is not visible
 * here -- see the locked wrappers below).
 */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
/* Same scan keyed on the source CID; caller holds conn->chan_lock. */
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
/* Locked wrapper: takes conn->chan_lock around the SCID lookup. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
/* Locked wrapper: takes conn->chan_lock around the DCID lookup. */
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
/* Scan conn->chan_l for a channel whose pending signalling ident
 * matches; caller holds conn->chan_lock.
 */
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
/* Locked wrapper around the ident lookup. */
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Find a channel in the global chan_list bound to (sport == psm, src).
 * Caller must hold chan_list_lock.
 */
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a PSM to a channel.  If psm is zero, auto-allocate an odd
 * dynamic PSM in 0x1001..0x10ff (step 2 keeps the low octet odd per
 * the L2CAP PSM encoding rules); otherwise fail if (psm, src) is
 * already taken.  Serialized by chan_list_lock.
 */
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
/* Record a fixed SCID on the channel under chan_list_lock (the
 * assignment itself is in lines elided from this excerpt).
 */
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic CID on this connection by probing
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END) against existing SCIDs.
 */
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Move the channel to a new state via the ops->state_change callback
 * (err = 0).  Logs the old -> new transition.
 */
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state, 0);
/* As above but also delivers an error code with the state change. */
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
239 chan->ops->state_change(chan, chan->state, err);
/* Report an error without changing state (current state passed back). */
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
244 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, but only when the monitor timer
 * is not already pending and a retransmission timeout is configured.
 */
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer; the retransmission timer is stopped
 * first since the two are mutually exclusive.
 */
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
/* Linear search of a queue for the skb whose ERTM txseq matches. */
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Allocate the backing array rounded up to a power of two so (seq &
 * mask) maps any 14-bit sequence number into the table; all slots and
 * head/tail start CLEAR (empty).  kmalloc failure handling is in lines
 * elided here.
 */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a no-op). */
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
/* O(1) membership test: a slot not CLEAR means seq is linked in. */
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Unlink seq from the list.  Head removal is O(1); removal from the
 * middle walks predecessor links.  Returns CLEAR when seq is absent or
 * the list is empty; the successful-return path is in elided lines.
 */
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
/* Pop: O(1) removal of the current head via _remove. */
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: clear every slot then reset head/tail.  No-op if
 * already empty.
 */
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* O(1) append: duplicates are ignored (slot already linked); the first
 * element also becomes the head; the new tail slot is marked TAIL.
 */
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Delayed-work handler for the channel timer (armed in
 * l2cap_chan_create via INIT_DELAYED_WORK).  Picks a close reason from
 * the channel state -- ECONNREFUSED for established/being-configured
 * or secured connect attempts (the fallback reason is in elided
 * lines) -- then closes the channel under conn->chan_lock + chan lock
 * and drops the reference taken when the timer was set.
 */
396 static void l2cap_chan_timeout(struct work_struct *work)
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
400 struct l2cap_conn *conn = chan->conn;
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
416 l2cap_chan_close(chan, reason);
418 l2cap_chan_unlock(chan);
/* ops->close runs after the chan lock is released to avoid deadlock
 * with the socket layer -- TODO confirm against the full source.
 */
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
423 l2cap_chan_put(chan);
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Allocate and initialize a new channel: zeroed, linked into the
 * global chan_list, timer work initialized, state BT_OPEN, refcount 1.
 * Returns NULL on allocation failure (check elided here).
 */
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list; the kfree of
 * chan itself is in lines elided from this excerpt.
 */
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
/* Take a channel reference (kref_get in elided line). */
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a channel reference; last put runs l2cap_chan_destroy. */
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
/* Install the default channel parameters: CRC16 FCS, default ERTM
 * max-transmit and TX window sizes, low security, force-active flag.
 */
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Set up LE credit-based flow control defaults: local MTU/credits,
 * MPS capped at the local MTU; tx_credits start at 0 until granted by
 * the peer.
 */
493 void l2cap_le_flowctl_init(struct l2cap_chan *chan)
495 chan->imtu = L2CAP_DEFAULT_MTU;
496 chan->omtu = L2CAP_LE_MIN_MTU;
497 chan->mode = L2CAP_MODE_LE_FLOWCTL;
498 chan->tx_credits = 0;
499 chan->rx_credits = L2CAP_LE_MAX_CREDITS;
501 if (chan->imtu < L2CAP_LE_DEFAULT_MPS)
502 chan->mps = chan->imtu;
504 chan->mps = L2CAP_LE_DEFAULT_MPS;
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Attach a channel to a connection: assign SCID/DCID and MTUs by
 * channel type, seed default EFS parameters, take chan + hcon
 * references, and link into conn->chan_l.  Caller must hold
 * conn->chan_lock (see l2cap_chan_add below).
 */
507 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
509 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
510 __le16_to_cpu(chan->psm), chan->dcid);
512 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
516 switch (chan->chan_type) {
517 case L2CAP_CHAN_CONN_ORIENTED:
518 if (conn->hcon->type == LE_LINK) {
520 chan->omtu = L2CAP_DEFAULT_MTU;
/* ATT uses the fixed ATT CID on both ends; other LE channels
 * get a dynamic CID.
 */
521 if (chan->dcid == L2CAP_CID_ATT)
522 chan->scid = L2CAP_CID_ATT;
524 chan->scid = l2cap_alloc_cid(conn);
526 /* Alloc CID for connection-oriented socket */
527 chan->scid = l2cap_alloc_cid(conn);
528 chan->omtu = L2CAP_DEFAULT_MTU;
532 case L2CAP_CHAN_CONN_LESS:
533 /* Connectionless socket */
534 chan->scid = L2CAP_CID_CONN_LESS;
535 chan->dcid = L2CAP_CID_CONN_LESS;
536 chan->omtu = L2CAP_DEFAULT_MTU;
539 case L2CAP_CHAN_CONN_FIX_A2MP:
540 chan->scid = L2CAP_CID_A2MP;
541 chan->dcid = L2CAP_CID_A2MP;
542 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
543 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
547 /* Raw socket can send/recv signalling messages only */
548 chan->scid = L2CAP_CID_SIGNALING;
549 chan->dcid = L2CAP_CID_SIGNALING;
550 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort EFS parameters for all channel types. */
553 chan->local_id = L2CAP_BESTEFFORT_ID;
554 chan->local_stype = L2CAP_SERV_BESTEFFORT;
555 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
556 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
557 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
558 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
560 l2cap_chan_hold(chan);
562 hci_conn_hold(conn->hcon);
564 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: serializes __l2cap_chan_add via conn->chan_lock. */
567 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
569 mutex_lock(&conn->chan_lock);
570 __l2cap_chan_add(conn, chan);
571 mutex_unlock(&conn->chan_lock);
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Detach a channel from its connection: stop the channel timer,
 * unlink from conn->chan_l, drop the chan and hcon references taken in
 * __l2cap_chan_add (A2MP fixed channels do not drop the hcon ref),
 * clear any AMP manager/logical-link association, tear the channel
 * down via ops->teardown(err), and purge mode-specific queues (ERTM
 * also stops its timers and frees the SREJ/retrans seq lists).
 */
574 void l2cap_chan_del(struct l2cap_chan *chan, int err)
576 struct l2cap_conn *conn = chan->conn;
578 __clear_chan_timer(chan);
580 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
583 struct amp_mgr *mgr = conn->hcon->amp_mgr;
584 /* Delete from channel list */
585 list_del(&chan->list);
587 l2cap_chan_put(chan);
591 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
592 hci_conn_drop(conn->hcon);
594 if (mgr && mgr->bredr_chan == chan)
595 mgr->bredr_chan = NULL;
598 if (chan->hs_hchan) {
599 struct hci_chan *hs_hchan = chan->hs_hchan;
601 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
602 amp_disconnect_logical_link(hs_hchan);
605 chan->ops->teardown(chan, err);
607 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
611 case L2CAP_MODE_BASIC:
614 case L2CAP_MODE_LE_FLOWCTL:
615 skb_queue_purge(&chan->tx_q);
618 case L2CAP_MODE_ERTM:
619 __clear_retrans_timer(chan);
620 __clear_monitor_timer(chan);
621 __clear_ack_timer(chan);
623 skb_queue_purge(&chan->srej_q);
625 l2cap_seq_list_free(&chan->srej_list);
626 l2cap_seq_list_free(&chan->retrans_list);
630 case L2CAP_MODE_STREAMING:
631 skb_queue_purge(&chan->tx_q);
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Reject a pending LE credit-based connection request: result is
 * "authorization pending" when setup was deferred, otherwise "bad
 * PSM"; moves the channel to BT_DISCONN and answers the saved ident
 * with an LE Connection Response.
 */
638 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
640 struct l2cap_conn *conn = chan->conn;
641 struct l2cap_le_conn_rsp rsp;
644 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
645 result = L2CAP_CR_AUTHORIZATION;
647 result = L2CAP_CR_BAD_PSM;
649 l2cap_state_change(chan, BT_DISCONN);
651 rsp.dcid = cpu_to_le16(chan->scid);
652 rsp.mtu = cpu_to_le16(chan->imtu);
653 rsp.mps = cpu_to_le16(chan->mps);
654 rsp.credits = cpu_to_le16(chan->rx_credits);
655 rsp.result = cpu_to_le16(result);
657 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart: reject a pending Connection Request with
 * "security block" (deferred setup) or "bad PSM", status "no info".
 */
661 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
663 struct l2cap_conn *conn = chan->conn;
664 struct l2cap_conn_rsp rsp;
667 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
668 result = L2CAP_CR_SEC_BLOCK;
670 result = L2CAP_CR_BAD_PSM;
672 l2cap_state_change(chan, BT_DISCONN);
/* Note dcid/scid are swapped: our SCID is the peer's DCID. */
674 rsp.scid = cpu_to_le16(chan->dcid);
675 rsp.dcid = cpu_to_le16(chan->scid);
676 rsp.result = cpu_to_le16(result);
677 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
679 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Close a channel according to its current state (the switch labels
 * for most states are in elided lines): established PSM-based
 * channels get a Disconnect Request with a send timeout; half-open
 * incoming channels are rejected via the BR/EDR or LE reject helper;
 * other states simply delete the channel or tear it down.
 */
682 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
684 struct l2cap_conn *conn = chan->conn;
686 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
688 switch (chan->state) {
690 chan->ops->teardown(chan, 0);
695 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
696 * check for chan->psm.
698 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
699 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
700 l2cap_send_disconn_req(chan, reason);
702 l2cap_chan_del(chan, reason);
706 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
707 if (conn->hcon->type == ACL_LINK)
708 l2cap_chan_connect_reject(chan);
709 else if (conn->hcon->type == LE_LINK)
710 l2cap_chan_le_connect_reject(chan);
713 l2cap_chan_del(chan, reason);
718 l2cap_chan_del(chan, reason);
722 chan->ops->teardown(chan, 0);
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Map the channel type + security level to an HCI authentication
 * requirement.  SDP (and 3DSP connectionless) channels are demoted to
 * BT_SECURITY_SDP and never request bonding; the first visible case
 * label is elided but by its HCI_AT_DEDICATED_* results it covers raw
 * channels -- TODO confirm against the full source.
 */
727 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
729 switch (chan->chan_type) {
731 switch (chan->sec_level) {
732 case BT_SECURITY_HIGH:
733 return HCI_AT_DEDICATED_BONDING_MITM;
734 case BT_SECURITY_MEDIUM:
735 return HCI_AT_DEDICATED_BONDING;
737 return HCI_AT_NO_BONDING;
740 case L2CAP_CHAN_CONN_LESS:
741 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
742 if (chan->sec_level == BT_SECURITY_LOW)
743 chan->sec_level = BT_SECURITY_SDP;
745 if (chan->sec_level == BT_SECURITY_HIGH)
746 return HCI_AT_NO_BONDING_MITM;
748 return HCI_AT_NO_BONDING;
750 case L2CAP_CHAN_CONN_ORIENTED:
751 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
752 if (chan->sec_level == BT_SECURITY_LOW)
753 chan->sec_level = BT_SECURITY_SDP;
755 if (chan->sec_level == BT_SECURITY_HIGH)
756 return HCI_AT_NO_BONDING_MITM;
758 return HCI_AT_NO_BONDING;
762 switch (chan->sec_level) {
763 case BT_SECURITY_HIGH:
764 return HCI_AT_GENERAL_BONDING_MITM;
765 case BT_SECURITY_MEDIUM:
766 return HCI_AT_GENERAL_BONDING;
768 return HCI_AT_NO_BONDING;
774 /* Service level security */
/* Enforce the channel's security level: SMP for LE links, HCI
 * authentication (with the derived auth type) for BR/EDR.
 */
775 int l2cap_chan_check_security(struct l2cap_chan *chan)
777 struct l2cap_conn *conn = chan->conn;
780 if (conn->hcon->type == LE_LINK)
781 return smp_conn_security(conn->hcon, chan->sec_level);
783 auth_type = l2cap_get_auth_type(chan);
785 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Allocate the next signalling identifier under conn->lock, wrapping
 * within the kernel's 1..128 range (129+ reserved for userspace
 * tools).
 */
788 static u8 l2cap_get_ident(struct l2cap_conn *conn)
792 /* Get next available identificator.
793 * 1 - 128 are used by kernel.
794 * 129 - 199 are reserved.
795 * 200 - 254 are used by utilities like l2ping, etc.
798 spin_lock(&conn->lock);
800 if (++conn->tx_ident > 128)
805 spin_unlock(&conn->lock);
/* Build a signalling command skb and send it on the connection's HCI
 * channel at maximum priority; non-flushable ACL start when the
 * controller supports it.
 */
810 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
813 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
816 BT_DBG("code 0x%2.2x", code);
821 if (lmp_no_flush_capable(conn->hcon->hdev))
822 flags = ACL_START_NO_FLUSH;
826 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
827 skb->priority = HCI_PRIO_MAX;
829 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move state other
 * than stable or wait-prepare).
 */
832 static bool __chan_is_moving(struct l2cap_chan *chan)
834 return chan->move_state != L2CAP_MOVE_STABLE &&
835 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for the channel: via the high-speed (AMP) HCI
 * channel when one is attached and no move is in progress, otherwise
 * via the ACL link, honouring the flushable and force-active flags.
 */
838 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
840 struct hci_conn *hcon = chan->conn->hcon;
843 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
846 if (chan->hs_hcon && !__chan_is_moving(chan)) {
848 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
855 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
856 lmp_no_flush_capable(hcon->hdev))
857 flags = ACL_START_NO_FLUSH;
861 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
862 hci_send_acl(chan->conn->hchan, skb, flags);
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Decode a 16-bit ERTM enhanced control field into l2cap_ctrl:
 * reqseq/final always; poll+super for S-frames (frame-type bit set),
 * sar+txseq for I-frames.
 */
865 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
867 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
868 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
870 if (enh & L2CAP_CTRL_FRAME_TYPE) {
873 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
874 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
881 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
882 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decode for the 32-bit extended control field (extended window
 * sizes).
 */
889 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
891 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
892 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
894 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
897 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
898 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
905 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
906 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field at the front of an incoming skb,
 * choosing 32- vs 16-bit form from the channel's FLAG_EXT_CTRL.
 */
913 static inline void __unpack_control(struct l2cap_chan *chan,
916 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
917 __unpack_extended_control(get_unaligned_le32(skb->data),
918 &bt_cb(skb)->control);
919 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
921 __unpack_enhanced_control(get_unaligned_le16(skb->data),
922 &bt_cb(skb)->control);
923 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control: encode l2cap_ctrl into the
 * 32-bit extended control word.
 */
927 static u32 __pack_extended_control(struct l2cap_ctrl *control)
931 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
932 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
934 if (control->sframe) {
935 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
936 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
937 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
939 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
940 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control: encode into the 16-bit word. */
946 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
950 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
951 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
953 if (control->sframe) {
954 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
955 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
956 packed |= L2CAP_CTRL_FRAME_TYPE;
958 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
959 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an outgoing skb just after the
 * basic L2CAP header, sized per FLAG_EXT_CTRL.
 */
965 static inline void __pack_control(struct l2cap_chan *chan,
966 struct l2cap_ctrl *control,
969 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
970 put_unaligned_le32(__pack_extended_control(control),
971 skb->data + L2CAP_HDR_SIZE);
973 put_unaligned_le16(__pack_enhanced_control(control),
974 skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* The __ertm_hdr_size helper immediately above (elided constants
 * aside) returns the extended or enhanced header size by FLAG_EXT_CTRL.
 */
978 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
980 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
981 return L2CAP_EXT_HDR_SIZE;
983 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-frame) PDU: basic header + packed control
 * field + optional CRC16 FCS over the whole frame.  Returns
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
986 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
990 struct l2cap_hdr *lh;
991 int hlen = __ertm_hdr_size(chan);
993 if (chan->fcs == L2CAP_FCS_CRC16)
994 hlen += L2CAP_FCS_SIZE;
996 skb = bt_skb_alloc(hlen, GFP_KERNEL);
999 return ERR_PTR(-ENOMEM);
1001 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1002 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1003 lh->cid = cpu_to_le16(chan->dcid);
1005 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1006 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1008 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1010 if (chan->fcs == L2CAP_FCS_CRC16) {
1011 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1012 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1015 skb->priority = HCI_PRIO_MAX;
/* Send an S-frame: maintains RNR-sent state for RR/RNR, records the
 * acked reqseq (and cancels the ack timer) for non-SREJ frames, packs
 * the control word and transmits.  Skipped entirely while an AMP move
 * is in progress or for non-S-frames.
 */
1019 static void l2cap_send_sframe(struct l2cap_chan *chan,
1020 struct l2cap_ctrl *control)
1022 struct sk_buff *skb;
1025 BT_DBG("chan %p, control %p", chan, control);
1027 if (!control->sframe)
1030 if (__chan_is_moving(chan))
1033 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1037 if (control->super == L2CAP_SUPER_RR)
1038 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1039 else if (control->super == L2CAP_SUPER_RNR)
1040 set_bit(CONN_RNR_SENT, &chan->conn_state);
1042 if (control->super != L2CAP_SUPER_SREJ) {
1043 chan->last_acked_seq = control->reqseq;
1044 __clear_ack_timer(chan);
1047 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1048 control->final, control->poll, control->super);
1050 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1051 control_field = __pack_extended_control(control);
1053 control_field = __pack_enhanced_control(control);
1055 skb = l2cap_create_sframe_pdu(chan, control_field);
1057 l2cap_do_send(chan, skb);
/* Convenience: send RR (or RNR when locally busy) acknowledging the
 * current buffer_seq, with the requested poll bit.
 */
1060 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1062 struct l2cap_ctrl control;
1064 BT_DBG("chan %p, poll %d", chan, poll);
1066 memset(&control, 0, sizeof(control));
1068 control.poll = poll;
1070 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1071 control.super = L2CAP_SUPER_RNR;
1073 control.super = L2CAP_SUPER_RR;
1075 control.reqseq = chan->buffer_seq;
1076 l2cap_send_sframe(chan, &control);
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* True when no Connection Request from us is outstanding. */
1079 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1081 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: high-speed
 * must be enabled, the peer must advertise the A2MP fixed channel,
 * and at least one non-BR/EDR AMP controller must be up.  The
 * remaining policy checks are in elided lines.
 */
1084 static bool __amp_capable(struct l2cap_chan *chan)
1086 struct l2cap_conn *conn = chan->conn;
1087 struct hci_dev *hdev;
1088 bool amp_available = false;
1090 if (!conn->hs_enabled)
1093 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1096 read_lock(&hci_dev_list_lock);
1097 list_for_each_entry(hdev, &hci_dev_list, list) {
1098 if (hdev->amp_type != AMP_TYPE_BREDR &&
1099 test_bit(HCI_UP, &hdev->flags)) {
1100 amp_available = true;
1104 read_unlock(&hci_dev_list_lock);
1106 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1107 return amp_available;
/* EFS parameter validation; body is almost entirely elided here. */
1112 static bool l2cap_check_efs(struct l2cap_chan *chan)
1114 /* Check EFS parameters */
/* Send an L2CAP Connection Request carrying our SCID and PSM, marking
 * the connect as pending under a fresh signalling ident.
 */
1118 void l2cap_send_conn_req(struct l2cap_chan *chan)
1120 struct l2cap_conn *conn = chan->conn;
1121 struct l2cap_conn_req req;
1123 req.scid = cpu_to_le16(chan->scid);
1124 req.psm = chan->psm;
1126 chan->ident = l2cap_get_ident(conn);
1128 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1130 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant: Create Channel Request naming the target AMP
 * controller id.
 */
1133 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1135 struct l2cap_create_chan_req req;
1136 req.scid = cpu_to_le16(chan->scid);
1137 req.psm = chan->psm;
1138 req.amp_id = amp_id;
1140 chan->ident = l2cap_get_ident(chan->conn);
1142 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Prepare an ERTM channel for an AMP channel move: stop all ERTM
 * timers, reset retry counts on queued frames, resync expected_tx_seq
 * to buffer_seq, clear REJ/SREJ bookkeeping, and park the TX/RX state
 * machines in their move states with the remote treated as busy.
 * No-op for non-ERTM modes.
 */
1146 static void l2cap_move_setup(struct l2cap_chan *chan)
1148 struct sk_buff *skb;
1150 BT_DBG("chan %p", chan);
1152 if (chan->mode != L2CAP_MODE_ERTM)
1155 __clear_retrans_timer(chan);
1156 __clear_monitor_timer(chan);
1157 __clear_ack_timer(chan);
1159 chan->retry_count = 0;
1160 skb_queue_walk(&chan->tx_q, skb) {
1161 if (bt_cb(skb)->control.retries)
1162 bt_cb(skb)->control.retries = 1;
1167 chan->expected_tx_seq = chan->buffer_seq;
1169 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1170 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1171 l2cap_seq_list_clear(&chan->retrans_list);
1172 l2cap_seq_list_clear(&chan->srej_list);
1173 skb_queue_purge(&chan->srej_q);
1175 chan->tx_state = L2CAP_TX_STATE_XMIT;
1176 chan->rx_state = L2CAP_RX_STATE_MOVE;
1178 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to stable/no-role, then (ERTM only) the
 * move initiator polls the peer and waits for F-bit, the responder
 * waits for P-bit.
 */
1181 static void l2cap_move_done(struct l2cap_chan *chan)
1183 u8 move_role = chan->move_role;
1184 BT_DBG("chan %p", chan);
1186 chan->move_state = L2CAP_MOVE_STABLE;
1187 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1189 if (chan->mode != L2CAP_MODE_ERTM)
1192 switch (move_role) {
1193 case L2CAP_MOVE_ROLE_INITIATOR:
1194 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1195 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1197 case L2CAP_MOVE_ROLE_RESPONDER:
1198 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Start LE credit-based flow control on an established channel:
 * reset SDU reassembly, recompute MPS from the local MTU, init the TX
 * queue, and suspend the channel until the peer grants credits.
 */
1203 static void l2cap_le_flowctl_start(struct l2cap_chan *chan)
1206 chan->sdu_last_frag = NULL;
1209 if (chan->imtu < L2CAP_LE_DEFAULT_MPS)
1210 chan->mps = chan->imtu;
1212 chan->mps = L2CAP_LE_DEFAULT_MPS;
1214 skb_queue_head_init(&chan->tx_q);
1216 if (!chan->tx_credits)
1217 chan->ops->suspend(chan);
/* Mark the channel connected: clear all configuration flags and the
 * channel timer, kick LE flow control when applicable, and notify the
 * owner via ops->ready.
 */
1220 static void l2cap_chan_ready(struct l2cap_chan *chan)
1222 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1223 chan->conf_state = 0;
1224 __clear_chan_timer(chan);
1226 if (chan->mode == L2CAP_MODE_LE_FLOWCTL)
1227 l2cap_le_flowctl_start(chan);
1229 chan->state = BT_CONNECTED;
1231 chan->ops->ready(chan);
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Send an LE Connection Request (credit-based channels), guarded by
 * FLAG_LE_CONN_REQ_SENT so it goes out at most once.
 */
1234 static void l2cap_le_connect(struct l2cap_chan *chan)
1236 struct l2cap_conn *conn = chan->conn;
1237 struct l2cap_le_conn_req req;
1239 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1242 req.psm = chan->psm;
1243 req.scid = cpu_to_le16(chan->scid);
1244 req.mtu = cpu_to_le16(chan->imtu);
1245 req.mps = cpu_to_le16(chan->mps);
1246 req.credits = cpu_to_le16(chan->rx_credits);
1248 chan->ident = l2cap_get_ident(conn);
1250 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* LE channel start: require SMP security first; the (elided) fast
 * path marks the channel ready, otherwise a connecting channel sends
 * the LE Connection Request.
 */
1254 static void l2cap_le_start(struct l2cap_chan *chan)
1256 struct l2cap_conn *conn = chan->conn;
1258 if (!smp_conn_security(conn->hcon, chan->sec_level))
1262 l2cap_chan_ready(chan);
1266 if (chan->state == BT_CONNECT)
1267 l2cap_le_connect(chan);
/* Dispatch connection setup: AMP discovery when AMP-capable, LE start
 * on LE links, else a plain BR/EDR Connection Request.
 */
1270 static void l2cap_start_connection(struct l2cap_chan *chan)
1272 if (__amp_capable(chan)) {
1273 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1274 a2mp_discover_amp(chan);
1275 } else if (chan->conn->hcon->type == LE_LINK) {
1276 l2cap_le_start(chan);
1278 l2cap_send_conn_req(chan);
/* Kick off channel setup on a connection.  BR/EDR links first need the
 * feature-mask Information Request exchange; once done (and security
 * passes, with no connect pending) the connection is started.
 */
1282 static void l2cap_do_start(struct l2cap_chan *chan)
1284 struct l2cap_conn *conn = chan->conn;
1286 if (conn->hcon->type == LE_LINK) {
1287 l2cap_le_start(chan);
1291 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1292 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1295 if (l2cap_chan_check_security(chan) &&
1296 __l2cap_no_conn_pending(chan)) {
1297 l2cap_start_connection(chan);
1300 struct l2cap_info_req req;
1301 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1303 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1304 conn->info_ident = l2cap_get_ident(conn);
1306 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1308 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check an ERTM/streaming mode against the local and remote feature
 * masks; other modes are handled in elided lines.
 */
1313 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1315 u32 local_feat_mask = l2cap_feat_mask;
1317 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1320 case L2CAP_MODE_ERTM:
1321 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1322 case L2CAP_MODE_STREAMING:
1323 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request (stopping ERTM timers on a connected
 * channel first); A2MP fixed channels just transition to BT_DISCONN.
 * Ends by moving to BT_DISCONN carrying err to the owner.
 */
1329 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1331 struct l2cap_conn *conn = chan->conn;
1332 struct l2cap_disconn_req req;
1337 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1338 __clear_retrans_timer(chan);
1339 __clear_monitor_timer(chan);
1340 __clear_ack_timer(chan);
1343 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1344 l2cap_state_change(chan, BT_DISCONN);
1348 req.dcid = cpu_to_le16(chan->dcid);
1349 req.scid = cpu_to_le16(chan->scid);
1350 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1353 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1356 /* ---- L2CAP connections ---- */
/* NOTE(review): truncated listing; comments only, code byte-identical. */
/* Walk every connection-oriented channel on the link once the feature
 * exchange completes.  BT_CONNECT channels are (re)started when
 * security allows, or closed if their configured mode is unsupported
 * by the peer.  BT_CONNECT2 channels answer their pending Connection
 * Request -- success, authorization-pending (deferred setup), or
 * authentication-pending -- and on success immediately send the first
 * Configure Request.
 */
1357 static void l2cap_conn_start(struct l2cap_conn *conn)
1359 struct l2cap_chan *chan, *tmp;
1361 BT_DBG("conn %p", conn);
1363 mutex_lock(&conn->chan_lock);
1365 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1366 l2cap_chan_lock(chan);
1368 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1369 l2cap_chan_unlock(chan);
1373 if (chan->state == BT_CONNECT) {
1374 if (!l2cap_chan_check_security(chan) ||
1375 !__l2cap_no_conn_pending(chan)) {
1376 l2cap_chan_unlock(chan);
1380 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1381 && test_bit(CONF_STATE2_DEVICE,
1382 &chan->conf_state)) {
1383 l2cap_chan_close(chan, ECONNRESET);
1384 l2cap_chan_unlock(chan);
1388 l2cap_start_connection(chan);
1390 } else if (chan->state == BT_CONNECT2) {
1391 struct l2cap_conn_rsp rsp;
1393 rsp.scid = cpu_to_le16(chan->dcid);
1394 rsp.dcid = cpu_to_le16(chan->scid);
1396 if (l2cap_chan_check_security(chan)) {
1397 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1398 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1399 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1400 chan->ops->defer(chan);
1403 l2cap_state_change(chan, BT_CONFIG);
1404 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1405 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1408 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1409 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1412 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1415 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1416 rsp.result != L2CAP_CR_SUCCESS) {
1417 l2cap_chan_unlock(chan);
1421 set_bit(CONF_REQ_SENT, &chan->conf_state);
1422 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1423 l2cap_build_conf_req(chan, buf), buf);
1424 chan->num_conf_req++;
1427 l2cap_chan_unlock(chan);
1430 mutex_unlock(&conn->chan_lock);
1433 /* Find socket with cid and source/destination bdaddr.
1434 * Returns closest match, locked.
1436 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1440 struct l2cap_chan *c, *c1 = NULL;
1442 read_lock(&chan_list_lock);
1444 list_for_each_entry(c, &chan_list, global_l) {
1445 if (state && c->state != state)
1448 if (c->scid == cid) {
1449 int src_match, dst_match;
1450 int src_any, dst_any;
1453 src_match = !bacmp(&c->src, src);
1454 dst_match = !bacmp(&c->dst, dst);
1455 if (src_match && dst_match) {
1456 read_unlock(&chan_list_lock);
1461 src_any = !bacmp(&c->src, BDADDR_ANY);
1462 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1463 if ((src_match && dst_any) || (src_any && dst_match) ||
1464 (src_any && dst_any))
1469 read_unlock(&chan_list_lock);
1474 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1476 struct hci_conn *hcon = conn->hcon;
1477 struct l2cap_chan *chan, *pchan;
1482 /* Check if we have socket listening on cid */
1483 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1484 &hcon->src, &hcon->dst);
1488 /* Client ATT sockets should override the server one */
1489 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1492 dst_type = bdaddr_type(hcon, hcon->dst_type);
1494 /* If device is blocked, do not create a channel for it */
1495 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1498 l2cap_chan_lock(pchan);
1500 chan = pchan->ops->new_connection(pchan);
1504 chan->dcid = L2CAP_CID_ATT;
1506 bacpy(&chan->src, &hcon->src);
1507 bacpy(&chan->dst, &hcon->dst);
1508 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1509 chan->dst_type = dst_type;
1511 __l2cap_chan_add(conn, chan);
1514 l2cap_chan_unlock(pchan);
1517 static void l2cap_conn_ready(struct l2cap_conn *conn)
1519 struct l2cap_chan *chan;
1520 struct hci_conn *hcon = conn->hcon;
1522 BT_DBG("conn %p", conn);
1524 /* For outgoing pairing which doesn't necessarily have an
1525 * associated socket (e.g. mgmt_pair_device).
1527 if (hcon->out && hcon->type == LE_LINK)
1528 smp_conn_security(hcon, hcon->pending_sec_level);
1530 mutex_lock(&conn->chan_lock);
1532 if (hcon->type == LE_LINK)
1533 l2cap_le_conn_ready(conn);
1535 list_for_each_entry(chan, &conn->chan_l, list) {
1537 l2cap_chan_lock(chan);
1539 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1540 l2cap_chan_unlock(chan);
1544 if (hcon->type == LE_LINK) {
1545 l2cap_le_start(chan);
1546 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1547 l2cap_chan_ready(chan);
1549 } else if (chan->state == BT_CONNECT) {
1550 l2cap_do_start(chan);
1553 l2cap_chan_unlock(chan);
1556 mutex_unlock(&conn->chan_lock);
1559 /* Notify sockets that we cannot guaranty reliability anymore */
1560 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1562 struct l2cap_chan *chan;
1564 BT_DBG("conn %p", conn);
1566 mutex_lock(&conn->chan_lock);
1568 list_for_each_entry(chan, &conn->chan_l, list) {
1569 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1570 l2cap_chan_set_err(chan, err);
1573 mutex_unlock(&conn->chan_lock);
1576 static void l2cap_info_timeout(struct work_struct *work)
1578 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1581 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1582 conn->info_ident = 0;
1584 l2cap_conn_start(conn);
/*
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object is unregistered either explicitly, or implicitly when
 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
1600 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1602 struct hci_dev *hdev = conn->hcon->hdev;
1605 /* We need to check whether l2cap_conn is registered. If it is not, we
1606 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1607 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1608 * relies on the parent hci_conn object to be locked. This itself relies
1609 * on the hci_dev object to be locked. So we must lock the hci device
1614 if (user->list.next || user->list.prev) {
1619 /* conn->hchan is NULL after l2cap_conn_del() was called */
1625 ret = user->probe(conn, user);
1629 list_add(&user->list, &conn->users);
1633 hci_dev_unlock(hdev);
1636 EXPORT_SYMBOL(l2cap_register_user);
1638 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1640 struct hci_dev *hdev = conn->hcon->hdev;
1644 if (!user->list.next || !user->list.prev)
1647 list_del(&user->list);
1648 user->list.next = NULL;
1649 user->list.prev = NULL;
1650 user->remove(conn, user);
1653 hci_dev_unlock(hdev);
1655 EXPORT_SYMBOL(l2cap_unregister_user);
1657 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1659 struct l2cap_user *user;
1661 while (!list_empty(&conn->users)) {
1662 user = list_first_entry(&conn->users, struct l2cap_user, list);
1663 list_del(&user->list);
1664 user->list.next = NULL;
1665 user->list.prev = NULL;
1666 user->remove(conn, user);
1670 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1672 struct l2cap_conn *conn = hcon->l2cap_data;
1673 struct l2cap_chan *chan, *l;
1678 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1680 kfree_skb(conn->rx_skb);
1682 l2cap_unregister_all_users(conn);
1684 mutex_lock(&conn->chan_lock);
1687 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1688 l2cap_chan_hold(chan);
1689 l2cap_chan_lock(chan);
1691 l2cap_chan_del(chan, err);
1693 l2cap_chan_unlock(chan);
1695 chan->ops->close(chan);
1696 l2cap_chan_put(chan);
1699 mutex_unlock(&conn->chan_lock);
1701 hci_chan_del(conn->hchan);
1703 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1704 cancel_delayed_work_sync(&conn->info_timer);
1706 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1707 cancel_delayed_work_sync(&conn->security_timer);
1708 smp_chan_destroy(conn);
1711 hcon->l2cap_data = NULL;
1713 l2cap_conn_put(conn);
1716 static void security_timeout(struct work_struct *work)
1718 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1719 security_timer.work);
1721 BT_DBG("conn %p", conn);
1723 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1724 smp_chan_destroy(conn);
1725 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1729 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1731 struct l2cap_conn *conn = hcon->l2cap_data;
1732 struct hci_chan *hchan;
1737 hchan = hci_chan_create(hcon);
1741 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1743 hci_chan_del(hchan);
1747 kref_init(&conn->ref);
1748 hcon->l2cap_data = conn;
1750 hci_conn_get(conn->hcon);
1751 conn->hchan = hchan;
1753 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1755 switch (hcon->type) {
1757 if (hcon->hdev->le_mtu) {
1758 conn->mtu = hcon->hdev->le_mtu;
1763 conn->mtu = hcon->hdev->acl_mtu;
1767 conn->feat_mask = 0;
1769 if (hcon->type == ACL_LINK)
1770 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1771 &hcon->hdev->dev_flags);
1773 spin_lock_init(&conn->lock);
1774 mutex_init(&conn->chan_lock);
1776 INIT_LIST_HEAD(&conn->chan_l);
1777 INIT_LIST_HEAD(&conn->users);
1779 if (hcon->type == LE_LINK)
1780 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1782 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1784 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1789 static void l2cap_conn_free(struct kref *ref)
1791 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1793 hci_conn_put(conn->hcon);
1797 void l2cap_conn_get(struct l2cap_conn *conn)
1799 kref_get(&conn->ref);
1801 EXPORT_SYMBOL(l2cap_conn_get);
1803 void l2cap_conn_put(struct l2cap_conn *conn)
1805 kref_put(&conn->ref, l2cap_conn_free);
1807 EXPORT_SYMBOL(l2cap_conn_put);
1809 /* ---- Socket interface ---- */
1811 /* Find socket with psm and source / destination bdaddr.
1812 * Returns closest match.
1814 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1819 struct l2cap_chan *c, *c1 = NULL;
1821 read_lock(&chan_list_lock);
1823 list_for_each_entry(c, &chan_list, global_l) {
1824 if (state && c->state != state)
1827 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1830 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1833 if (c->psm == psm) {
1834 int src_match, dst_match;
1835 int src_any, dst_any;
1838 src_match = !bacmp(&c->src, src);
1839 dst_match = !bacmp(&c->dst, dst);
1840 if (src_match && dst_match) {
1841 read_unlock(&chan_list_lock);
1846 src_any = !bacmp(&c->src, BDADDR_ANY);
1847 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1848 if ((src_match && dst_any) || (src_any && dst_match) ||
1849 (src_any && dst_any))
1854 read_unlock(&chan_list_lock);
1859 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1860 bdaddr_t *dst, u8 dst_type)
1862 struct l2cap_conn *conn;
1863 struct hci_conn *hcon;
1864 struct hci_dev *hdev;
1868 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1869 dst_type, __le16_to_cpu(psm));
1871 hdev = hci_get_route(dst, &chan->src);
1873 return -EHOSTUNREACH;
1877 l2cap_chan_lock(chan);
1879 /* PSM must be odd and lsb of upper byte must be 0 */
1880 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1881 chan->chan_type != L2CAP_CHAN_RAW) {
1886 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1891 switch (chan->mode) {
1892 case L2CAP_MODE_BASIC:
1893 case L2CAP_MODE_LE_FLOWCTL:
1895 case L2CAP_MODE_ERTM:
1896 case L2CAP_MODE_STREAMING:
1905 switch (chan->state) {
1909 /* Already connecting */
1914 /* Already connected */
1928 /* Set destination address and psm */
1929 bacpy(&chan->dst, dst);
1930 chan->dst_type = dst_type;
1935 auth_type = l2cap_get_auth_type(chan);
1937 if (bdaddr_type_is_le(dst_type))
1938 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1939 chan->sec_level, auth_type);
1941 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1942 chan->sec_level, auth_type);
1945 err = PTR_ERR(hcon);
1949 conn = l2cap_conn_add(hcon);
1951 hci_conn_drop(hcon);
1956 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1957 hci_conn_drop(hcon);
1962 /* Update source addr of the socket */
1963 bacpy(&chan->src, &hcon->src);
1964 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1966 l2cap_chan_unlock(chan);
1967 l2cap_chan_add(conn, chan);
1968 l2cap_chan_lock(chan);
1970 /* l2cap_chan_add takes its own ref so we can drop this one */
1971 hci_conn_drop(hcon);
1973 l2cap_state_change(chan, BT_CONNECT);
1974 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1976 if (hcon->state == BT_CONNECTED) {
1977 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1978 __clear_chan_timer(chan);
1979 if (l2cap_chan_check_security(chan))
1980 l2cap_state_change(chan, BT_CONNECTED);
1982 l2cap_do_start(chan);
1988 l2cap_chan_unlock(chan);
1989 hci_dev_unlock(hdev);
1994 static void l2cap_monitor_timeout(struct work_struct *work)
1996 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1997 monitor_timer.work);
1999 BT_DBG("chan %p", chan);
2001 l2cap_chan_lock(chan);
2004 l2cap_chan_unlock(chan);
2005 l2cap_chan_put(chan);
2009 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2011 l2cap_chan_unlock(chan);
2012 l2cap_chan_put(chan);
2015 static void l2cap_retrans_timeout(struct work_struct *work)
2017 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2018 retrans_timer.work);
2020 BT_DBG("chan %p", chan);
2022 l2cap_chan_lock(chan);
2025 l2cap_chan_unlock(chan);
2026 l2cap_chan_put(chan);
2030 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2031 l2cap_chan_unlock(chan);
2032 l2cap_chan_put(chan);
2035 static void l2cap_streaming_send(struct l2cap_chan *chan,
2036 struct sk_buff_head *skbs)
2038 struct sk_buff *skb;
2039 struct l2cap_ctrl *control;
2041 BT_DBG("chan %p, skbs %p", chan, skbs);
2043 if (__chan_is_moving(chan))
2046 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2048 while (!skb_queue_empty(&chan->tx_q)) {
2050 skb = skb_dequeue(&chan->tx_q);
2052 bt_cb(skb)->control.retries = 1;
2053 control = &bt_cb(skb)->control;
2055 control->reqseq = 0;
2056 control->txseq = chan->next_tx_seq;
2058 __pack_control(chan, control, skb);
2060 if (chan->fcs == L2CAP_FCS_CRC16) {
2061 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2062 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2065 l2cap_do_send(chan, skb);
2067 BT_DBG("Sent txseq %u", control->txseq);
2069 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2070 chan->frames_sent++;
2074 static int l2cap_ertm_send(struct l2cap_chan *chan)
2076 struct sk_buff *skb, *tx_skb;
2077 struct l2cap_ctrl *control;
2080 BT_DBG("chan %p", chan);
2082 if (chan->state != BT_CONNECTED)
2085 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2088 if (__chan_is_moving(chan))
2091 while (chan->tx_send_head &&
2092 chan->unacked_frames < chan->remote_tx_win &&
2093 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2095 skb = chan->tx_send_head;
2097 bt_cb(skb)->control.retries = 1;
2098 control = &bt_cb(skb)->control;
2100 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2103 control->reqseq = chan->buffer_seq;
2104 chan->last_acked_seq = chan->buffer_seq;
2105 control->txseq = chan->next_tx_seq;
2107 __pack_control(chan, control, skb);
2109 if (chan->fcs == L2CAP_FCS_CRC16) {
2110 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2111 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2114 /* Clone after data has been modified. Data is assumed to be
2115 read-only (for locking purposes) on cloned sk_buffs.
2117 tx_skb = skb_clone(skb, GFP_KERNEL);
2122 __set_retrans_timer(chan);
2124 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2125 chan->unacked_frames++;
2126 chan->frames_sent++;
2129 if (skb_queue_is_last(&chan->tx_q, skb))
2130 chan->tx_send_head = NULL;
2132 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2134 l2cap_do_send(chan, tx_skb);
2135 BT_DBG("Sent txseq %u", control->txseq);
2138 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2139 chan->unacked_frames, skb_queue_len(&chan->tx_q));
2144 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2146 struct l2cap_ctrl control;
2147 struct sk_buff *skb;
2148 struct sk_buff *tx_skb;
2151 BT_DBG("chan %p", chan);
2153 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2156 if (__chan_is_moving(chan))
2159 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2160 seq = l2cap_seq_list_pop(&chan->retrans_list);
2162 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2164 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2169 bt_cb(skb)->control.retries++;
2170 control = bt_cb(skb)->control;
2172 if (chan->max_tx != 0 &&
2173 bt_cb(skb)->control.retries > chan->max_tx) {
2174 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2175 l2cap_send_disconn_req(chan, ECONNRESET);
2176 l2cap_seq_list_clear(&chan->retrans_list);
2180 control.reqseq = chan->buffer_seq;
2181 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2186 if (skb_cloned(skb)) {
2187 /* Cloned sk_buffs are read-only, so we need a
2190 tx_skb = skb_copy(skb, GFP_KERNEL);
2192 tx_skb = skb_clone(skb, GFP_KERNEL);
2196 l2cap_seq_list_clear(&chan->retrans_list);
2200 /* Update skb contents */
2201 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2202 put_unaligned_le32(__pack_extended_control(&control),
2203 tx_skb->data + L2CAP_HDR_SIZE);
2205 put_unaligned_le16(__pack_enhanced_control(&control),
2206 tx_skb->data + L2CAP_HDR_SIZE);
2209 if (chan->fcs == L2CAP_FCS_CRC16) {
2210 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2211 put_unaligned_le16(fcs, skb_put(tx_skb,
2215 l2cap_do_send(chan, tx_skb);
2217 BT_DBG("Resent txseq %d", control.txseq);
2219 chan->last_acked_seq = chan->buffer_seq;
2223 static void l2cap_retransmit(struct l2cap_chan *chan,
2224 struct l2cap_ctrl *control)
2226 BT_DBG("chan %p, control %p", chan, control);
2228 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2229 l2cap_ertm_resend(chan);
2232 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2233 struct l2cap_ctrl *control)
2235 struct sk_buff *skb;
2237 BT_DBG("chan %p, control %p", chan, control);
2240 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2242 l2cap_seq_list_clear(&chan->retrans_list);
2244 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2247 if (chan->unacked_frames) {
2248 skb_queue_walk(&chan->tx_q, skb) {
2249 if (bt_cb(skb)->control.txseq == control->reqseq ||
2250 skb == chan->tx_send_head)
2254 skb_queue_walk_from(&chan->tx_q, skb) {
2255 if (skb == chan->tx_send_head)
2258 l2cap_seq_list_append(&chan->retrans_list,
2259 bt_cb(skb)->control.txseq);
2262 l2cap_ertm_resend(chan);
2266 static void l2cap_send_ack(struct l2cap_chan *chan)
2268 struct l2cap_ctrl control;
2269 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2270 chan->last_acked_seq);
2273 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2274 chan, chan->last_acked_seq, chan->buffer_seq);
2276 memset(&control, 0, sizeof(control));
2279 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2280 chan->rx_state == L2CAP_RX_STATE_RECV) {
2281 __clear_ack_timer(chan);
2282 control.super = L2CAP_SUPER_RNR;
2283 control.reqseq = chan->buffer_seq;
2284 l2cap_send_sframe(chan, &control);
2286 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2287 l2cap_ertm_send(chan);
2288 /* If any i-frames were sent, they included an ack */
2289 if (chan->buffer_seq == chan->last_acked_seq)
2293 /* Ack now if the window is 3/4ths full.
2294 * Calculate without mul or div
2296 threshold = chan->ack_win;
2297 threshold += threshold << 1;
2300 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2303 if (frames_to_ack >= threshold) {
2304 __clear_ack_timer(chan);
2305 control.super = L2CAP_SUPER_RR;
2306 control.reqseq = chan->buffer_seq;
2307 l2cap_send_sframe(chan, &control);
2312 __set_ack_timer(chan);
2316 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2317 struct msghdr *msg, int len,
2318 int count, struct sk_buff *skb)
2320 struct l2cap_conn *conn = chan->conn;
2321 struct sk_buff **frag;
2324 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2330 /* Continuation fragments (no L2CAP header) */
2331 frag = &skb_shinfo(skb)->frag_list;
2333 struct sk_buff *tmp;
2335 count = min_t(unsigned int, conn->mtu, len);
2337 tmp = chan->ops->alloc_skb(chan, count,
2338 msg->msg_flags & MSG_DONTWAIT);
2340 return PTR_ERR(tmp);
2344 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2347 (*frag)->priority = skb->priority;
2352 skb->len += (*frag)->len;
2353 skb->data_len += (*frag)->len;
2355 frag = &(*frag)->next;
2361 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2362 struct msghdr *msg, size_t len,
2365 struct l2cap_conn *conn = chan->conn;
2366 struct sk_buff *skb;
2367 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2368 struct l2cap_hdr *lh;
2370 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2371 __le16_to_cpu(chan->psm), len, priority);
2373 count = min_t(unsigned int, (conn->mtu - hlen), len);
2375 skb = chan->ops->alloc_skb(chan, count + hlen,
2376 msg->msg_flags & MSG_DONTWAIT);
2380 skb->priority = priority;
2382 /* Create L2CAP header */
2383 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2384 lh->cid = cpu_to_le16(chan->dcid);
2385 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2386 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2388 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2389 if (unlikely(err < 0)) {
2391 return ERR_PTR(err);
2396 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2397 struct msghdr *msg, size_t len,
2400 struct l2cap_conn *conn = chan->conn;
2401 struct sk_buff *skb;
2403 struct l2cap_hdr *lh;
2405 BT_DBG("chan %p len %zu", chan, len);
2407 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2409 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2410 msg->msg_flags & MSG_DONTWAIT);
2414 skb->priority = priority;
2416 /* Create L2CAP header */
2417 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2418 lh->cid = cpu_to_le16(chan->dcid);
2419 lh->len = cpu_to_le16(len);
2421 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2422 if (unlikely(err < 0)) {
2424 return ERR_PTR(err);
2429 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2430 struct msghdr *msg, size_t len,
2433 struct l2cap_conn *conn = chan->conn;
2434 struct sk_buff *skb;
2435 int err, count, hlen;
2436 struct l2cap_hdr *lh;
2438 BT_DBG("chan %p len %zu", chan, len);
2441 return ERR_PTR(-ENOTCONN);
2443 hlen = __ertm_hdr_size(chan);
2446 hlen += L2CAP_SDULEN_SIZE;
2448 if (chan->fcs == L2CAP_FCS_CRC16)
2449 hlen += L2CAP_FCS_SIZE;
2451 count = min_t(unsigned int, (conn->mtu - hlen), len);
2453 skb = chan->ops->alloc_skb(chan, count + hlen,
2454 msg->msg_flags & MSG_DONTWAIT);
2458 /* Create L2CAP header */
2459 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2460 lh->cid = cpu_to_le16(chan->dcid);
2461 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2463 /* Control header is populated later */
2464 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2465 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2467 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2470 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2472 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2473 if (unlikely(err < 0)) {
2475 return ERR_PTR(err);
2478 bt_cb(skb)->control.fcs = chan->fcs;
2479 bt_cb(skb)->control.retries = 0;
2483 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2484 struct sk_buff_head *seg_queue,
2485 struct msghdr *msg, size_t len)
2487 struct sk_buff *skb;
2492 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2494 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2495 * so fragmented skbs are not used. The HCI layer's handling
2496 * of fragmented skbs is not compatible with ERTM's queueing.
2499 /* PDU size is derived from the HCI MTU */
2500 pdu_len = chan->conn->mtu;
2502 /* Constrain PDU size for BR/EDR connections */
2504 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2506 /* Adjust for largest possible L2CAP overhead. */
2508 pdu_len -= L2CAP_FCS_SIZE;
2510 pdu_len -= __ertm_hdr_size(chan);
2512 /* Remote device may have requested smaller PDUs */
2513 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2515 if (len <= pdu_len) {
2516 sar = L2CAP_SAR_UNSEGMENTED;
2520 sar = L2CAP_SAR_START;
2522 pdu_len -= L2CAP_SDULEN_SIZE;
2526 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2529 __skb_queue_purge(seg_queue);
2530 return PTR_ERR(skb);
2533 bt_cb(skb)->control.sar = sar;
2534 __skb_queue_tail(seg_queue, skb);
2539 pdu_len += L2CAP_SDULEN_SIZE;
2542 if (len <= pdu_len) {
2543 sar = L2CAP_SAR_END;
2546 sar = L2CAP_SAR_CONTINUE;
2553 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2555 size_t len, u16 sdulen)
2557 struct l2cap_conn *conn = chan->conn;
2558 struct sk_buff *skb;
2559 int err, count, hlen;
2560 struct l2cap_hdr *lh;
2562 BT_DBG("chan %p len %zu", chan, len);
2565 return ERR_PTR(-ENOTCONN);
2567 hlen = L2CAP_HDR_SIZE;
2570 hlen += L2CAP_SDULEN_SIZE;
2572 count = min_t(unsigned int, (conn->mtu - hlen), len);
2574 skb = chan->ops->alloc_skb(chan, count + hlen,
2575 msg->msg_flags & MSG_DONTWAIT);
2579 /* Create L2CAP header */
2580 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2581 lh->cid = cpu_to_le16(chan->dcid);
2582 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2585 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2587 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2588 if (unlikely(err < 0)) {
2590 return ERR_PTR(err);
2596 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2597 struct sk_buff_head *seg_queue,
2598 struct msghdr *msg, size_t len)
2600 struct sk_buff *skb;
2604 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2606 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2608 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2611 pdu_len -= L2CAP_SDULEN_SIZE;
2617 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2619 __skb_queue_purge(seg_queue);
2620 return PTR_ERR(skb);
2623 __skb_queue_tail(seg_queue, skb);
2629 pdu_len += L2CAP_SDULEN_SIZE;
2636 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2639 struct sk_buff *skb;
2641 struct sk_buff_head seg_queue;
2646 /* Connectionless channel */
2647 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2648 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2650 return PTR_ERR(skb);
2652 l2cap_do_send(chan, skb);
2656 switch (chan->mode) {
2657 case L2CAP_MODE_LE_FLOWCTL:
2658 /* Check outgoing MTU */
2659 if (len > chan->omtu)
2662 if (!chan->tx_credits)
2665 __skb_queue_head_init(&seg_queue);
2667 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2669 if (chan->state != BT_CONNECTED) {
2670 __skb_queue_purge(&seg_queue);
2677 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2679 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2680 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2684 if (!chan->tx_credits)
2685 chan->ops->suspend(chan);
2691 case L2CAP_MODE_BASIC:
2692 /* Check outgoing MTU */
2693 if (len > chan->omtu)
2696 /* Create a basic PDU */
2697 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2699 return PTR_ERR(skb);
2701 l2cap_do_send(chan, skb);
2705 case L2CAP_MODE_ERTM:
2706 case L2CAP_MODE_STREAMING:
2707 /* Check outgoing MTU */
2708 if (len > chan->omtu) {
2713 __skb_queue_head_init(&seg_queue);
2715 /* Do segmentation before calling in to the state machine,
2716 * since it's possible to block while waiting for memory
2719 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2721 /* The channel could have been closed while segmenting,
2722 * check that it is still connected.
2724 if (chan->state != BT_CONNECTED) {
2725 __skb_queue_purge(&seg_queue);
2732 if (chan->mode == L2CAP_MODE_ERTM)
2733 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2735 l2cap_streaming_send(chan, &seg_queue);
2739 /* If the skbs were not queued for sending, they'll still be in
2740 * seg_queue and need to be purged.
2742 __skb_queue_purge(&seg_queue);
2746 BT_DBG("bad state %1.1x", chan->mode);
2753 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2755 struct l2cap_ctrl control;
2758 BT_DBG("chan %p, txseq %u", chan, txseq);
2760 memset(&control, 0, sizeof(control));
2762 control.super = L2CAP_SUPER_SREJ;
2764 for (seq = chan->expected_tx_seq; seq != txseq;
2765 seq = __next_seq(chan, seq)) {
2766 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2767 control.reqseq = seq;
2768 l2cap_send_sframe(chan, &control);
2769 l2cap_seq_list_append(&chan->srej_list, seq);
2773 chan->expected_tx_seq = __next_seq(chan, txseq);
2776 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2778 struct l2cap_ctrl control;
2780 BT_DBG("chan %p", chan);
2782 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2785 memset(&control, 0, sizeof(control));
2787 control.super = L2CAP_SUPER_SREJ;
2788 control.reqseq = chan->srej_list.tail;
2789 l2cap_send_sframe(chan, &control);
2792 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2794 struct l2cap_ctrl control;
2798 BT_DBG("chan %p, txseq %u", chan, txseq);
2800 memset(&control, 0, sizeof(control));
2802 control.super = L2CAP_SUPER_SREJ;
2804 /* Capture initial list head to allow only one pass through the list. */
2805 initial_head = chan->srej_list.head;
2808 seq = l2cap_seq_list_pop(&chan->srej_list);
2809 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2812 control.reqseq = seq;
2813 l2cap_send_sframe(chan, &control);
2814 l2cap_seq_list_append(&chan->srej_list, seq);
2815 } while (chan->srej_list.head != initial_head);
2818 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2820 struct sk_buff *acked_skb;
2823 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2825 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2828 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2829 chan->expected_ack_seq, chan->unacked_frames);
2831 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2832 ackseq = __next_seq(chan, ackseq)) {
2834 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2836 skb_unlink(acked_skb, &chan->tx_q);
2837 kfree_skb(acked_skb);
2838 chan->unacked_frames--;
2842 chan->expected_ack_seq = reqseq;
2844 if (chan->unacked_frames == 0)
2845 __clear_retrans_timer(chan);
2847 BT_DBG("unacked_frames %u", chan->unacked_frames);
2850 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2852 BT_DBG("chan %p", chan);
2854 chan->expected_tx_seq = chan->buffer_seq;
2855 l2cap_seq_list_clear(&chan->srej_list);
2856 skb_queue_purge(&chan->srej_q);
2857 chan->rx_state = L2CAP_RX_STATE_RECV;
2860 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2861 struct l2cap_ctrl *control,
2862 struct sk_buff_head *skbs, u8 event)
2864 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2868 case L2CAP_EV_DATA_REQUEST:
2869 if (chan->tx_send_head == NULL)
2870 chan->tx_send_head = skb_peek(skbs);
2872 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2873 l2cap_ertm_send(chan);
2875 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2876 BT_DBG("Enter LOCAL_BUSY");
2877 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2879 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2880 /* The SREJ_SENT state must be aborted if we are to
2881 * enter the LOCAL_BUSY state.
2883 l2cap_abort_rx_srej_sent(chan);
2886 l2cap_send_ack(chan);
2889 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2890 BT_DBG("Exit LOCAL_BUSY");
2891 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2893 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2894 struct l2cap_ctrl local_control;
2896 memset(&local_control, 0, sizeof(local_control));
2897 local_control.sframe = 1;
2898 local_control.super = L2CAP_SUPER_RR;
2899 local_control.poll = 1;
2900 local_control.reqseq = chan->buffer_seq;
2901 l2cap_send_sframe(chan, &local_control);
2903 chan->retry_count = 1;
2904 __set_monitor_timer(chan);
2905 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2908 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2909 l2cap_process_reqseq(chan, control->reqseq);
2911 case L2CAP_EV_EXPLICIT_POLL:
2912 l2cap_send_rr_or_rnr(chan, 1);
2913 chan->retry_count = 1;
2914 __set_monitor_timer(chan);
2915 __clear_ack_timer(chan);
2916 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2918 case L2CAP_EV_RETRANS_TO:
2919 l2cap_send_rr_or_rnr(chan, 1);
2920 chan->retry_count = 1;
2921 __set_monitor_timer(chan);
2922 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2924 case L2CAP_EV_RECV_FBIT:
2925 /* Nothing to process */
/* ERTM transmit state machine: handler for the WAIT_F state, entered after
 * sending a poll (P=1) S-frame.  While waiting for the peer's F=1 response,
 * new data is only queued, local-busy transitions are tracked, and the
 * monitor timer drives retries until max_tx is exhausted.
 * NOTE(review): this listing is elided (embedded line numbers jump), so
 * case terminators/braces between events are not all visible here.
 */
2932 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2933 struct l2cap_ctrl *control,
2934 struct sk_buff_head *skbs, u8 event)
2936 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2940 case L2CAP_EV_DATA_REQUEST:
2941 if (chan->tx_send_head == NULL)
2942 chan->tx_send_head = skb_peek(skbs);
2943 /* Queue data, but don't send. */
2944 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2946 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2947 BT_DBG("Enter LOCAL_BUSY");
2948 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2950 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2951 /* The SREJ_SENT state must be aborted if we are to
2952 * enter the LOCAL_BUSY state.
2954 l2cap_abort_rx_srej_sent(chan);
2957 l2cap_send_ack(chan);
2960 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2961 BT_DBG("Exit LOCAL_BUSY");
2962 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2964 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2965 struct l2cap_ctrl local_control;
/* We previously advertised busy with RNR; now that busy has
 * cleared, send an RR poll so the peer resumes transmission.
 */
2966 memset(&local_control, 0, sizeof(local_control));
2967 local_control.sframe = 1;
2968 local_control.super = L2CAP_SUPER_RR;
2969 local_control.poll = 1;
2970 local_control.reqseq = chan->buffer_seq;
2971 l2cap_send_sframe(chan, &local_control);
2973 chan->retry_count = 1;
2974 __set_monitor_timer(chan);
2975 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2978 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2979 l2cap_process_reqseq(chan, control->reqseq);
2983 case L2CAP_EV_RECV_FBIT:
2984 if (control && control->final) {
/* F=1 answers our poll: stop the monitor timer, re-arm the
 * retransmission timer if frames are still unacked, and return
 * to the normal XMIT state.
 */
2985 __clear_monitor_timer(chan);
2986 if (chan->unacked_frames > 0)
2987 __set_retrans_timer(chan);
2988 chan->retry_count = 0;
2989 chan->tx_state = L2CAP_TX_STATE_XMIT;
2990 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2993 case L2CAP_EV_EXPLICIT_POLL:
2996 case L2CAP_EV_MONITOR_TO:
/* Monitor timeout: retry the poll up to max_tx times (max_tx == 0
 * means unlimited retries), otherwise tear the channel down.
 */
2997 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2998 l2cap_send_rr_or_rnr(chan, 1);
2999 __set_monitor_timer(chan);
3000 chan->retry_count++;
3002 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a transmit-path event to the handler for the channel's current
 * ERTM tx state (XMIT or WAIT_F).  @control and @skbs may be NULL for
 * events that carry no frame data.
 */
3010 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3011 struct sk_buff_head *skbs, u8 event)
3013 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3014 chan, control, skbs, event, chan->tx_state);
3016 switch (chan->tx_state) {
3017 case L2CAP_TX_STATE_XMIT:
3018 l2cap_tx_state_xmit(chan, control, skbs, event);
3020 case L2CAP_TX_STATE_WAIT_F:
3021 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received control field's reqseq/F-bit into the tx state machine
 * so acknowledged frames can be released.
 */
3029 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3030 struct l2cap_ctrl *control)
3032 BT_DBG("chan %p, control %p", chan, control);
3033 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the received F-bit into the tx state machine (no reqseq
 * processing) — used when the frame's reqseq was already handled.
 */
3036 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3037 struct l2cap_ctrl *control)
3039 BT_DBG("chan %p, control %p", chan, control);
3040 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3043 /* Copy frame to all raw sockets on that connection */
3044 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3046 struct sk_buff *nskb;
3047 struct l2cap_chan *chan;
3049 BT_DBG("conn %p", conn);
3051 mutex_lock(&conn->chan_lock);
3053 list_for_each_entry(chan, &conn->chan_l, list) {
/* Only raw channels receive copies. */
3054 if (chan->chan_type != L2CAP_CHAN_RAW)
3057 /* Don't send frame to the channel it came from */
3058 if (bt_cb(skb)->chan == chan)
/* Each raw channel gets its own clone; recv() takes ownership
 * of nskb on success.
 */
3061 nskb = skb_clone(skb, GFP_KERNEL);
3064 if (chan->ops->recv(chan, nskb))
3068 mutex_unlock(&conn->chan_lock);
3071 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header + command header + @dlen
 * bytes of @data.  If the PDU exceeds the connection MTU, the payload is
 * split into frag_list continuation skbs (which carry no L2CAP header).
 * Returns the head skb, or NULL on allocation failure / too-small MTU
 * (error paths are elided from this view).
 */
3072 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3073 u8 ident, u16 dlen, void *data)
3075 struct sk_buff *skb, **frag;
3076 struct l2cap_cmd_hdr *cmd;
3077 struct l2cap_hdr *lh;
3080 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3081 conn, code, ident, dlen);
3083 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3086 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3087 count = min_t(unsigned int, conn->mtu, len);
3089 skb = bt_skb_alloc(count, GFP_KERNEL);
3093 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
3094 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
3096 if (conn->hcon->type == LE_LINK)
3097 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3099 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
3101 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
3104 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers. */
3107 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3108 memcpy(skb_put(skb, count), data, count);
3114 /* Continuation fragments (no L2CAP header) */
3115 frag = &skb_shinfo(skb)->frag_list;
3117 count = min_t(unsigned int, conn->mtu, len);
3119 *frag = bt_skb_alloc(count, GFP_KERNEL);
3123 memcpy(skb_put(*frag, count), data, count);
3128 frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning its total size and
 * writing the option type/length/value to the out-parameters.  1- ,2- and
 * 4-byte values are read inline (unaligned-safe); larger values are
 * returned as a pointer to the option payload cast into *val.
 * NOTE(review): the advance of *ptr and the default-size cases are elided
 * from this view.
 */
3138 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3141 struct l2cap_conf_opt *opt = *ptr;
3144 len = L2CAP_CONF_OPT_SIZE + opt->len;
3152 *val = *((u8 *) opt->val);
3156 *val = get_unaligned_le16(opt->val);
3160 *val = get_unaligned_le32(opt->val);
3164 *val = (unsigned long) opt->val;
3168 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append a configuration option (type/len/value) at *ptr and advance the
 * pointer past it.  For len 1, 2 or 4 the value is stored inline
 * (unaligned-safe); otherwise @val is treated as a pointer and @len bytes
 * are copied.  The caller must guarantee buffer space.
 */
3172 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3174 struct l2cap_conf_opt *opt = *ptr;
3176 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3183 *((u8 *) opt->val) = val;
3187 put_unaligned_le16(val, opt->val);
3191 put_unaligned_le32(val, opt->val);
3195 memcpy(opt->val, (void *) val, len);
3199 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option describing this
 * channel's local service parameters.  ERTM uses the channel's configured
 * service type with default access latency / flush timeout; streaming mode
 * always advertises best-effort.
 */
3202 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3204 struct l2cap_conf_efs efs;
3206 switch (chan->mode) {
3207 case L2CAP_MODE_ERTM:
3208 efs.id = chan->local_id;
3209 efs.stype = chan->local_stype;
3210 efs.msdu = cpu_to_le16(chan->local_msdu);
3211 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3212 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3213 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3216 case L2CAP_MODE_STREAMING:
3218 efs.stype = L2CAP_SERV_BESTEFFORT;
3219 efs.msdu = cpu_to_le16(chan->local_msdu);
3220 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3229 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3230 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if frames have been
 * received but not yet acknowledged (buffer_seq ahead of last_acked_seq),
 * send an RR/RNR ack.  Drops the reference taken when the timer was armed.
 */
3233 static void l2cap_ack_timeout(struct work_struct *work)
3235 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3239 BT_DBG("chan %p", chan);
3241 l2cap_chan_lock(chan);
3243 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3244 chan->last_acked_seq);
3247 l2cap_send_rr_or_rnr(chan, 0);
3249 l2cap_chan_unlock(chan);
3250 l2cap_chan_put(chan);
/* Initialise per-channel ERTM/streaming state: reset all sequence
 * counters, the tx queue and AMP move state.  For ERTM proper, also set
 * the initial rx/tx states, arm the retransmission/monitor/ack work items
 * and allocate the SREJ and retransmission sequence lists.
 * Returns 0 on success or a negative errno from l2cap_seq_list_init()
 * (the srej_list is freed if the retrans_list allocation fails).
 */
3253 int l2cap_ertm_init(struct l2cap_chan *chan)
3257 chan->next_tx_seq = 0;
3258 chan->expected_tx_seq = 0;
3259 chan->expected_ack_seq = 0;
3260 chan->unacked_frames = 0;
3261 chan->buffer_seq = 0;
3262 chan->frames_sent = 0;
3263 chan->last_acked_seq = 0;
3265 chan->sdu_last_frag = NULL;
3268 skb_queue_head_init(&chan->tx_q);
/* Channel starts on the BR/EDR controller with no AMP move in progress. */
3270 chan->local_amp_id = AMP_ID_BREDR;
3271 chan->move_id = AMP_ID_BREDR;
3272 chan->move_state = L2CAP_MOVE_STABLE;
3273 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the retransmission machinery below. */
3275 if (chan->mode != L2CAP_MODE_ERTM)
3278 chan->rx_state = L2CAP_RX_STATE_RECV;
3279 chan->tx_state = L2CAP_TX_STATE_XMIT;
3281 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3282 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3283 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3285 skb_queue_head_init(&chan->srej_q);
3287 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3291 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3293 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep the requested ERTM/streaming mode if
 * the remote's feature mask supports it, otherwise fall back to basic mode.
 */
3298 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3301 case L2CAP_MODE_STREAMING:
3302 case L2CAP_MODE_ERTM:
3303 if (l2cap_mode_supported(mode, remote_feat_mask))
3307 return L2CAP_MODE_BASIC;
/* Extended window size is usable only with high-speed (AMP) support plus
 * the remote's extended-window feature bit.
 */
3311 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3313 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow specification is usable only with high-speed (AMP) support
 * plus the remote's extended-flow feature bit.
 */
3316 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3318 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts.  On an AMP
 * controller they are derived from the controller's best-effort flush
 * timeout (converted to ms, scaled, clamped to 16 bits); on BR/EDR the
 * spec defaults are used.
 */
3321 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3322 struct l2cap_conf_rfc *rfc)
3324 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3325 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3327 /* Class 1 devices have must have ERTM timeouts
3328 * exceeding the Link Supervision Timeout. The
3329 * default Link Supervision Timeout for AMP
3330 * controllers is 10 seconds.
3332 * Class 1 devices use 0xffffffff for their
3333 * best-effort flush timeout, so the clamping logic
3334 * will result in a timeout that meets the above
3335 * requirement. ERTM timeouts are 16-bit values, so
3336 * the maximum timeout is 65.535 seconds.
3339 /* Convert timeout to milliseconds and round */
3340 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3342 /* This is the recommended formula for class 2 devices
3343 * that start ERTM timers when packets are sent to the
3346 ertm_to = 3 * ertm_to + 500;
3348 if (ertm_to > 0xffff)
3351 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3352 rfc->monitor_timeout = rfc->retrans_timeout;
3354 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3355 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the tx window: if the requested window exceeds the default and
 * extended windows are supported, switch to the extended control field;
 * otherwise clamp to the standard maximum.  ack_win mirrors tx_win.
 */
3359 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3361 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3362 __l2cap_ews_supported(chan->conn)) {
3363 /* use extended control field */
3364 set_bit(FLAG_EXT_CTRL, &chan->flags);
3365 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3367 chan->tx_win = min_t(u16, chan->tx_win,
3368 L2CAP_DEFAULT_TX_WINDOW);
3369 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3371 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for @chan into @data, adding
 * MTU, RFC, EFS, EWS and FCS options as appropriate for the channel mode.
 * On the first request a state-2 device may downgrade the mode based on
 * the remote feature mask.  Returns the length of the built request
 * (return statement elided from this view).
 */
3374 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3376 struct l2cap_conf_req *req = data;
3377 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3378 void *ptr = req->data;
3381 BT_DBG("chan %p", chan);
/* Mode selection only happens on the very first config exchange. */
3383 if (chan->num_conf_req || chan->num_conf_rsp)
3386 switch (chan->mode) {
3387 case L2CAP_MODE_STREAMING:
3388 case L2CAP_MODE_ERTM:
3389 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3392 if (__l2cap_efs_supported(chan->conn))
3393 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3397 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it differs from the spec default. */
3402 if (chan->imtu != L2CAP_DEFAULT_MTU)
3403 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3405 switch (chan->mode) {
3406 case L2CAP_MODE_BASIC:
/* In basic mode an explicit RFC option is only needed when the
 * remote understands ERTM or streaming at all.
 */
3407 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3408 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3411 rfc.mode = L2CAP_MODE_BASIC;
3413 rfc.max_transmit = 0;
3414 rfc.retrans_timeout = 0;
3415 rfc.monitor_timeout = 0;
3416 rfc.max_pdu_size = 0;
3418 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3419 (unsigned long) &rfc);
3422 case L2CAP_MODE_ERTM:
3423 rfc.mode = L2CAP_MODE_ERTM;
3424 rfc.max_transmit = chan->max_tx;
3426 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size limited by connection MTU minus ERTM overhead. */
3428 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3429 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3431 rfc.max_pdu_size = cpu_to_le16(size);
3433 l2cap_txwin_setup(chan);
3435 rfc.txwin_size = min_t(u16, chan->tx_win,
3436 L2CAP_DEFAULT_TX_WINDOW);
3438 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3439 (unsigned long) &rfc);
3441 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3442 l2cap_add_opt_efs(&ptr, chan);
3444 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3445 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to disable FCS if we or the remote prefer no checksum. */
3448 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3449 if (chan->fcs == L2CAP_FCS_NONE ||
3450 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3451 chan->fcs = L2CAP_FCS_NONE;
3452 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3457 case L2CAP_MODE_STREAMING:
3458 l2cap_txwin_setup(chan);
3459 rfc.mode = L2CAP_MODE_STREAMING;
3461 rfc.max_transmit = 0;
3462 rfc.retrans_timeout = 0;
3463 rfc.monitor_timeout = 0;
3465 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3466 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3468 rfc.max_pdu_size = cpu_to_le16(size);
3470 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3471 (unsigned long) &rfc);
3473 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3474 l2cap_add_opt_efs(&ptr, chan);
3476 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3477 if (chan->fcs == L2CAP_FCS_NONE ||
3478 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3479 chan->fcs = L2CAP_FCS_NONE;
3480 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3486 req->dcid = cpu_to_le16(chan->dcid);
3487 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated remote Configuration Request (chan->conf_req /
 * chan->conf_len) and build the Configuration Response into @data.
 * Walks the option list, records MTU/flush/RFC/FCS/EFS/EWS values,
 * reconciles the requested mode with ours, then emits accept/unaccept
 * options and the result code.  Returns the response length (return
 * statement elided from this view); -ECONNREFUSED aborts the channel.
 */
3492 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3494 struct l2cap_conf_rsp *rsp = data;
3495 void *ptr = rsp->data;
3496 void *req = chan->conf_req;
3497 int len = chan->conf_len;
3498 int type, hint, olen;
3500 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3501 struct l2cap_conf_efs efs;
3503 u16 mtu = L2CAP_DEFAULT_MTU;
3504 u16 result = L2CAP_CONF_SUCCESS;
3507 BT_DBG("chan %p", chan);
/* First pass: decode every option the remote sent. */
3509 while (len >= L2CAP_CONF_OPT_SIZE) {
3510 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently skipped; non-hints we don't know
 * must be reported back as unknown.
 */
3512 hint = type & L2CAP_CONF_HINT;
3513 type &= L2CAP_CONF_MASK;
3516 case L2CAP_CONF_MTU:
3520 case L2CAP_CONF_FLUSH_TO:
3521 chan->flush_to = val;
3524 case L2CAP_CONF_QOS:
3527 case L2CAP_CONF_RFC:
3528 if (olen == sizeof(rfc))
3529 memcpy(&rfc, (void *) val, olen);
3532 case L2CAP_CONF_FCS:
3533 if (val == L2CAP_FCS_NONE)
3534 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3537 case L2CAP_CONF_EFS:
3539 if (olen == sizeof(efs))
3540 memcpy(&efs, (void *) val, olen);
3543 case L2CAP_CONF_EWS:
/* Extended window only valid when high speed is enabled. */
3544 if (!chan->conn->hs_enabled)
3545 return -ECONNREFUSED;
3547 set_bit(FLAG_EXT_CTRL, &chan->flags);
3548 set_bit(CONF_EWS_RECV, &chan->conf_state);
3549 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3550 chan->remote_tx_win = val;
3557 result = L2CAP_CONF_UNKNOWN;
3558 *((u8 *) ptr++) = type;
/* Mode reconciliation only on the first config exchange. */
3563 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3566 switch (chan->mode) {
3567 case L2CAP_MODE_STREAMING:
3568 case L2CAP_MODE_ERTM:
3569 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3570 chan->mode = l2cap_select_mode(rfc.mode,
3571 chan->conn->feat_mask);
3576 if (__l2cap_efs_supported(chan->conn))
3577 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3579 return -ECONNREFUSED;
3582 if (chan->mode != rfc.mode)
3583 return -ECONNREFUSED;
3589 if (chan->mode != rfc.mode) {
/* Modes disagree: propose our mode back; a second mismatch is
 * fatal for the channel.
 */
3590 result = L2CAP_CONF_UNACCEPT;
3591 rfc.mode = chan->mode;
3593 if (chan->num_conf_rsp == 1)
3594 return -ECONNREFUSED;
3596 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3597 (unsigned long) &rfc);
3600 if (result == L2CAP_CONF_SUCCESS) {
3601 /* Configure output options and let the other side know
3602 * which ones we don't like. */
3604 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3605 result = L2CAP_CONF_UNACCEPT;
3608 set_bit(CONF_MTU_DONE, &chan->conf_state);
3610 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service types must be compatible unless one side is
 * no-traffic; an incompatible second request is fatal.
 */
3613 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3614 efs.stype != L2CAP_SERV_NOTRAFIC &&
3615 efs.stype != chan->local_stype) {
3617 result = L2CAP_CONF_UNACCEPT;
3619 if (chan->num_conf_req >= 1)
3620 return -ECONNREFUSED;
3622 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3624 (unsigned long) &efs);
3626 /* Send PENDING Conf Rsp */
3627 result = L2CAP_CONF_PENDING;
3628 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3633 case L2CAP_MODE_BASIC:
3634 chan->fcs = L2CAP_FCS_NONE;
3635 set_bit(CONF_MODE_DONE, &chan->conf_state);
3638 case L2CAP_MODE_ERTM:
3639 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3640 chan->remote_tx_win = rfc.txwin_size;
3642 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3644 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the remote's PDU size to what our link MTU allows. */
3646 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3647 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3648 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3649 rfc.max_pdu_size = cpu_to_le16(size);
3650 chan->remote_mps = size;
3652 __l2cap_set_ertm_timeouts(chan, &rfc);
3654 set_bit(CONF_MODE_DONE, &chan->conf_state);
3656 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3657 sizeof(rfc), (unsigned long) &rfc);
3659 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3660 chan->remote_id = efs.id;
3661 chan->remote_stype = efs.stype;
3662 chan->remote_msdu = le16_to_cpu(efs.msdu);
3663 chan->remote_flush_to =
3664 le32_to_cpu(efs.flush_to);
3665 chan->remote_acc_lat =
3666 le32_to_cpu(efs.acc_lat);
3667 chan->remote_sdu_itime =
3668 le32_to_cpu(efs.sdu_itime);
3669 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3671 (unsigned long) &efs);
3675 case L2CAP_MODE_STREAMING:
3676 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3677 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3678 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3679 rfc.max_pdu_size = cpu_to_le16(size);
3680 chan->remote_mps = size;
3682 set_bit(CONF_MODE_DONE, &chan->conf_state);
3684 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3685 (unsigned long) &rfc);
3690 result = L2CAP_CONF_UNACCEPT;
3692 memset(&rfc, 0, sizeof(rfc));
3693 rfc.mode = chan->mode;
3696 if (result == L2CAP_CONF_SUCCESS)
3697 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3699 rsp->scid = cpu_to_le16(chan->dcid);
3700 rsp->result = cpu_to_le16(result);
3701 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a received Configuration Response (@rsp/@len) and build the
 * follow-up Configuration Request into @data, echoing back the negotiated
 * options.  *result may be updated (e.g. to UNACCEPT on a too-small MTU).
 * On SUCCESS/PENDING the negotiated ERTM/streaming parameters are stored
 * on the channel.  Returns the new request length (return elided from
 * this view); -ECONNREFUSED on irreconcilable mode or EFS mismatch.
 */
3706 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3707 void *data, u16 *result)
3709 struct l2cap_conf_req *req = data;
3710 void *ptr = req->data;
3713 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3714 struct l2cap_conf_efs efs;
3716 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3718 while (len >= L2CAP_CONF_OPT_SIZE) {
3719 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3722 case L2CAP_CONF_MTU:
/* Reject MTUs below the spec minimum but still propose the
 * minimum back to the remote.
 */
3723 if (val < L2CAP_DEFAULT_MIN_MTU) {
3724 *result = L2CAP_CONF_UNACCEPT;
3725 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3728 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3731 case L2CAP_CONF_FLUSH_TO:
3732 chan->flush_to = val;
3733 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3737 case L2CAP_CONF_RFC:
3738 if (olen == sizeof(rfc))
3739 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not change mode mid-negotiation. */
3741 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3742 rfc.mode != chan->mode)
3743 return -ECONNREFUSED;
3747 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3748 sizeof(rfc), (unsigned long) &rfc);
3751 case L2CAP_CONF_EWS:
3752 chan->ack_win = min_t(u16, val, chan->ack_win);
3753 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3757 case L2CAP_CONF_EFS:
3758 if (olen == sizeof(efs))
3759 memcpy(&efs, (void *)val, olen);
3761 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3762 efs.stype != L2CAP_SERV_NOTRAFIC &&
3763 efs.stype != chan->local_stype)
3764 return -ECONNREFUSED;
3766 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3767 (unsigned long) &efs);
3770 case L2CAP_CONF_FCS:
3771 if (*result == L2CAP_CONF_PENDING)
3772 if (val == L2CAP_FCS_NONE)
3773 set_bit(CONF_RECV_NO_FCS,
/* We will not fall back from basic mode to whatever the remote put in
 * the RFC option.
 */
3779 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3780 return -ECONNREFUSED;
3782 chan->mode = rfc.mode;
3784 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3786 case L2CAP_MODE_ERTM:
3787 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3788 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3789 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3790 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3791 chan->ack_win = min_t(u16, chan->ack_win,
3794 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3795 chan->local_msdu = le16_to_cpu(efs.msdu);
3796 chan->local_sdu_itime =
3797 le32_to_cpu(efs.sdu_itime);
3798 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3799 chan->local_flush_to =
3800 le32_to_cpu(efs.flush_to);
3804 case L2CAP_MODE_STREAMING:
3805 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3809 req->dcid = cpu_to_le16(chan->dcid);
3810 req->flags = __constant_cpu_to_le16(0);
/* Build a minimal Configuration Response (no options) with the given
 * @result and @flags into @data; returns the response length (return
 * statement elided from this view).
 */
3815 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3816 u16 result, u16 flags)
3818 struct l2cap_conf_rsp *rsp = data;
3819 void *ptr = rsp->data;
3821 BT_DBG("chan %p", chan);
3823 rsp->scid = cpu_to_le16(chan->dcid);
3824 rsp->result = cpu_to_le16(result);
3825 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE connection response for a channel whose acceptance
 * was postponed (e.g. pending authorisation), advertising our MTU, MPS
 * and initial credits with a SUCCESS result.
 */
3830 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3832 struct l2cap_le_conn_rsp rsp;
3833 struct l2cap_conn *conn = chan->conn;
3835 BT_DBG("chan %p", chan);
3837 rsp.dcid = cpu_to_le16(chan->scid);
3838 rsp.mtu = cpu_to_le16(chan->imtu);
3839 rsp.mps = cpu_to_le16(chan->mps);
3840 rsp.credits = cpu_to_le16(chan->rx_credits);
3841 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3843 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR (or AMP create-channel) connection response
 * with a SUCCESS result, then kick off configuration by sending our first
 * Configuration Request if one has not already been sent.
 */
3847 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3849 struct l2cap_conn_rsp rsp;
3850 struct l2cap_conn *conn = chan->conn;
3854 rsp.scid = cpu_to_le16(chan->dcid);
3855 rsp.dcid = cpu_to_le16(chan->scid);
3856 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3857 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP-created channels answer with CREATE_CHAN_RSP instead of CONN_RSP. */
3860 rsp_code = L2CAP_CREATE_CHAN_RSP;
3862 rsp_code = L2CAP_CONN_RSP;
3864 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3866 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller past this test sends the config request. */
3868 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3871 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3872 l2cap_build_conf_req(chan, buf), buf);
3873 chan->num_conf_req++;
/* Extract the RFC (and extended window) options from a successful
 * Configuration Response and commit the negotiated ERTM/streaming
 * parameters to the channel.  Sane defaults are pre-loaded in case the
 * remote omitted the options.  No-op for basic-mode channels.
 */
3876 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3880 /* Use sane default values in case a misbehaving remote device
3881 * did not send an RFC or extended window size option.
3883 u16 txwin_ext = chan->ack_win;
3884 struct l2cap_conf_rfc rfc = {
3886 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3887 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3888 .max_pdu_size = cpu_to_le16(chan->imtu),
3889 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3892 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3894 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3897 while (len >= L2CAP_CONF_OPT_SIZE) {
3898 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3901 case L2CAP_CONF_RFC:
3902 if (olen == sizeof(rfc))
3903 memcpy(&rfc, (void *)val, olen);
3905 case L2CAP_CONF_EWS:
3912 case L2CAP_MODE_ERTM:
3913 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3914 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3915 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* The ack window comes from EWS with extended control, otherwise
 * from the RFC tx window.
 */
3916 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3917 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3919 chan->ack_win = min_t(u16, chan->ack_win,
3922 case L2CAP_MODE_STREAMING:
3923 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * Information Request (matching ident, NOT_UNDERSTOOD), treat feature
 * discovery as done and proceed with starting queued channels.
 */
3927 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3928 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3931 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated reject PDUs before touching fields. */
3933 if (cmd_len < sizeof(*rej))
3936 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3939 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3940 cmd->ident == conn->info_ident) {
3941 cancel_delayed_work(&conn->info_timer);
3943 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3944 conn->info_ident = 0;
3946 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: look up a listening channel for
 * the PSM, enforce link security (except SDP), reject duplicate source
 * CIDs, create the new channel, choose the result/status (success,
 * pending-auth, pending-author), send the response, trigger feature
 * discovery if needed and, on immediate success, send the first
 * Configuration Request.  Returns the new channel (or NULL on rejection —
 * the failure-path labels are elided from this view).
 */
3952 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3953 struct l2cap_cmd_hdr *cmd,
3954 u8 *data, u8 rsp_code, u8 amp_id)
3956 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3957 struct l2cap_conn_rsp rsp;
3958 struct l2cap_chan *chan = NULL, *pchan;
3959 int result, status = L2CAP_CS_NO_INFO;
3961 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3962 __le16 psm = req->psm;
3964 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3966 /* Check if we have socket listening on psm */
3967 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3968 &conn->hcon->dst, ACL_LINK);
3970 result = L2CAP_CR_BAD_PSM;
3974 mutex_lock(&conn->chan_lock);
3975 l2cap_chan_lock(pchan);
3977 /* Check if the ACL is secure enough (if not SDP) */
3978 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3979 !hci_conn_check_link_mode(conn->hcon)) {
3980 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3981 result = L2CAP_CR_SEC_BLOCK;
3985 result = L2CAP_CR_NO_MEM;
3987 /* Check if we already have channel with that dcid */
3988 if (__l2cap_get_chan_by_dcid(conn, scid))
3991 chan = pchan->ops->new_connection(pchan);
3995 /* For certain devices (ex: HID mouse), support for authentication,
3996 * pairing and bonding is optional. For such devices, inorder to avoid
3997 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3998 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4000 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4002 bacpy(&chan->src, &conn->hcon->src);
4003 bacpy(&chan->dst, &conn->hcon->dst);
4004 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
4005 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
4008 chan->local_amp_id = amp_id;
4010 __l2cap_chan_add(conn, chan);
4014 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4016 chan->ident = cmd->ident;
/* Result/status selection: only after feature discovery completed and
 * the link passed the security check can the connect succeed outright.
 */
4018 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4019 if (l2cap_chan_check_security(chan)) {
4020 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4021 l2cap_state_change(chan, BT_CONNECT2);
4022 result = L2CAP_CR_PEND;
4023 status = L2CAP_CS_AUTHOR_PEND;
4024 chan->ops->defer(chan);
4026 /* Force pending result for AMP controllers.
4027 * The connection will succeed after the
4028 * physical link is up.
4030 if (amp_id == AMP_ID_BREDR) {
4031 l2cap_state_change(chan, BT_CONFIG);
4032 result = L2CAP_CR_SUCCESS;
4034 l2cap_state_change(chan, BT_CONNECT2);
4035 result = L2CAP_CR_PEND;
4037 status = L2CAP_CS_NO_INFO;
4040 l2cap_state_change(chan, BT_CONNECT2);
4041 result = L2CAP_CR_PEND;
4042 status = L2CAP_CS_AUTHEN_PEND;
4045 l2cap_state_change(chan, BT_CONNECT2);
4046 result = L2CAP_CR_PEND;
4047 status = L2CAP_CS_NO_INFO;
4051 l2cap_chan_unlock(pchan);
4052 mutex_unlock(&conn->chan_lock);
4055 rsp.scid = cpu_to_le16(scid);
4056 rsp.dcid = cpu_to_le16(dcid);
4057 rsp.result = cpu_to_le16(result);
4058 rsp.status = cpu_to_le16(status);
4059 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Pending with no info means we still need the remote's feature mask. */
4061 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4062 struct l2cap_info_req info;
4063 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4065 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4066 conn->info_ident = l2cap_get_ident(conn);
4068 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4070 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4071 sizeof(info), &info);
4074 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4075 result == L2CAP_CR_SUCCESS) {
4077 set_bit(CONF_REQ_SENT, &chan->conf_state);
4078 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4079 l2cap_build_conf_req(chan, buf), buf);
4080 chan->num_conf_req++;
/* Signalling-channel entry point for a Connection Request on BR/EDR:
 * validate the PDU length, notify the management interface of the first
 * L2CAP-level connection on this link, then delegate to l2cap_connect().
 */
4086 static int l2cap_connect_req(struct l2cap_conn *conn,
4087 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4089 struct hci_dev *hdev = conn->hcon->hdev;
4090 struct hci_conn *hcon = conn->hcon;
4092 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report device connected to mgmt only once per ACL link. */
4096 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
4097 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4098 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
4099 hcon->dst_type, 0, NULL, 0,
4101 hci_dev_unlock(hdev);
4103 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response / Create Channel Response: locate the
 * channel by our scid (or by command ident if the result carries no
 * dcid), then act on the result — SUCCESS moves to BT_CONFIG and sends
 * the first Configuration Request, PEND marks connect-pending, anything
 * else deletes the channel with ECONNREFUSED.
 */
4107 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4108 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4111 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4112 u16 scid, dcid, result, status;
4113 struct l2cap_chan *chan;
4117 if (cmd_len < sizeof(*rsp))
4120 scid = __le16_to_cpu(rsp->scid);
4121 dcid = __le16_to_cpu(rsp->dcid);
4122 result = __le16_to_cpu(rsp->result);
4123 status = __le16_to_cpu(rsp->status);
4125 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4126 dcid, scid, result, status);
4128 mutex_lock(&conn->chan_lock);
4131 chan = __l2cap_get_chan_by_scid(conn, scid);
4137 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4146 l2cap_chan_lock(chan);
4149 case L2CAP_CR_SUCCESS:
4150 l2cap_state_change(chan, BT_CONFIG);
4153 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Avoid sending a duplicate config request. */
4155 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4158 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4159 l2cap_build_conf_req(chan, req), req);
4160 chan->num_conf_req++;
4164 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4168 l2cap_chan_del(chan, ECONNREFUSED);
4172 l2cap_chan_unlock(chan);
4175 mutex_unlock(&conn->chan_lock);
/* Apply the default FCS policy after configuration: no FCS outside
 * ERTM/streaming; CRC16 within them unless the remote asked for none.
 */
4180 static inline void set_default_fcs(struct l2cap_chan *chan)
4182 /* FCS is enabled only in ERTM or streaming mode, if one or both
4185 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4186 chan->fcs = L2CAP_FCS_NONE;
4187 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4188 chan->fcs = L2CAP_FCS_CRC16;
/* Send a SUCCESS Configuration Response for the EFS pending-confirmation
 * path, clearing the local-pending flag and marking output config done.
 */
4191 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4192 u8 ident, u16 flags)
4194 struct l2cap_conn *conn = chan->conn;
4196 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4199 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4200 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4202 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4203 l2cap_build_conf_rsp(chan, data,
4204 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the offending
 * source/destination CIDs back to the remote.
 */
4207 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4210 struct l2cap_cmd_rej_cid rej;
4212 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4213 rej.scid = __cpu_to_le16(scid);
4214 rej.dcid = __cpu_to_le16(dcid);
4216 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request: validate length and channel
 * state, accumulate (possibly fragmented) option data into the channel's
 * conf_req buffer, and once complete, parse it, send the response and —
 * when both directions are configured — initialise ERTM and mark the
 * channel ready.  May also trigger our own Configuration Request or the
 * EFS pending-response exchange.
 */
4219 static inline int l2cap_config_req(struct l2cap_conn *conn,
4220 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4223 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4226 struct l2cap_chan *chan;
4229 if (cmd_len < sizeof(*req))
4232 dcid = __le16_to_cpu(req->dcid);
4233 flags = __le16_to_cpu(req->flags);
4235 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4237 chan = l2cap_get_chan_by_scid(conn, dcid);
4239 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal while connecting/configuring. */
4243 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4244 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4249 /* Reject if config buffer is too small. */
4250 len = cmd_len - sizeof(*req);
4251 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4252 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4253 l2cap_build_conf_rsp(chan, rsp,
4254 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of option data. */
4259 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4260 chan->conf_len += len;
4262 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4263 /* Incomplete config. Send empty response. */
4264 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4265 l2cap_build_conf_rsp(chan, rsp,
4266 L2CAP_CONF_SUCCESS, flags), rsp);
4270 /* Complete config. */
4271 len = l2cap_parse_conf_req(chan, rsp);
4273 l2cap_send_disconn_req(chan, ECONNRESET);
4277 chan->ident = cmd->ident;
4278 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4279 chan->num_conf_rsp++;
4281 /* Reset config buffer. */
4284 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalise FCS and bring the channel up. */
4287 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4288 set_default_fcs(chan);
4290 if (chan->mode == L2CAP_MODE_ERTM ||
4291 chan->mode == L2CAP_MODE_STREAMING)
4292 err = l2cap_ertm_init(chan);
4295 l2cap_send_disconn_req(chan, -err);
4297 l2cap_chan_ready(chan);
4302 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4304 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4305 l2cap_build_conf_req(chan, buf), buf);
4306 chan->num_conf_req++;
4309 /* Got Conf Rsp PENDING from remote side and asume we sent
4310 Conf Rsp PENDING in the code above */
4311 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4312 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4314 /* check compatibility */
4316 /* Send rsp for BR/EDR channel */
4318 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4320 chan->ident = cmd->ident;
4324 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response.  SUCCESS commits the
 * negotiated RFC parameters; PENDING may trigger the EFS confirmation
 * (possibly via an AMP logical link); UNACCEPT re-negotiates with a fresh
 * Configuration Request up to L2CAP_CONF_MAX_CONF_RSP attempts; any other
 * result tears the channel down.  When input config completes and output
 * was already done, ERTM is initialised and the channel becomes ready.
 */
4328 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4329 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4332 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4333 u16 scid, flags, result;
4334 struct l2cap_chan *chan;
4335 int len = cmd_len - sizeof(*rsp);
4338 if (cmd_len < sizeof(*rsp))
4341 scid = __le16_to_cpu(rsp->scid);
4342 flags = __le16_to_cpu(rsp->flags);
4343 result = __le16_to_cpu(rsp->result);
4345 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4348 chan = l2cap_get_chan_by_scid(conn, scid);
4353 case L2CAP_CONF_SUCCESS:
4354 l2cap_conf_rfc_get(chan, rsp->data, len);
4355 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4358 case L2CAP_CONF_PENDING:
4359 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4361 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4364 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4367 l2cap_send_disconn_req(chan, ECONNRESET);
/* On BR/EDR reply immediately; with an AMP link the EFS reply
 * waits for the logical link to come up.
 */
4371 if (!chan->hs_hcon) {
4372 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4375 if (l2cap_check_efs(chan)) {
4376 amp_create_logical_link(chan);
4377 chan->ident = cmd->ident;
4383 case L2CAP_CONF_UNACCEPT:
4384 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Bound the remote-supplied option length before re-parsing
 * into the on-stack request buffer.
 */
4387 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4388 l2cap_send_disconn_req(chan, ECONNRESET);
4392 /* throw out any old stored conf requests */
4393 result = L2CAP_CONF_SUCCESS;
4394 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4397 l2cap_send_disconn_req(chan, ECONNRESET);
4401 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4402 L2CAP_CONF_REQ, len, req);
4403 chan->num_conf_req++;
4404 if (result != L2CAP_CONF_SUCCESS)
4410 l2cap_chan_set_err(chan, ECONNRESET);
4412 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4413 l2cap_send_disconn_req(chan, ECONNRESET);
/* More option fragments to come: wait for the final response. */
4417 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4420 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4422 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4423 set_default_fcs(chan);
4425 if (chan->mode == L2CAP_MODE_ERTM ||
4426 chan->mode == L2CAP_MODE_STREAMING)
4427 err = l2cap_ertm_init(chan);
4430 l2cap_send_disconn_req(chan, -err);
4432 l2cap_chan_ready(chan);
4436 l2cap_chan_unlock(chan);
/*
 * Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response, then delete and close the local channel.
 * Sends a Command Reject if the DCID does not match a known channel.
 */
4440 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4441 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4444 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4445 struct l2cap_disconn_rsp rsp;
4447 struct l2cap_chan *chan;
/* Fixed-size command: exact length required. */
4449 if (cmd_len != sizeof(*req))
4452 scid = __le16_to_cpu(req->scid);
4453 dcid = __le16_to_cpu(req->dcid);
4455 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4457 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our local scid; unknown cid gets a reject. */
4459 chan = __l2cap_get_chan_by_scid(conn, dcid);
4461 mutex_unlock(&conn->chan_lock);
4462 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4466 l2cap_chan_lock(chan);
4468 rsp.dcid = cpu_to_le16(chan->scid);
4469 rsp.scid = cpu_to_le16(chan->dcid);
4470 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4472 chan->ops->set_shutdown(chan);
/* Hold a ref across del/close so the chan can't vanish under us. */
4474 l2cap_chan_hold(chan);
4475 l2cap_chan_del(chan, ECONNRESET);
4477 l2cap_chan_unlock(chan);
4479 chan->ops->close(chan);
4480 l2cap_chan_put(chan);
4482 mutex_unlock(&conn->chan_lock);
/*
 * Handle an incoming L2CAP Disconnection Response: the peer has
 * confirmed our disconnect, so delete and close the channel with
 * no error. Silently ignores an unknown SCID.
 */
4487 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4488 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4491 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4493 struct l2cap_chan *chan;
4495 if (cmd_len != sizeof(*rsp))
4498 scid = __le16_to_cpu(rsp->scid);
4499 dcid = __le16_to_cpu(rsp->dcid);
4501 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4503 mutex_lock(&conn->chan_lock);
4505 chan = __l2cap_get_chan_by_scid(conn, scid);
4507 mutex_unlock(&conn->chan_lock);
4511 l2cap_chan_lock(chan);
/* Hold a ref so the close callback runs on a live channel. */
4513 l2cap_chan_hold(chan);
4514 l2cap_chan_del(chan, 0);
4516 l2cap_chan_unlock(chan);
4518 chan->ops->close(chan);
4519 l2cap_chan_put(chan);
4521 mutex_unlock(&conn->chan_lock);
/*
 * Handle an incoming L2CAP Information Request. Answers the feature
 * mask and fixed-channel queries; any other info type gets a
 * NOT_SUPPORTED response.
 */
4526 static inline int l2cap_information_req(struct l2cap_conn *conn,
4527 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4530 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4533 if (cmd_len != sizeof(*req))
4536 type = __le16_to_cpu(req->type);
4538 BT_DBG("type 0x%4.4x", type);
4540 if (type == L2CAP_IT_FEAT_MASK) {
4542 u32 feat_mask = l2cap_feat_mask;
4543 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4544 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4545 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
/* Always advertise ERTM/streaming; extended flow spec and window
 * only when high-speed (AMP) support is enabled. */
4547 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4549 if (conn->hs_enabled)
4550 feat_mask |= L2CAP_FEAT_EXT_FLOW
4551 | L2CAP_FEAT_EXT_WINDOW;
4553 put_unaligned_le32(feat_mask, rsp->data);
4554 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4556 } else if (type == L2CAP_IT_FIXED_CHAN) {
4558 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Toggle the A2MP fixed channel bit to match current hs support.
 * NOTE(review): this mutates the file-global l2cap_fixed_chan[]. */
4560 if (conn->hs_enabled)
4561 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4563 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4565 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4566 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4567 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4568 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4571 struct l2cap_info_rsp rsp;
4572 rsp.type = cpu_to_le16(type);
4573 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4574 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/*
 * Handle an incoming L2CAP Information Response. Stores the peer's
 * feature mask, optionally chains a fixed-channel query, and once the
 * info exchange is complete starts pending connections on this link.
 */
4581 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4582 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4585 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4588 if (cmd_len < sizeof(*rsp))
4591 type = __le16_to_cpu(rsp->type);
4592 result = __le16_to_cpu(rsp->result);
4594 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4596 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4597 if (cmd->ident != conn->info_ident ||
4598 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4601 cancel_delayed_work(&conn->info_timer);
/* A failed response still completes the exchange. */
4603 if (result != L2CAP_IR_SUCCESS) {
4604 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4605 conn->info_ident = 0;
4607 l2cap_conn_start(conn);
4613 case L2CAP_IT_FEAT_MASK:
4614 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: follow up with that query. */
4616 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4617 struct l2cap_info_req req;
4618 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4620 conn->info_ident = l2cap_get_ident(conn);
4622 l2cap_send_cmd(conn, conn->info_ident,
4623 L2CAP_INFO_REQ, sizeof(req), &req);
4625 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4626 conn->info_ident = 0;
4628 l2cap_conn_start(conn);
4632 case L2CAP_IT_FIXED_CHAN:
4633 conn->fixed_chan_mask = rsp->data[0];
4634 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4635 conn->info_ident = 0;
4637 l2cap_conn_start(conn);
/*
 * Handle an incoming L2CAP Create Channel Request (AMP). Controller id
 * 0 (BR/EDR) is treated as a plain connect; otherwise the AMP
 * controller id is validated and, on success, the new channel is tied
 * to the high-speed hci_conn. Invalid AMP ids get a BAD_AMP response.
 */
4644 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4645 struct l2cap_cmd_hdr *cmd,
4646 u16 cmd_len, void *data)
4648 struct l2cap_create_chan_req *req = data;
4649 struct l2cap_create_chan_rsp rsp;
4650 struct l2cap_chan *chan;
4651 struct hci_dev *hdev;
4654 if (cmd_len != sizeof(*req))
/* Command only meaningful when high-speed support is on. */
4657 if (!conn->hs_enabled)
4660 psm = le16_to_cpu(req->psm);
4661 scid = le16_to_cpu(req->scid);
4663 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4665 /* For controller id 0 make BR/EDR connection */
4666 if (req->amp_id == AMP_ID_BREDR) {
4667 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4672 /* Validate AMP controller id */
4673 hdev = hci_dev_get(req->amp_id);
4677 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4682 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4685 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4686 struct hci_conn *hs_hcon;
/* The AMP physical link must already exist for this peer. */
4688 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4692 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4697 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
/* Bind the BR/EDR channel to the high-speed link; AMP uses no FCS
 * and the connection MTU follows the AMP controller's block MTU. */
4699 mgr->bredr_chan = chan;
4700 chan->hs_hcon = hs_hcon;
4701 chan->fcs = L2CAP_FCS_NONE;
4702 conn->mtu = hdev->block_mtu;
/* Error path: refuse with BAD_AMP. */
4711 rsp.scid = cpu_to_le16(scid);
4712 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4713 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4715 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/*
 * Send an L2CAP Move Channel Request asking the peer to move this
 * channel to @dest_amp_id, remembering the signalling ident on the
 * channel and arming the move timeout.
 */
4721 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4723 struct l2cap_move_chan_req req;
4726 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
/* Stash the ident so the matching response can be correlated. */
4728 ident = l2cap_get_ident(chan->conn);
4729 chan->ident = ident;
4731 req.icid = cpu_to_le16(chan->scid);
4732 req.dest_amp_id = dest_amp_id;
4734 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4737 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/*
 * Send an L2CAP Move Channel Response with @result, echoing the
 * ident stored on the channel from the peer's request.
 */
4740 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4742 struct l2cap_move_chan_rsp rsp;
4744 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4746 rsp.icid = cpu_to_le16(chan->dcid);
4747 rsp.result = cpu_to_le16(result);
4749 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/*
 * Send an L2CAP Move Channel Confirmation with @result under a fresh
 * ident and arm the move timeout for the expected confirm response.
 */
4753 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4755 struct l2cap_move_chan_cfm cfm;
4757 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4759 chan->ident = l2cap_get_ident(chan->conn);
4761 cfm.icid = cpu_to_le16(chan->scid);
4762 cfm.result = cpu_to_le16(result);
4764 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4767 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/*
 * Send an UNCONFIRMED Move Channel Confirmation for a bare @icid —
 * used when no matching channel could be located.
 */
4770 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4772 struct l2cap_move_chan_cfm cfm;
4774 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4776 cfm.icid = cpu_to_le16(icid);
4777 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4779 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/*
 * Send the Move Channel Confirmation Response for @icid, completing
 * the move signalling handshake initiated by the peer's confirm.
 */
4783 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4786 struct l2cap_move_chan_cfm_rsp rsp;
4788 BT_DBG("icid 0x%4.4x", icid);
4790 rsp.icid = cpu_to_le16(icid);
4791 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/*
 * Drop the channel's references to its high-speed logical link.
 * Actual link teardown is a placeholder (not implemented here).
 */
4794 static void __release_logical_link(struct l2cap_chan *chan)
4796 chan->hs_hchan = NULL;
4797 chan->hs_hcon = NULL;
4799 /* Placeholder - release the logical link */
/*
 * Clean up after a failed AMP logical link setup. A channel not yet
 * connected is simply disconnected; a connected channel undoes the
 * in-progress move according to its move role.
 */
4802 static void l2cap_logical_fail(struct l2cap_chan *chan)
4804 /* Logical link setup failed */
4805 if (chan->state != BT_CONNECTED) {
4806 /* Create channel failure, disconnect */
4807 l2cap_send_disconn_req(chan, ECONNRESET);
4811 switch (chan->move_role) {
4812 case L2CAP_MOVE_ROLE_RESPONDER:
4813 l2cap_move_done(chan);
/* Tell the initiator we cannot host the move. */
4814 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4816 case L2CAP_MOVE_ROLE_INITIATOR:
4817 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4818 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4819 /* Remote has only sent pending or
4820 * success responses, clean up
4822 l2cap_move_done(chan);
4825 /* Other amp move states imply that the move
4826 * has already aborted
4828 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/*
 * Complete channel creation once the AMP logical link is up: attach
 * the hci_chan, send the deferred EFS configure response, and if
 * configuration already finished bring the channel to ready.
 */
4833 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4834 struct hci_chan *hchan)
4836 struct l2cap_conf_rsp rsp;
4838 chan->hs_hchan = hchan;
4839 chan->hs_hcon->l2cap_data = chan->conn;
/* chan->ident was saved when the config exchange was parked. */
4841 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4843 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4846 set_default_fcs(chan);
4848 err = l2cap_ertm_init(chan);
4850 l2cap_send_disconn_req(chan, -err);
4852 l2cap_chan_ready(chan);
/*
 * Advance the channel-move state machine once the AMP logical link
 * completes, sending the confirm/response appropriate to our move
 * role. An unexpected state releases the link and goes back to
 * STABLE.
 */
4856 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4857 struct hci_chan *hchan)
4859 chan->hs_hcon = hchan->conn;
4860 chan->hs_hcon->l2cap_data = chan->conn;
4862 BT_DBG("move_state %d", chan->move_state);
4864 switch (chan->move_state) {
4865 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4866 /* Move confirm will be sent after a success
4867 * response is received
4869 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4871 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Defer further progress while the local receiver is busy. */
4872 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4873 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4874 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4875 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4876 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4877 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4878 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4879 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4883 /* Move was not in expected state, free the channel */
4884 __release_logical_link(chan);
4886 chan->move_state = L2CAP_MOVE_STABLE;
4890 /* Call with chan locked */
/*
 * Logical link completion callback. On failure, unwind via
 * l2cap_logical_fail() and release the link; on success, finish
 * either channel creation (chan not yet connected) or a channel move.
 */
4891 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4894 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4897 l2cap_logical_fail(chan);
4898 __release_logical_link(chan);
4902 if (chan->state != BT_CONNECTED) {
4903 /* Ignore logical link if channel is on BR/EDR */
4904 if (chan->local_amp_id != AMP_ID_BREDR)
4905 l2cap_logical_finish_create(chan, hchan);
4907 l2cap_logical_finish_move(chan, hchan);
/*
 * Begin moving a channel between BR/EDR and AMP. From BR/EDR the move
 * is only started when the channel policy prefers AMP and physical
 * link setup is still a placeholder; from AMP the move back to BR/EDR
 * sends the Move Channel Request immediately.
 */
4911 void l2cap_move_start(struct l2cap_chan *chan)
4913 BT_DBG("chan %p", chan);
4915 if (chan->local_amp_id == AMP_ID_BREDR) {
4916 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4918 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4919 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4920 /* Placeholder - start physical link setup */
4922 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4923 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
/* Destination 0 = move back to the BR/EDR controller. */
4925 l2cap_move_setup(chan);
4926 l2cap_send_move_chan_req(chan, 0);
/*
 * Continue channel creation after the AMP physical link attempt
 * resolves. Outgoing channels either proceed on the AMP or fall back
 * to a plain BR/EDR connect; incoming channels get a success or
 * NO_MEM Create Channel Response, followed by a Configure Request on
 * success.
 */
4930 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4931 u8 local_amp_id, u8 remote_amp_id)
4933 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4934 local_amp_id, remote_amp_id);
/* AMP channels never use an FCS. */
4936 chan->fcs = L2CAP_FCS_NONE;
4938 /* Outgoing channel on AMP */
4939 if (chan->state == BT_CONNECT) {
4940 if (result == L2CAP_CR_SUCCESS) {
4941 chan->local_amp_id = local_amp_id;
4942 l2cap_send_create_chan_req(chan, remote_amp_id);
4944 /* Revert to BR/EDR connect */
4945 l2cap_send_conn_req(chan);
4951 /* Incoming channel on AMP */
4952 if (__l2cap_no_conn_pending(chan)) {
4953 struct l2cap_conn_rsp rsp;
4955 rsp.scid = cpu_to_le16(chan->dcid);
4956 rsp.dcid = cpu_to_le16(chan->scid);
4958 if (result == L2CAP_CR_SUCCESS) {
4959 /* Send successful response */
4960 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4961 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4963 /* Send negative response */
4964 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4965 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4968 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* Accepted: enter CONFIG and kick off option negotiation. */
4971 if (result == L2CAP_CR_SUCCESS) {
4972 l2cap_state_change(chan, BT_CONFIG);
4973 set_bit(CONF_REQ_SENT, &chan->conf_state);
4974 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4976 l2cap_build_conf_req(chan, buf), buf);
4977 chan->num_conf_req++;
/*
 * As move initiator: prepare the channel for a move to @local_amp_id
 * and request the peer to move to @remote_amp_id.
 */
4982 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4985 l2cap_move_setup(chan);
4986 chan->move_id = local_amp_id;
4987 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4989 l2cap_send_move_chan_req(chan, remote_amp_id);
/*
 * As move responder: answer the pending Move Channel Request based on
 * logical link availability. If the link is already connected we can
 * accept immediately; otherwise we wait for logical link confirmation
 * or refuse with NOT_ALLOWED.
 * NOTE(review): hchan is only a placeholder here (always NULL until
 * the lookup is implemented) — the BT_CONNECTED branch is dormant.
 */
4992 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4994 struct hci_chan *hchan = NULL;
4996 /* Placeholder - get hci_chan for logical link */
4999 if (hchan->state == BT_CONNECTED) {
5000 /* Logical link is ready to go */
5001 chan->hs_hcon = hchan->conn;
5002 chan->hs_hcon->l2cap_data = chan->conn;
5003 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5004 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5006 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5008 /* Wait for logical link to be ready */
5009 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5012 /* Logical link not available */
5013 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/*
 * Abort an in-progress move: as responder, refuse the peer with
 * BAD_ID or NOT_ALLOWED; in all cases return the channel to the
 * stable (no-move) state and restart ERTM transmission.
 */
5017 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5019 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5021 if (result == -EINVAL)
5022 rsp_result = L2CAP_MR_BAD_ID;
5024 rsp_result = L2CAP_MR_NOT_ALLOWED;
5026 l2cap_send_move_chan_rsp(chan, rsp_result);
5029 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5030 chan->move_state = L2CAP_MOVE_STABLE;
5032 /* Restart data transmission */
5033 l2cap_ertm_send(chan);
5036 /* Invoke with locked chan */
/*
 * Physical (AMP) link completion callback. Routes to channel
 * creation, move initiation/response, or move cancellation depending
 * on channel state, the result, and our move role. Channels already
 * disconnecting are ignored.
 */
5037 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5039 u8 local_amp_id = chan->local_amp_id;
5040 u8 remote_amp_id = chan->remote_amp_id;
5042 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5043 chan, result, local_amp_id, remote_amp_id);
5045 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
5046 l2cap_chan_unlock(chan);
5050 if (chan->state != BT_CONNECTED) {
5051 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5052 } else if (result != L2CAP_MR_SUCCESS) {
5053 l2cap_do_move_cancel(chan, result);
5055 switch (chan->move_role) {
5056 case L2CAP_MOVE_ROLE_INITIATOR:
5057 l2cap_do_move_initiate(chan, local_amp_id,
5060 case L2CAP_MOVE_ROLE_RESPONDER:
5061 l2cap_do_move_respond(chan, result);
/* No recognized role: cancel to be safe. */
5064 l2cap_do_move_cancel(chan, result);
/*
 * Handle an incoming L2CAP Move Channel Request. Validates that the
 * channel exists, is movable (dynamic CID, ERTM/streaming mode, policy
 * permits), the destination controller is valid, and no move collision
 * is lost — then either accepts the move toward BR/EDR or starts
 * physical link acceptance toward an AMP controller.
 */
5070 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5071 struct l2cap_cmd_hdr *cmd,
5072 u16 cmd_len, void *data)
5074 struct l2cap_move_chan_req *req = data;
5075 struct l2cap_move_chan_rsp rsp;
5076 struct l2cap_chan *chan;
5078 u16 result = L2CAP_MR_NOT_ALLOWED;
5080 if (cmd_len != sizeof(*req))
5083 icid = le16_to_cpu(req->icid);
5085 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5087 if (!conn->hs_enabled)
/* Unknown icid: refuse without a channel to hang state on. */
5090 chan = l2cap_get_chan_by_dcid(conn, icid);
5092 rsp.icid = cpu_to_le16(icid);
5093 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5094 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5099 chan->ident = cmd->ident;
/* Only dynamic, ERTM/streaming channels with a permissive policy
 * may move. */
5101 if (chan->scid < L2CAP_CID_DYN_START ||
5102 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5103 (chan->mode != L2CAP_MODE_ERTM &&
5104 chan->mode != L2CAP_MODE_STREAMING)) {
5105 result = L2CAP_MR_NOT_ALLOWED;
5106 goto send_move_response;
5109 if (chan->local_amp_id == req->dest_amp_id) {
5110 result = L2CAP_MR_SAME_ID;
5111 goto send_move_response;
5114 if (req->dest_amp_id != AMP_ID_BREDR) {
5115 struct hci_dev *hdev;
5116 hdev = hci_dev_get(req->dest_amp_id);
5117 if (!hdev || hdev->dev_type != HCI_AMP ||
5118 !test_bit(HCI_UP, &hdev->flags)) {
5122 result = L2CAP_MR_BAD_ID;
5123 goto send_move_response;
5128 /* Detect a move collision. Only send a collision response
5129 * if this side has "lost", otherwise proceed with the move.
5130 * The winner has the larger bd_addr.
5132 if ((__chan_is_moving(chan) ||
5133 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5134 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5135 result = L2CAP_MR_COLLISION;
5136 goto send_move_response;
5139 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5140 l2cap_move_setup(chan);
5141 chan->move_id = req->dest_amp_id;
5144 if (req->dest_amp_id == AMP_ID_BREDR) {
5145 /* Moving to BR/EDR */
5146 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5147 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5148 result = L2CAP_MR_PEND;
5150 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5151 result = L2CAP_MR_SUCCESS;
5154 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5155 /* Placeholder - uncomment when amp functions are available */
5156 /*amp_accept_physical(chan, req->dest_amp_id);*/
5157 result = L2CAP_MR_PEND;
5161 l2cap_send_move_chan_rsp(chan, result);
5163 l2cap_chan_unlock(chan);
/*
 * Process a SUCCESS or PEND Move Channel Response as the initiator,
 * advancing the per-channel move state machine. If the logical link
 * is already connected, the confirm can be sent straight away;
 * otherwise progress waits on logical link completion. Any unexpected
 * state aborts the move with an UNCONFIRMED confirm.
 */
5168 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5170 struct l2cap_chan *chan;
5171 struct hci_chan *hchan = NULL;
5173 chan = l2cap_get_chan_by_scid(conn, icid);
/* No channel: spec still requires an (unconfirmed) confirm. */
5175 l2cap_send_move_chan_cfm_icid(conn, icid);
5179 __clear_chan_timer(chan);
/* PEND extends the deadline with the extended response timeout. */
5180 if (result == L2CAP_MR_PEND)
5181 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5183 switch (chan->move_state) {
5184 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5185 /* Move confirm will be sent when logical link
5188 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5190 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5191 if (result == L2CAP_MR_PEND) {
5193 } else if (test_bit(CONN_LOCAL_BUSY,
5194 &chan->conn_state)) {
5195 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5197 /* Logical link is up or moving to BR/EDR,
5200 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5201 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5204 case L2CAP_MOVE_WAIT_RSP:
5206 if (result == L2CAP_MR_SUCCESS) {
5207 /* Remote is ready, send confirm immediately
5208 * after logical link is ready
5210 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5212 /* Both logical link and move success
5213 * are required to confirm
5215 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5218 /* Placeholder - get hci_chan for logical link */
5220 /* Logical link not available */
5221 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5225 /* If the logical link is not yet connected, do not
5226 * send confirmation.
5228 if (hchan->state != BT_CONNECTED)
5231 /* Logical link is already ready to go */
5233 chan->hs_hcon = hchan->conn;
5234 chan->hs_hcon->l2cap_data = chan->conn;
5236 if (result == L2CAP_MR_SUCCESS) {
5237 /* Can confirm now */
5238 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5240 /* Now only need move success
5243 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5246 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5249 /* Any other amp move state means the move failed. */
5250 chan->move_id = chan->local_amp_id;
5251 l2cap_move_done(chan);
5252 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5255 l2cap_chan_unlock(chan);
/*
 * Handle a failed Move Channel Response. A COLLISION result flips the
 * initiator into the responder role (the peer won the collision);
 * anything else cancels the move. Either way an UNCONFIRMED confirm is
 * sent so the peer can finish the handshake.
 */
5258 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5261 struct l2cap_chan *chan;
5263 chan = l2cap_get_chan_by_ident(conn, ident);
5265 /* Could not locate channel, icid is best guess */
5266 l2cap_send_move_chan_cfm_icid(conn, icid);
5270 __clear_chan_timer(chan);
5272 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5273 if (result == L2CAP_MR_COLLISION) {
5274 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5276 /* Cleanup - cancel move */
5277 chan->move_id = chan->local_amp_id;
5278 l2cap_move_done(chan);
5282 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5284 l2cap_chan_unlock(chan);
/*
 * Handle an incoming L2CAP Move Channel Response: SUCCESS/PEND
 * continues the move, any other result takes the failure path.
 */
5287 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5288 struct l2cap_cmd_hdr *cmd,
5289 u16 cmd_len, void *data)
5291 struct l2cap_move_chan_rsp *rsp = data;
5294 if (cmd_len != sizeof(*rsp))
5297 icid = le16_to_cpu(rsp->icid);
5298 result = le16_to_cpu(rsp->result);
5300 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5302 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5303 l2cap_move_continue(conn, icid, result);
5305 l2cap_move_fail(conn, cmd->ident, icid, result);
/*
 * Handle an incoming Move Channel Confirmation. A CONFIRMED result
 * commits the move (releasing the logical link when landing on
 * BR/EDR); UNCONFIRMED reverts to the original controller. Always
 * answers with a Confirmation Response, even for an unknown icid.
 */
5310 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5311 struct l2cap_cmd_hdr *cmd,
5312 u16 cmd_len, void *data)
5314 struct l2cap_move_chan_cfm *cfm = data;
5315 struct l2cap_chan *chan;
5318 if (cmd_len != sizeof(*cfm))
5321 icid = le16_to_cpu(cfm->icid);
5322 result = le16_to_cpu(cfm->result);
5324 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5326 chan = l2cap_get_chan_by_dcid(conn, icid);
5328 /* Spec requires a response even if the icid was not found */
5329 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5333 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5334 if (result == L2CAP_MC_CONFIRMED) {
5335 chan->local_amp_id = chan->move_id;
5336 if (chan->local_amp_id == AMP_ID_BREDR)
5337 __release_logical_link(chan);
5339 chan->move_id = chan->local_amp_id;
5342 l2cap_move_done(chan);
5345 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5347 l2cap_chan_unlock(chan);
/*
 * Handle an incoming Move Channel Confirmation Response: the final
 * step of the move handshake. Commits the new controller id, drops
 * the logical link when the channel ends up on BR/EDR, and completes
 * the move.
 */
5352 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5353 struct l2cap_cmd_hdr *cmd,
5354 u16 cmd_len, void *data)
5356 struct l2cap_move_chan_cfm_rsp *rsp = data;
5357 struct l2cap_chan *chan;
5360 if (cmd_len != sizeof(*rsp))
5363 icid = le16_to_cpu(rsp->icid);
5365 BT_DBG("icid 0x%4.4x", icid);
5367 chan = l2cap_get_chan_by_scid(conn, icid);
5371 __clear_chan_timer(chan);
5373 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5374 chan->local_amp_id = chan->move_id;
5376 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5377 __release_logical_link(chan);
5379 l2cap_move_done(chan);
5382 l2cap_chan_unlock(chan);
/*
 * Validate LE connection parameters against the ranges in the
 * Bluetooth spec: interval 6..3200 with min <= max, supervision
 * timeout 10..3200, timeout strictly larger than 8*max interval, and
 * slave latency within both the absolute cap (499) and the derived
 * maximum for the chosen interval/timeout.
 */
5387 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5392 if (min > max || min < 6 || max > 3200)
5395 if (to_multiplier < 10 || to_multiplier > 3200)
5398 if (max >= to_multiplier * 8)
5401 max_latency = (to_multiplier * 8 / max) - 1;
5402 if (latency > 499 || latency > max_latency)
/*
 * Handle an LE Connection Parameter Update Request (master side only).
 * Validates the proposed parameters, replies with accepted/rejected,
 * and on acceptance instructs the controller to update the LE
 * connection.
 */
5408 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5409 struct l2cap_cmd_hdr *cmd,
5410 u16 cmd_len, u8 *data)
5412 struct hci_conn *hcon = conn->hcon;
5413 struct l2cap_conn_param_update_req *req;
5414 struct l2cap_conn_param_update_rsp rsp;
5415 u16 min, max, latency, to_multiplier;
/* Only the link master may grant parameter updates. */
5418 if (!(hcon->link_mode & HCI_LM_MASTER))
5421 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5424 req = (struct l2cap_conn_param_update_req *) data;
5425 min = __le16_to_cpu(req->min);
5426 max = __le16_to_cpu(req->max);
5427 latency = __le16_to_cpu(req->latency);
5428 to_multiplier = __le16_to_cpu(req->to_multiplier);
5430 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5431 min, max, latency, to_multiplier);
5433 memset(&rsp, 0, sizeof(rsp));
5435 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5437 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5439 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5441 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the controller. */
5445 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/*
 * Handle an LE Credit Based Connection Response. On success, records
 * the peer's MTU/MPS/credits and marks the channel ready; any other
 * result deletes the channel with ECONNREFUSED. Responses with
 * MTU/MPS below the LE minimum of 23 are rejected outright.
 */
5450 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5451 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5454 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5455 u16 dcid, mtu, mps, credits, result;
5456 struct l2cap_chan *chan;
5459 if (cmd_len < sizeof(*rsp))
5462 dcid = __le16_to_cpu(rsp->dcid);
5463 mtu = __le16_to_cpu(rsp->mtu);
5464 mps = __le16_to_cpu(rsp->mps);
5465 credits = __le16_to_cpu(rsp->credits);
5466 result = __le16_to_cpu(rsp->result);
/* 23 is the LE minimum for both MTU and MPS. */
5468 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5471 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5472 dcid, mtu, mps, credits, result);
5474 mutex_lock(&conn->chan_lock);
/* Responses match our request via the signalling ident. */
5476 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5484 l2cap_chan_lock(chan);
5487 case L2CAP_CR_SUCCESS:
5491 chan->remote_mps = mps;
5492 chan->tx_credits = credits;
5493 l2cap_chan_ready(chan);
5497 l2cap_chan_del(chan, ECONNREFUSED);
5501 l2cap_chan_unlock(chan);
5504 mutex_unlock(&conn->chan_lock);
/*
 * Dispatch one BR/EDR signalling command to its handler. Echo
 * requests are answered inline; unknown command codes are logged as
 * errors.
 */
5509 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5510 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5515 switch (cmd->code) {
5516 case L2CAP_COMMAND_REJ:
5517 l2cap_command_rej(conn, cmd, cmd_len, data);
5520 case L2CAP_CONN_REQ:
5521 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share one handler. */
5524 case L2CAP_CONN_RSP:
5525 case L2CAP_CREATE_CHAN_RSP:
5526 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5529 case L2CAP_CONF_REQ:
5530 err = l2cap_config_req(conn, cmd, cmd_len, data);
5533 case L2CAP_CONF_RSP:
5534 l2cap_config_rsp(conn, cmd, cmd_len, data);
5537 case L2CAP_DISCONN_REQ:
5538 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5541 case L2CAP_DISCONN_RSP:
5542 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo: bounce the payload straight back. */
5545 case L2CAP_ECHO_REQ:
5546 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5549 case L2CAP_ECHO_RSP:
5552 case L2CAP_INFO_REQ:
5553 err = l2cap_information_req(conn, cmd, cmd_len, data);
5556 case L2CAP_INFO_RSP:
5557 l2cap_information_rsp(conn, cmd, cmd_len, data);
5560 case L2CAP_CREATE_CHAN_REQ:
5561 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5564 case L2CAP_MOVE_CHAN_REQ:
5565 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5568 case L2CAP_MOVE_CHAN_RSP:
5569 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5572 case L2CAP_MOVE_CHAN_CFM:
5573 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5576 case L2CAP_MOVE_CHAN_CFM_RSP:
5577 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5581 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/*
 * Handle an LE Credit Based Connection Request. Finds a listening
 * channel for the PSM, verifies security and CID uniqueness, creates
 * the new channel, and replies with success, pending (deferred
 * setup), or an error result (bad PSM, insufficient authentication,
 * no memory).
 */
5589 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5590 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5593 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5594 struct l2cap_le_conn_rsp rsp;
5595 struct l2cap_chan *chan, *pchan;
5596 u16 dcid, scid, credits, mtu, mps;
5600 if (cmd_len != sizeof(*req))
5603 scid = __le16_to_cpu(req->scid);
5604 mtu = __le16_to_cpu(req->mtu);
5605 mps = __le16_to_cpu(req->mps);
/* LE minimum MTU/MPS is 23. */
5610 if (mtu < 23 || mps < 23)
5613 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5616 /* Check if we have socket listening on psm */
5617 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5618 &conn->hcon->dst, LE_LINK);
5620 result = L2CAP_CR_BAD_PSM;
5625 mutex_lock(&conn->chan_lock);
5626 l2cap_chan_lock(pchan);
5628 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5629 result = L2CAP_CR_AUTHENTICATION;
5631 goto response_unlock;
5634 /* Check if we already have channel with that dcid */
5635 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5636 result = L2CAP_CR_NO_MEM;
5638 goto response_unlock;
5641 chan = pchan->ops->new_connection(pchan);
5643 result = L2CAP_CR_NO_MEM;
5644 goto response_unlock;
5647 bacpy(&chan->src, &conn->hcon->src);
5648 bacpy(&chan->dst, &conn->hcon->dst);
5649 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5650 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5654 chan->remote_mps = mps;
5655 chan->tx_credits = __le16_to_cpu(req->credits);
5657 __l2cap_chan_add(conn, chan);
5659 credits = chan->rx_credits;
5661 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5663 chan->ident = cmd->ident;
/* Deferred setup: answer PEND now, let userspace accept later. */
5665 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5666 l2cap_state_change(chan, BT_CONNECT2);
5667 result = L2CAP_CR_PEND;
5668 chan->ops->defer(chan);
5670 l2cap_chan_ready(chan);
5671 result = L2CAP_CR_SUCCESS;
5675 l2cap_chan_unlock(pchan);
5676 mutex_unlock(&conn->chan_lock);
5678 if (result == L2CAP_CR_PEND)
5683 rsp.mtu = cpu_to_le16(chan->imtu);
5684 rsp.mps = cpu_to_le16(chan->mps);
5690 rsp.dcid = cpu_to_le16(dcid);
5691 rsp.credits = cpu_to_le16(credits);
5692 rsp.result = cpu_to_le16(result);
5694 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/*
 * Handle an LE Flow Control Credit packet: add the granted credits to
 * the channel's transmit budget and flush as much of the queued tx
 * data as the new budget allows, resuming the channel if credits
 * remain.
 */
5699 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5700 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5703 struct l2cap_le_credits *pkt;
5704 struct l2cap_chan *chan;
5707 if (cmd_len != sizeof(*pkt))
5710 pkt = (struct l2cap_le_credits *) data;
5711 cid = __le16_to_cpu(pkt->cid);
5712 credits = __le16_to_cpu(pkt->credits);
5714 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5716 chan = l2cap_get_chan_by_dcid(conn, cid);
5720 chan->tx_credits += credits;
/* Drain queued frames while the credit budget lasts. */
5722 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5723 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5727 if (chan->tx_credits)
5728 chan->ops->resume(chan);
5730 l2cap_chan_unlock(chan);
/*
 * Dispatch one LE signalling command to its handler. When LE
 * connection-oriented channels are disabled, the LE CoC opcodes are
 * filtered out up front; unknown codes are logged as errors.
 */
5735 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5736 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
/* LE CoC disabled: treat its opcodes as unsupported. */
5741 if (!enable_lecoc) {
5742 switch (cmd->code) {
5743 case L2CAP_LE_CONN_REQ:
5744 case L2CAP_LE_CONN_RSP:
5745 case L2CAP_LE_CREDITS:
5746 case L2CAP_DISCONN_REQ:
5747 case L2CAP_DISCONN_RSP:
5752 switch (cmd->code) {
5753 case L2CAP_COMMAND_REJ:
5756 case L2CAP_CONN_PARAM_UPDATE_REQ:
5757 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5760 case L2CAP_CONN_PARAM_UPDATE_RSP:
5763 case L2CAP_LE_CONN_RSP:
5764 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5767 case L2CAP_LE_CONN_REQ:
5768 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5771 case L2CAP_LE_CREDITS:
5772 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5775 case L2CAP_DISCONN_REQ:
5776 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5779 case L2CAP_DISCONN_RSP:
5780 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5784 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/*
 * Process an skb received on the LE signalling channel: validate link
 * type, header length, and ident, then dispatch the single command.
 * A handler error is answered with a NOT_UNDERSTOOD Command Reject.
 */
5792 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5793 struct sk_buff *skb)
5795 struct hci_conn *hcon = conn->hcon;
5796 struct l2cap_cmd_hdr *cmd;
5800 if (hcon->type != LE_LINK)
5803 if (skb->len < L2CAP_CMD_HDR_SIZE)
5806 cmd = (void *) skb->data;
5807 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5809 len = le16_to_cpu(cmd->len);
5811 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signalling carries exactly one command; ident 0 is invalid. */
5813 if (len != skb->len || !cmd->ident) {
5814 BT_DBG("corrupted command");
5818 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5820 struct l2cap_cmd_rej_unk rej;
5822 BT_ERR("Wrong link type (%d)", err);
5824 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5825 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Receive path for the BR/EDR signaling channel. Unlike the LE
 * variant, one PDU may contain several commands, so this loops over
 * the payload, dispatching each to l2cap_bredr_sig_cmd() and
 * rejecting ones that fail.
 */
5833 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5834 struct sk_buff *skb)
5836 struct hci_conn *hcon = conn->hcon;
5837 u8 *data = skb->data;
5839 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
5842 l2cap_raw_recv(conn, skb);
5844 if (hcon->type != ACL_LINK)
/* Walk each embedded command header in the PDU. */
5847 while (len >= L2CAP_CMD_HDR_SIZE) {
5849 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5850 data += L2CAP_CMD_HDR_SIZE;
5851 len -= L2CAP_CMD_HDR_SIZE;
5853 cmd_len = le16_to_cpu(cmd.len);
5855 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Declared command length must fit in what is left; ident 0 is
 * reserved and treated as corruption.
 */
5858 if (cmd_len > len || !cmd.ident) {
5859 BT_DBG("corrupted command");
5863 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5865 struct l2cap_cmd_rej_unk rej;
5867 BT_ERR("Wrong link type (%d)", err);
5869 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5870 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * The FCS covers the L2CAP header (which has already been pulled off
 * the skb, hence the "skb->data - hdr_size" below) plus the payload.
 * Returns nonzero on mismatch (exact return lines not visible here).
 */
5882 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5884 u16 our_fcs, rcv_fcs;
5887 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5888 hdr_size = L2CAP_EXT_HDR_SIZE;
5890 hdr_size = L2CAP_ENH_HDR_SIZE;
5892 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new end. */
5893 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5894 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5895 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5897 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) by sending a frame carrying the F-bit:
 * an RNR if locally busy, otherwise pending I-frames, falling back
 * to an RR if no I-frame or S-frame carried the F-bit.
 */
5903 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5905 struct l2cap_ctrl control;
5907 BT_DBG("chan %p", chan);
5909 memset(&control, 0, sizeof(control));
5912 control.reqseq = chan->buffer_seq;
5913 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5915 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5916 control.super = L2CAP_SUPER_RNR;
5917 l2cap_send_sframe(chan, &control);
/* Remote just cleared busy; restart retransmission timing if we
 * still have unacked frames outstanding.
 */
5920 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5921 chan->unacked_frames > 0)
5922 __set_retrans_timer(chan);
5924 /* Send pending iframes */
5925 l2cap_ertm_send(chan);
5927 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5928 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5929 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5932 control.super = L2CAP_SUPER_RR;
5933 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the tail via
 * *last_frag so appends stay O(1), and keep skb's aggregate
 * len/data_len/truesize accounting consistent.
 */
5937 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5938 struct sk_buff **last_frag)
5940 /* skb->len reflects data in skb as well as all fragments
5941 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the
 * remembered tail.
 */
5943 if (!skb_has_frag_list(skb))
5944 skb_shinfo(skb)->frag_list = new_frag;
5946 new_frag->next = NULL;
5948 (*last_frag)->next = new_frag;
5949 *last_frag = new_frag;
5951 skb->len += new_frag->len;
5952 skb->data_len += new_frag->len;
5953 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-segmented ERTM/streaming I-frames.
 * UNSEGMENTED frames go straight to the channel's recv op; START
 * begins a new chan->sdu, CONTINUE/END append to it, and the
 * completed SDU is delivered and the reassembly state reset.
 * Error/cleanup paths are only partially visible in this excerpt.
 */
5956 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5957 struct l2cap_ctrl *control)
5961 switch (control->sar) {
5962 case L2CAP_SAR_UNSEGMENTED:
5966 err = chan->ops->recv(chan, skb);
5969 case L2CAP_SAR_START:
/* A START segment carries the total SDU length first. */
5973 chan->sdu_len = get_unaligned_le16(skb->data);
5974 skb_pull(skb, L2CAP_SDULEN_SIZE);
5976 if (chan->sdu_len > chan->imtu) {
/* A START segment must not already contain the whole SDU. */
5981 if (skb->len >= chan->sdu_len)
5985 chan->sdu_last_frag = skb;
5991 case L2CAP_SAR_CONTINUE:
5995 append_skb_frag(chan->sdu, skb,
5996 &chan->sdu_last_frag);
/* CONTINUE must leave the SDU still incomplete. */
5999 if (chan->sdu->len >= chan->sdu_len)
6009 append_skb_frag(chan->sdu, skb,
6010 &chan->sdu_last_frag);
/* END must complete the SDU exactly. */
6013 if (chan->sdu->len != chan->sdu_len)
6016 err = chan->ops->recv(chan, chan->sdu);
6019 /* Reassembly complete */
6021 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
6029 kfree_skb(chan->sdu);
6031 chan->sdu_last_frag = NULL;
6038 static int l2cap_resegment(struct l2cap_chan *chan)
/* Notify the ERTM tx state machine of a local-busy transition.
 * No-op for channels not in ERTM mode.
 */
6044 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6048 if (chan->mode != L2CAP_MODE_ERTM)
6051 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6052 l2cap_tx(chan, NULL, NULL, event);
6055 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6058 /* Pass sequential frames to l2cap_reassemble_sdu()
6059 * until a gap is encountered.
6062 BT_DBG("chan %p", chan);
/* Drain the SREJ queue in buffer_seq order while we are not
 * locally busy; stop at the first missing sequence number.
 */
6064 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6065 struct sk_buff *skb;
6066 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6067 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6069 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6074 skb_unlink(skb, &chan->srej_q);
6075 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6076 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All gaps filled: leave SREJ_SENT and ack what we consumed. */
6081 if (skb_queue_empty(&chan->srej_q)) {
6082 chan->rx_state = L2CAP_RX_STATE_RECV;
6083 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single frame the
 * peer asked for, enforcing reqseq validity and the max_tx retry
 * limit (both violations disconnect the channel).
 */
6089 static void l2cap_handle_srej(struct l2cap_chan *chan,
6090 struct l2cap_ctrl *control)
6092 struct sk_buff *skb;
6094 BT_DBG("chan %p, control %p", chan, control);
/* SREJ for a frame we have not sent yet is a protocol violation. */
6096 if (control->reqseq == chan->next_tx_seq) {
6097 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6098 l2cap_send_disconn_req(chan, ECONNRESET);
6102 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6105 BT_DBG("Seq %d not available for retransmission",
6110 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
6111 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6112 l2cap_send_disconn_req(chan, ECONNRESET);
6116 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* P-bit set: answer with F-bit and retransmit immediately. */
6118 if (control->poll) {
6119 l2cap_pass_to_tx(chan, control);
6121 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6122 l2cap_retransmit(chan, control);
6123 l2cap_ertm_send(chan);
6125 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6126 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6127 chan->srej_save_reqseq = control->reqseq;
6130 l2cap_pass_to_tx_fbit(chan, control);
/* F-bit set: only retransmit if this is not the SREJ we already
 * acted on (srej_save_reqseq/CONN_SREJ_ACT guard duplicates).
 */
6132 if (control->final) {
6133 if (chan->srej_save_reqseq != control->reqseq ||
6134 !test_and_clear_bit(CONN_SREJ_ACT,
6136 l2cap_retransmit(chan, control);
6138 l2cap_retransmit(chan, control);
6139 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6140 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6141 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, subject to reqseq validity and the max_tx retry limit.
 */
6147 static void l2cap_handle_rej(struct l2cap_chan *chan,
6148 struct l2cap_ctrl *control)
6150 struct sk_buff *skb;
6152 BT_DBG("chan %p, control %p", chan, control);
/* REJ of a frame we have not sent yet is a protocol violation. */
6154 if (control->reqseq == chan->next_tx_seq) {
6155 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6156 l2cap_send_disconn_req(chan, ECONNRESET);
6160 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6162 if (chan->max_tx && skb &&
6163 bt_cb(skb)->control.retries >= chan->max_tx) {
6164 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6165 l2cap_send_disconn_req(chan, ECONNRESET);
6169 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6171 l2cap_pass_to_tx(chan, control);
/* With F-bit, CONN_REJ_ACT guards against retransmitting twice for
 * the same REJ; otherwise retransmit all and remember we did so if
 * we are still waiting for an F-bit.
 */
6173 if (control->final) {
6174 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6175 l2cap_retransmit_all(chan, control);
6177 l2cap_retransmit_all(chan, control);
6178 l2cap_ertm_send(chan);
6179 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6180 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive
 * window: expected, duplicate, unexpected (gap), one of the SREJ
 * subcategories when in SREJ_SENT state, or invalid (ignorable or
 * fatal). The caller's rx state machine branches on the result.
 */
6184 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6186 BT_DBG("chan %p, txseq %d", chan, txseq);
6188 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6189 chan->expected_tx_seq);
/* In SREJ_SENT, out-of-window frames and the various SREJ cases
 * must be distinguished before the normal classification below.
 */
6191 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6192 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6194 /* See notes below regarding "double poll" and
6197 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6198 BT_DBG("Invalid/Ignore - after SREJ");
6199 return L2CAP_TXSEQ_INVALID_IGNORE;
6201 BT_DBG("Invalid - in window after SREJ sent");
6202 return L2CAP_TXSEQ_INVALID;
6206 if (chan->srej_list.head == txseq) {
6207 BT_DBG("Expected SREJ");
6208 return L2CAP_TXSEQ_EXPECTED_SREJ;
6211 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6212 BT_DBG("Duplicate SREJ - txseq already stored");
6213 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6216 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6217 BT_DBG("Unexpected SREJ - not requested");
6218 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
/* Normal-path classification. */
6222 if (chan->expected_tx_seq == txseq) {
6223 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6225 BT_DBG("Invalid - txseq outside tx window")
6226 return L2CAP_TXSEQ_INVALID;
6229 return L2CAP_TXSEQ_EXPECTED;
/* Earlier than expected within the window: already seen. */
6233 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6234 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6235 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6236 return L2CAP_TXSEQ_DUPLICATE;
6239 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6240 /* A source of invalid packets is a "double poll" condition,
6241 * where delays cause us to send multiple poll packets. If
6242 * the remote stack receives and processes both polls,
6243 * sequence numbers can wrap around in such a way that a
6244 * resent frame has a sequence number that looks like new data
6245 * with a sequence gap. This would trigger an erroneous SREJ
6248 * Fortunately, this is impossible with a tx window that's
6249 * less than half of the maximum sequence number, which allows
6250 * invalid frames to be safely ignored.
6252 * With tx window sizes greater than half of the tx window
6253 * maximum, the frame is invalid and cannot be ignored. This
6254 * causes a disconnect.
6257 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6258 BT_DBG("Invalid/Ignore - txseq outside tx window");
6259 return L2CAP_TXSEQ_INVALID_IGNORE;
6261 BT_DBG("Invalid - txseq outside tx window");
6262 return L2CAP_TXSEQ_INVALID;
6265 BT_DBG("Unexpected - txseq indicates missing frames");
6266 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM rx state machine handler for the normal RECV state.
 * Dispatches on the rx event (I-frame / RR / RNR / REJ / SREJ) and,
 * for I-frames, on the txseq classification. Frames not consumed
 * (skb_in_use stays false) are freed at the end.
 */
6270 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6271 struct l2cap_ctrl *control,
6272 struct sk_buff *skb, u8 event)
6275 bool skb_in_use = false;
6277 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6281 case L2CAP_EV_RECV_IFRAME:
6282 switch (l2cap_classify_txseq(chan, control->txseq)) {
6283 case L2CAP_TXSEQ_EXPECTED:
6284 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop the in-sequence frame; recovery will
 * re-request it once busy clears.
 */
6286 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6287 BT_DBG("Busy, discarding expected seq %d",
6292 chan->expected_tx_seq = __next_seq(chan,
6295 chan->buffer_seq = chan->expected_tx_seq;
6298 err = l2cap_reassemble_sdu(chan, skb, control);
6302 if (control->final) {
6303 if (!test_and_clear_bit(CONN_REJ_ACT,
6304 &chan->conn_state)) {
6306 l2cap_retransmit_all(chan, control);
6307 l2cap_ertm_send(chan);
6311 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6312 l2cap_send_ack(chan);
6314 case L2CAP_TXSEQ_UNEXPECTED:
6315 l2cap_pass_to_tx(chan, control);
6317 /* Can't issue SREJ frames in the local busy state.
6318 * Drop this frame, it will be seen as missing
6319 * when local busy is exited.
6321 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6322 BT_DBG("Busy, discarding unexpected seq %d",
6327 /* There was a gap in the sequence, so an SREJ
6328 * must be sent for each missing frame. The
6329 * current frame is stored for later use.
6331 skb_queue_tail(&chan->srej_q, skb);
6333 BT_DBG("Queued %p (queue len %d)", skb,
6334 skb_queue_len(&chan->srej_q));
6336 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6337 l2cap_seq_list_clear(&chan->srej_list);
6338 l2cap_send_srej(chan, control->txseq);
6340 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6342 case L2CAP_TXSEQ_DUPLICATE:
6343 l2cap_pass_to_tx(chan, control);
6345 case L2CAP_TXSEQ_INVALID_IGNORE:
6347 case L2CAP_TXSEQ_INVALID:
6349 l2cap_send_disconn_req(chan, ECONNRESET);
6353 case L2CAP_EV_RECV_RR:
6354 l2cap_pass_to_tx(chan, control);
6355 if (control->final) {
6356 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6358 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6359 !__chan_is_moving(chan)) {
6361 l2cap_retransmit_all(chan, control);
6364 l2cap_ertm_send(chan);
6365 } else if (control->poll) {
6366 l2cap_send_i_or_rr_or_rnr(chan);
6368 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6369 &chan->conn_state) &&
6370 chan->unacked_frames)
6371 __set_retrans_timer(chan);
6373 l2cap_ertm_send(chan);
6376 case L2CAP_EV_RECV_RNR:
6377 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6378 l2cap_pass_to_tx(chan, control);
6379 if (control && control->poll) {
6380 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6381 l2cap_send_rr_or_rnr(chan, 0);
/* Remote is busy: stop retransmitting until it recovers. */
6383 __clear_retrans_timer(chan);
6384 l2cap_seq_list_clear(&chan->retrans_list);
6386 case L2CAP_EV_RECV_REJ:
6387 l2cap_handle_rej(chan, control);
6389 case L2CAP_EV_RECV_SREJ:
6390 l2cap_handle_srej(chan, control);
/* Free any frame that was not queued or consumed above. */
6396 if (skb && !skb_in_use) {
6397 BT_DBG("Freeing %p", skb);
/* ERTM rx state machine handler for the SREJ_SENT state (selective
 * reject recovery in progress). Incoming I-frames are queued on
 * srej_q until the gaps are filled; S-frames are handled much like
 * in RECV but with SREJ-tail bookkeeping.
 */
6404 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6405 struct l2cap_ctrl *control,
6406 struct sk_buff *skb, u8 event)
6409 u16 txseq = control->txseq;
6410 bool skb_in_use = false;
6412 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6416 case L2CAP_EV_RECV_IFRAME:
6417 switch (l2cap_classify_txseq(chan, txseq)) {
6418 case L2CAP_TXSEQ_EXPECTED:
6419 /* Keep frame for reassembly later */
6420 l2cap_pass_to_tx(chan, control);
6421 skb_queue_tail(&chan->srej_q, skb);
6423 BT_DBG("Queued %p (queue len %d)", skb,
6424 skb_queue_len(&chan->srej_q));
6426 chan->expected_tx_seq = __next_seq(chan, txseq);
6428 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The retransmission we asked for arrived: pop it off the
 * SREJ list, queue it, and try to drain in-order frames.
 */
6429 l2cap_seq_list_pop(&chan->srej_list);
6431 l2cap_pass_to_tx(chan, control);
6432 skb_queue_tail(&chan->srej_q, skb);
6434 BT_DBG("Queued %p (queue len %d)", skb,
6435 skb_queue_len(&chan->srej_q));
6437 err = l2cap_rx_queued_iframes(chan);
6442 case L2CAP_TXSEQ_UNEXPECTED:
6443 /* Got a frame that can't be reassembled yet.
6444 * Save it for later, and send SREJs to cover
6445 * the missing frames.
6447 skb_queue_tail(&chan->srej_q, skb);
6449 BT_DBG("Queued %p (queue len %d)", skb,
6450 skb_queue_len(&chan->srej_q));
6452 l2cap_pass_to_tx(chan, control);
6453 l2cap_send_srej(chan, control->txseq);
6455 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6456 /* This frame was requested with an SREJ, but
6457 * some expected retransmitted frames are
6458 * missing. Request retransmission of missing
6461 skb_queue_tail(&chan->srej_q, skb);
6463 BT_DBG("Queued %p (queue len %d)", skb,
6464 skb_queue_len(&chan->srej_q));
6466 l2cap_pass_to_tx(chan, control);
6467 l2cap_send_srej_list(chan, control->txseq);
6469 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6470 /* We've already queued this frame. Drop this copy. */
6471 l2cap_pass_to_tx(chan, control);
6473 case L2CAP_TXSEQ_DUPLICATE:
6474 /* Expecting a later sequence number, so this frame
6475 * was already received. Ignore it completely.
6478 case L2CAP_TXSEQ_INVALID_IGNORE:
6480 case L2CAP_TXSEQ_INVALID:
6482 l2cap_send_disconn_req(chan, ECONNRESET);
6486 case L2CAP_EV_RECV_RR:
6487 l2cap_pass_to_tx(chan, control);
6488 if (control->final) {
6489 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6491 if (!test_and_clear_bit(CONN_REJ_ACT,
6492 &chan->conn_state)) {
6494 l2cap_retransmit_all(chan, control);
6497 l2cap_ertm_send(chan);
6498 } else if (control->poll) {
6499 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6500 &chan->conn_state) &&
6501 chan->unacked_frames) {
6502 __set_retrans_timer(chan);
/* Poll while SREJ recovery is active: re-send the tail SREJ
 * with the F-bit.
 */
6505 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6506 l2cap_send_srej_tail(chan);
6508 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6509 &chan->conn_state) &&
6510 chan->unacked_frames)
6511 __set_retrans_timer(chan);
6513 l2cap_send_ack(chan);
6516 case L2CAP_EV_RECV_RNR:
6517 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6518 l2cap_pass_to_tx(chan, control);
6519 if (control->poll) {
6520 l2cap_send_srej_tail(chan);
/* No poll: acknowledge the RNR with a plain RR. */
6522 struct l2cap_ctrl rr_control;
6523 memset(&rr_control, 0, sizeof(rr_control));
6524 rr_control.sframe = 1;
6525 rr_control.super = L2CAP_SUPER_RR;
6526 rr_control.reqseq = chan->buffer_seq;
6527 l2cap_send_sframe(chan, &rr_control);
6531 case L2CAP_EV_RECV_REJ:
6532 l2cap_handle_rej(chan, control);
6534 case L2CAP_EV_RECV_SREJ:
6535 l2cap_handle_srej(chan, control);
/* Free any frame that was not queued above. */
6539 if (skb && !skb_in_use) {
6540 BT_DBG("Freeing %p", skb);
/* Finish an AMP channel move: return the rx state machine to RECV,
 * adopt the MTU of the new controller (block MTU for an AMP link,
 * ACL MTU otherwise), and resegment pending tx data.
 */
6547 static int l2cap_finish_move(struct l2cap_chan *chan)
6549 BT_DBG("chan %p", chan);
6551 chan->rx_state = L2CAP_RX_STATE_RECV;
6554 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6556 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6558 return l2cap_resegment(chan);
/* Rx handler for WAIT_P (channel move, waiting for the peer's poll).
 * On the expected event: ack via reqseq, rewind next_tx_seq so
 * unacked frames are resent on the new link, finish the move, and
 * answer the poll. Other events fall through to the RECV handler
 * without the skb.
 */
6561 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6562 struct l2cap_ctrl *control,
6563 struct sk_buff *skb, u8 event)
6567 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6573 l2cap_process_reqseq(chan, control->reqseq);
6575 if (!skb_queue_empty(&chan->tx_q))
6576 chan->tx_send_head = skb_peek(&chan->tx_q);
6578 chan->tx_send_head = NULL;
6580 /* Rewind next_tx_seq to the point expected
6583 chan->next_tx_seq = control->reqseq;
6584 chan->unacked_frames = 0;
6586 err = l2cap_finish_move(chan);
6590 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6591 l2cap_send_i_or_rr_or_rnr(chan);
6593 if (event == L2CAP_EV_RECV_IFRAME)
/* Reprocess the event through the normal RECV state handler. */
6596 return l2cap_rx_state_recv(chan, control, NULL, event);
/* Rx handler for WAIT_F (channel move, waiting for the F-bit reply
 * to our poll). Ignores frames without the F-bit; otherwise mirrors
 * wait_p: rewind tx state, adopt the new controller's MTU,
 * resegment, then reprocess the frame in RECV state.
 */
6599 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6600 struct l2cap_ctrl *control,
6601 struct sk_buff *skb, u8 event)
6605 if (!control->final)
6608 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6610 chan->rx_state = L2CAP_RX_STATE_RECV;
6611 l2cap_process_reqseq(chan, control->reqseq);
6613 if (!skb_queue_empty(&chan->tx_q))
6614 chan->tx_send_head = skb_peek(&chan->tx_q);
6616 chan->tx_send_head = NULL;
6618 /* Rewind next_tx_seq to the point expected
6621 chan->next_tx_seq = control->reqseq;
6622 chan->unacked_frames = 0;
/* Adopt block MTU on an AMP link, ACL MTU otherwise. */
6625 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6627 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6629 err = l2cap_resegment(chan);
6632 err = l2cap_rx_state_recv(chan, control, skb, event);
6637 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6639 /* Make sure reqseq is for a packet that has been sent but not acked */
/* reqseq is valid iff its modular distance behind next_tx_seq does
 * not exceed the span of currently-unacked sequence numbers.
 */
6642 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6643 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM rx dispatcher: validate the frame's reqseq, then
 * route the event to the handler for the channel's current rx
 * state. An invalid reqseq disconnects the channel.
 */
6646 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6647 struct sk_buff *skb, u8 event)
6651 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6652 control, skb, event, chan->rx_state);
6654 if (__valid_reqseq(chan, control->reqseq)) {
6655 switch (chan->rx_state) {
6656 case L2CAP_RX_STATE_RECV:
6657 err = l2cap_rx_state_recv(chan, control, skb, event);
6659 case L2CAP_RX_STATE_SREJ_SENT:
6660 err = l2cap_rx_state_srej_sent(chan, control, skb,
6663 case L2CAP_RX_STATE_WAIT_P:
6664 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6666 case L2CAP_RX_STATE_WAIT_F:
6667 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* reqseq acknowledges frames we never sent: fatal. */
6674 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6675 control->reqseq, chan->next_tx_seq,
6676 chan->expected_ack_seq);
6677 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: deliver only in-sequence frames; anything
 * else is dropped (streaming mode has no retransmission). Either
 * way, advance the expected sequence to follow the received txseq.
 */
6683 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6684 struct sk_buff *skb)
6688 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6691 if (l2cap_classify_txseq(chan, control->txseq) ==
6692 L2CAP_TXSEQ_EXPECTED) {
6693 l2cap_pass_to_tx(chan, control);
6695 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6696 __next_seq(chan, chan->buffer_seq));
6698 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6700 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: abandon any partial SDU and drop it. */
6703 kfree_skb(chan->sdu);
6706 chan->sdu_last_frag = NULL;
6710 BT_DBG("Freeing %p", skb);
6715 chan->last_acked_seq = control->txseq;
6716 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames on a channel: unpack
 * the control field, verify FCS, validate the payload length against
 * MPS, then route I-frames and S-frames to the appropriate state
 * machine. Protocol violations disconnect the channel.
 */
6721 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6723 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6727 __unpack_control(chan, skb);
6732 * We can just drop the corrupted I-frame here.
6733 * Receiver will miss it and start proper recovery
6734 * procedures and ask for retransmission.
6736 if (l2cap_check_fcs(chan, skb))
/* Exclude SDU-length and FCS fields from the payload-size check. */
6739 if (!control->sframe && control->sar == L2CAP_SAR_START)
6740 len -= L2CAP_SDULEN_SIZE;
6742 if (chan->fcs == L2CAP_FCS_CRC16)
6743 len -= L2CAP_FCS_SIZE;
6745 if (len > chan->mps) {
6746 l2cap_send_disconn_req(chan, ECONNRESET);
6750 if (!control->sframe) {
6753 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6754 control->sar, control->reqseq, control->final,
6757 /* Validate F-bit - F=0 always valid, F=1 only
6758 * valid in TX WAIT_F
6760 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6763 if (chan->mode != L2CAP_MODE_STREAMING) {
6764 event = L2CAP_EV_RECV_IFRAME;
6765 err = l2cap_rx(chan, control, skb, event);
6767 err = l2cap_stream_rx(chan, control, skb);
6771 l2cap_send_disconn_req(chan, ECONNRESET);
/* S-frame path: map the supervisory function to an rx event. */
6773 const u8 rx_func_to_event[4] = {
6774 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6775 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6778 /* Only I-frames are expected in streaming mode */
6779 if (chan->mode == L2CAP_MODE_STREAMING)
6782 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6783 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation. */
6787 BT_ERR("Trailing bytes: %d in sframe", len);
6788 l2cap_send_disconn_req(chan, ECONNRESET);
6792 /* Validate F and P bits */
6793 if (control->final && (control->poll ||
6794 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6797 event = rx_func_to_event[control->super];
6798 if (l2cap_rx(chan, control, skb, event))
6799 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return flow-control credits to the remote side of an LE CoC
 * channel once our rx credit count drops below half the initial
 * allotment, topping it back up to L2CAP_LE_MAX_CREDITS.
 */
6809 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6811 struct l2cap_conn *conn = chan->conn;
6812 struct l2cap_le_credits pkt;
6815 /* We return more credits to the sender only after the amount of
6816 * credits falls below half of the initial amount.
6818 if (chan->rx_credits >= (L2CAP_LE_MAX_CREDITS + 1) / 2)
6821 return_credits = L2CAP_LE_MAX_CREDITS - chan->rx_credits;
6823 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6825 chan->rx_credits += return_credits;
6827 pkt.cid = cpu_to_le16(chan->scid);
6828 pkt.credits = cpu_to_le16(return_credits);
6830 chan->ident = l2cap_get_ident(conn);
6832 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* LE CoC data receive: enforce credit-based flow control and the
 * channel MTU, reassemble multi-PDU SDUs (first PDU carries the SDU
 * length), and deliver completed SDUs via chan->ops->recv().
 * Frees the skb itself on internal errors, so it returns 0 in those
 * paths to avoid a caller double-free (see comment at the end).
 */
6835 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6839 if (!chan->rx_credits) {
6840 BT_ERR("No credits to receive LE L2CAP data");
6844 if (chan->imtu < skb->len) {
6845 BT_ERR("Too big LE L2CAP PDU");
6850 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6852 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: peel off and validate the SDU length. */
6859 sdu_len = get_unaligned_le16(skb->data);
6860 skb_pull(skb, L2CAP_SDULEN_SIZE);
6862 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6863 sdu_len, skb->len, chan->imtu);
6865 if (sdu_len > chan->imtu) {
6866 BT_ERR("Too big LE L2CAP SDU length received");
6871 if (skb->len > sdu_len) {
6872 BT_ERR("Too much LE L2CAP data received");
/* SDU fits in a single PDU: deliver it immediately. */
6877 if (skb->len == sdu_len)
6878 return chan->ops->recv(chan, skb);
6881 chan->sdu_len = sdu_len;
6882 chan->sdu_last_frag = skb;
/* Continuation PDU: append to the SDU under reassembly. */
6887 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6888 chan->sdu->len, skb->len, chan->sdu_len);
6890 if (chan->sdu->len + skb->len > chan->sdu_len) {
6891 BT_ERR("Too much LE L2CAP data received");
6896 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6899 if (chan->sdu->len == chan->sdu_len) {
6900 err = chan->ops->recv(chan, chan->sdu);
6903 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU and reset state. */
6911 kfree_skb(chan->sdu);
6913 chan->sdu_last_frag = NULL;
6917 /* We can't return an error here since we took care of the skb
6918 * freeing internally. An error return would cause the caller to
6919 * do a double-free of the skb.
/* Deliver a data frame to the channel identified by cid, dispatching
 * by channel mode (LE flow control / basic / ERTM / streaming).
 * Unknown CIDs (except the A2MP create case) drop the packet.
 */
6924 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6925 struct sk_buff *skb)
6927 struct l2cap_chan *chan;
6929 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP fixed channel is created lazily on first packet. */
6931 if (cid == L2CAP_CID_A2MP) {
6932 chan = a2mp_channel_create(conn, skb);
6938 l2cap_chan_lock(chan);
6940 BT_DBG("unknown cid 0x%4.4x", cid);
6941 /* Drop packet and return */
6947 BT_DBG("chan %p, len %d", chan, skb->len);
6949 if (chan->state != BT_CONNECTED)
6952 switch (chan->mode) {
6953 case L2CAP_MODE_LE_FLOWCTL:
6954 if (l2cap_le_data_rcv(chan, skb) < 0)
6959 case L2CAP_MODE_BASIC:
6960 /* If socket recv buffers overflows we drop data here
6961 * which is *bad* because L2CAP has to be reliable.
6962 * But we don't have any other choice. L2CAP doesn't
6963 * provide flow control mechanism. */
6965 if (chan->imtu < skb->len)
6968 if (!chan->ops->recv(chan, skb))
6972 case L2CAP_MODE_ERTM:
6973 case L2CAP_MODE_STREAMING:
6974 l2cap_data_rcv(chan, skb);
6978 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6986 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to a channel listening
 * on the given PSM, recording the sender's address and PSM in the
 * skb control block for recvmsg()'s msg_name.
 */
6989 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6990 struct sk_buff *skb)
6992 struct hci_conn *hcon = conn->hcon;
6993 struct l2cap_chan *chan;
/* Connectionless data only exists on BR/EDR. */
6995 if (hcon->type != ACL_LINK)
6998 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7003 BT_DBG("chan %p, len %d", chan, skb->len);
7005 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7008 if (chan->imtu < skb->len)
7011 /* Store remote BD_ADDR and PSM for msg_name */
7012 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
7013 bt_cb(skb)->psm = psm;
7015 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel frame to the globally registered ATT
 * channel for this address pair, dropping traffic from blacklisted
 * remotes or oversized frames.
 */
7022 static void l2cap_att_channel(struct l2cap_conn *conn,
7023 struct sk_buff *skb)
7025 struct hci_conn *hcon = conn->hcon;
7026 struct l2cap_chan *chan;
7028 if (hcon->type != LE_LINK)
7031 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
7032 &hcon->src, &hcon->dst);
7036 BT_DBG("chan %p, len %d", chan, skb->len);
7038 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
7041 if (chan->imtu < skb->len)
7044 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by CID: signaling channels,
 * connectionless, ATT, SMP, or a regular data channel. Takes
 * ownership of the skb.
 */
7051 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7053 struct l2cap_hdr *lh = (void *) skb->data;
7057 skb_pull(skb, L2CAP_HDR_SIZE);
7058 cid = __le16_to_cpu(lh->cid);
7059 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload. */
7061 if (len != skb->len) {
7066 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7069 case L2CAP_CID_SIGNALING:
7070 l2cap_sig_channel(conn, skb);
7073 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM before the payload. */
7074 psm = get_unaligned((__le16 *) skb->data);
7075 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7076 l2cap_conless_channel(conn, psm, skb);
7080 l2cap_att_channel(conn, skb);
7083 case L2CAP_CID_LE_SIGNALING:
7084 l2cap_le_sig_channel(conn, skb);
/* SMP rejection tears down the whole connection. */
7088 if (smp_sig_channel(conn, skb))
7089 l2cap_conn_del(conn->hcon, EACCES);
7093 l2cap_data_channel(conn, cid, skb);
7098 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening channels; an exact local-address match (lm1) takes
 * precedence over wildcard listeners (lm2). Returns HCI_LM_* flags.
 */
7100 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7102 int exact = 0, lm1 = 0, lm2 = 0;
7103 struct l2cap_chan *c;
7105 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7107 /* Find listening sockets and check their link_mode */
7108 read_lock(&chan_list_lock);
7109 list_for_each_entry(c, &chan_list, global_l) {
7110 if (c->state != BT_LISTEN)
/* Listener bound to this adapter's own address. */
7113 if (!bacmp(&c->src, &hdev->bdaddr)) {
7114 lm1 |= HCI_LM_ACCEPT;
7115 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7116 lm1 |= HCI_LM_MASTER;
/* Wildcard listener (any local address). */
7118 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7119 lm2 |= HCI_LM_ACCEPT;
7120 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7121 lm2 |= HCI_LM_MASTER;
7124 read_unlock(&chan_list_lock);
7126 return exact ? lm1 : lm2;
/* HCI callback for connection-complete: on success, create the
 * l2cap_conn and mark it ready; on failure, tear it down with the
 * translated error.
 */
7129 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7131 struct l2cap_conn *conn;
7133 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7136 conn = l2cap_conn_add(hcon);
7138 l2cap_conn_ready(conn);
7140 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the HCI reason code to use when the link is
 * being disconnected; defaults to remote-user-termination when no
 * l2cap_conn exists.
 */
7144 int l2cap_disconn_ind(struct hci_conn *hcon)
7146 struct l2cap_conn *conn = hcon->l2cap_data;
7148 BT_DBG("hcon %p", hcon);
7151 return HCI_ERROR_REMOTE_USER_TERM;
7152 return conn->disc_reason;
/* HCI callback for disconnection-complete: tear down the l2cap_conn
 * with the reason translated to an errno.
 */
7155 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7157 BT_DBG("hcon %p reason %d", hcon, reason);
7159 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a grace timer (medium security) or closes
 * the channel outright (high security); regaining it clears the
 * pending timer.
 */
7162 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7164 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7167 if (encrypt == 0x00) {
7168 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7169 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7170 } else if (chan->sec_level == BT_SECURITY_HIGH)
7171 l2cap_chan_close(chan, ECONNREFUSED);
7173 if (chan->sec_level == BT_SECURITY_MEDIUM)
7174 __clear_chan_timer(chan);
/* HCI callback for authentication/encryption completion: walk every
 * channel on the connection and advance its state machine — resume
 * established channels, (re)start pending connects, or answer held
 * CONNECT2 requests with the appropriate CONN_RSP.
 */
7178 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7180 struct l2cap_conn *conn = hcon->l2cap_data;
7181 struct l2cap_chan *chan;
7186 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
/* LE links: kick off SMP key distribution and stop the security
 * timer; per-channel handling below is for BR/EDR.
 */
7188 if (hcon->type == LE_LINK) {
7189 if (!status && encrypt)
7190 smp_distribute_keys(conn, 0);
7191 cancel_delayed_work(&conn->security_timer);
7194 mutex_lock(&conn->chan_lock);
7196 list_for_each_entry(chan, &conn->chan_l, list) {
7197 l2cap_chan_lock(chan);
7199 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7200 state_to_string(chan->state));
/* A2MP fixed channel needs no security handling here. */
7202 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
7203 l2cap_chan_unlock(chan);
/* ATT channel becomes ready as soon as the link encrypts. */
7207 if (chan->scid == L2CAP_CID_ATT) {
7208 if (!status && encrypt) {
7209 chan->sec_level = hcon->sec_level;
7210 l2cap_chan_ready(chan);
7213 l2cap_chan_unlock(chan);
7217 if (!__l2cap_no_conn_pending(chan)) {
7218 l2cap_chan_unlock(chan);
/* Already-established channels just resume on success. */
7222 if (!status && (chan->state == BT_CONNECTED ||
7223 chan->state == BT_CONFIG)) {
7224 chan->ops->resume(chan);
7225 l2cap_check_encryption(chan, encrypt);
7226 l2cap_chan_unlock(chan);
7230 if (chan->state == BT_CONNECT) {
7232 l2cap_start_connection(chan);
7234 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7235 } else if (chan->state == BT_CONNECT2) {
7236 struct l2cap_conn_rsp rsp;
/* Security done while a CONNECT_REQ was on hold: answer it
 * now — pend for deferred-setup sockets, accept otherwise,
 * or block on security failure (branch structure partially
 * elided in this excerpt).
 */
7240 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7241 res = L2CAP_CR_PEND;
7242 stat = L2CAP_CS_AUTHOR_PEND;
7243 chan->ops->defer(chan);
7245 l2cap_state_change(chan, BT_CONFIG);
7246 res = L2CAP_CR_SUCCESS;
7247 stat = L2CAP_CS_NO_INFO;
7250 l2cap_state_change(chan, BT_DISCONN);
7251 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7252 res = L2CAP_CR_SEC_BLOCK;
7253 stat = L2CAP_CS_NO_INFO;
7256 rsp.scid = cpu_to_le16(chan->dcid);
7257 rsp.dcid = cpu_to_le16(chan->scid);
7258 rsp.result = cpu_to_le16(res);
7259 rsp.status = cpu_to_le16(stat);
7260 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Accepted and no config exchange started yet: send ours. */
7263 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7264 res == L2CAP_CR_SUCCESS) {
7266 set_bit(CONF_REQ_SENT, &chan->conf_state);
7267 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7269 l2cap_build_conf_req(chan, buf),
7271 chan->num_conf_req++;
7275 l2cap_chan_unlock(chan);
7278 mutex_unlock(&conn->chan_lock);
/* HCI ACL data entry point: reassemble fragmented ACL packets into a
 * complete L2CAP frame (sized from the Basic L2CAP header of the
 * start fragment) and hand it to l2cap_recv_frame(). Any framing
 * inconsistency marks the connection unreliable.
 */
7283 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7285 struct l2cap_conn *conn = hcon->l2cap_data;
7286 struct l2cap_hdr *hdr;
7289 /* For AMP controller do not create l2cap conn */
7290 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7294 conn = l2cap_conn_add(hcon);
7299 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7303 case ACL_START_NO_FLUSH:
/* A new start fragment while reassembly is in progress means
 * the previous frame was never completed — discard it.
 */
7306 BT_ERR("Unexpected start frame (len %d)", skb->len);
7307 kfree_skb(conn->rx_skb);
7308 conn->rx_skb = NULL;
7310 l2cap_conn_unreliable(conn, ECOMM);
7313 /* Start fragment always begin with Basic L2CAP header */
7314 if (skb->len < L2CAP_HDR_SIZE) {
7315 BT_ERR("Frame is too short (len %d)", skb->len);
7316 l2cap_conn_unreliable(conn, ECOMM);
7320 hdr = (struct l2cap_hdr *) skb->data;
7321 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7323 if (len == skb->len) {
7324 /* Complete frame received */
7325 l2cap_recv_frame(conn, skb);
7329 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7331 if (skb->len > len) {
7332 BT_ERR("Frame is too long (len %d, expected len %d)",
7334 l2cap_conn_unreliable(conn, ECOMM);
7338 /* Allocate skb for the complete frame (with header) */
7339 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7343 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7345 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
7349 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7351 if (!conn->rx_len) {
7352 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7353 l2cap_conn_unreliable(conn, ECOMM);
7357 if (skb->len > conn->rx_len) {
7358 BT_ERR("Fragment is too long (len %d, expected %d)",
7359 skb->len, conn->rx_len);
7360 kfree_skb(conn->rx_skb);
7361 conn->rx_skb = NULL;
7363 l2cap_conn_unreliable(conn, ECOMM);
7367 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7369 conn->rx_len -= skb->len;
7371 if (!conn->rx_len) {
7372 /* Complete frame received. l2cap_recv_frame
7373 * takes ownership of the skb so set the global
7374 * rx_skb pointer to NULL first.
7376 struct sk_buff *rx_skb = conn->rx_skb;
7377 conn->rx_skb = NULL;
7378 l2cap_recv_frame(conn, rx_skb);
/* debugfs seq_file show: dump one line per registered L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode).
 */
7388 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7390 struct l2cap_chan *c;
7392 read_lock(&chan_list_lock);
7394 list_for_each_entry(c, &chan_list, global_l) {
7395 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7397 c->state, __le16_to_cpu(c->psm),
7398 c->scid, c->dcid, c->imtu, c->omtu,
7399 c->sec_level, c->mode);
7402 read_unlock(&chan_list_lock);
/* debugfs open: wire the file to the single-record show callback. */
7407 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7409 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
7412 static const struct file_operations l2cap_debugfs_fops = {
7413 .open = l2cap_debugfs_open,
7415 .llseek = seq_lseek,
7416 .release = single_release,
/* Handle of the debugfs entry, removed again in l2cap_exit(). */
7419 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and, when the
 * Bluetooth debugfs root exists, create the "l2cap" debug entry.
 */
7421 int __init l2cap_init(void)
7425 err = l2cap_init_sockets();
7429 if (IS_ERR_OR_NULL(bt_debugfs))
7432 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7433 NULL, &l2cap_debugfs_fops);
/* Module exit: remove the debugfs entry and unregister sockets. */
7438 void l2cap_exit(void)
7440 debugfs_remove(l2cap_debugfs);
7441 l2cap_cleanup_sockets();
7444 module_param(disable_ertm, bool, 0644);
7445 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");