2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state, 0);
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
239 chan->ops->state_change(chan, chan->state, err);
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
244 chan->ops->state_change(chan, chan->state, err);
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
396 static void l2cap_chan_timeout(struct work_struct *work)
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
400 struct l2cap_conn *conn = chan->conn;
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
416 l2cap_chan_close(chan, reason);
418 l2cap_chan_unlock(chan);
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
423 l2cap_chan_put(chan);
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
493 void l2cap_le_flowctl_init(struct l2cap_chan *chan)
495 chan->imtu = L2CAP_DEFAULT_MTU;
496 chan->omtu = L2CAP_LE_MIN_MTU;
497 chan->mode = L2CAP_MODE_LE_FLOWCTL;
498 chan->tx_credits = 0;
499 chan->rx_credits = L2CAP_LE_MAX_CREDITS;
502 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
504 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
505 __le16_to_cpu(chan->psm), chan->dcid);
507 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
511 switch (chan->chan_type) {
512 case L2CAP_CHAN_CONN_ORIENTED:
513 if (conn->hcon->type == LE_LINK) {
515 chan->omtu = L2CAP_DEFAULT_MTU;
516 if (chan->dcid == L2CAP_CID_ATT)
517 chan->scid = L2CAP_CID_ATT;
519 chan->scid = l2cap_alloc_cid(conn);
521 /* Alloc CID for connection-oriented socket */
522 chan->scid = l2cap_alloc_cid(conn);
523 chan->omtu = L2CAP_DEFAULT_MTU;
527 case L2CAP_CHAN_CONN_LESS:
528 /* Connectionless socket */
529 chan->scid = L2CAP_CID_CONN_LESS;
530 chan->dcid = L2CAP_CID_CONN_LESS;
531 chan->omtu = L2CAP_DEFAULT_MTU;
534 case L2CAP_CHAN_CONN_FIX_A2MP:
535 chan->scid = L2CAP_CID_A2MP;
536 chan->dcid = L2CAP_CID_A2MP;
537 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
538 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
542 /* Raw socket can send/recv signalling messages only */
543 chan->scid = L2CAP_CID_SIGNALING;
544 chan->dcid = L2CAP_CID_SIGNALING;
545 chan->omtu = L2CAP_DEFAULT_MTU;
548 chan->local_id = L2CAP_BESTEFFORT_ID;
549 chan->local_stype = L2CAP_SERV_BESTEFFORT;
550 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
551 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
552 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
553 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
555 l2cap_chan_hold(chan);
557 hci_conn_hold(conn->hcon);
559 list_add(&chan->list, &conn->chan_l);
562 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
564 mutex_lock(&conn->chan_lock);
565 __l2cap_chan_add(conn, chan);
566 mutex_unlock(&conn->chan_lock);
569 void l2cap_chan_del(struct l2cap_chan *chan, int err)
571 struct l2cap_conn *conn = chan->conn;
573 __clear_chan_timer(chan);
575 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
578 struct amp_mgr *mgr = conn->hcon->amp_mgr;
579 /* Delete from channel list */
580 list_del(&chan->list);
582 l2cap_chan_put(chan);
586 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
587 hci_conn_drop(conn->hcon);
589 if (mgr && mgr->bredr_chan == chan)
590 mgr->bredr_chan = NULL;
593 if (chan->hs_hchan) {
594 struct hci_chan *hs_hchan = chan->hs_hchan;
596 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
597 amp_disconnect_logical_link(hs_hchan);
600 chan->ops->teardown(chan, err);
602 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
606 case L2CAP_MODE_BASIC:
609 case L2CAP_MODE_LE_FLOWCTL:
612 case L2CAP_MODE_ERTM:
613 __clear_retrans_timer(chan);
614 __clear_monitor_timer(chan);
615 __clear_ack_timer(chan);
617 skb_queue_purge(&chan->srej_q);
619 l2cap_seq_list_free(&chan->srej_list);
620 l2cap_seq_list_free(&chan->retrans_list);
624 case L2CAP_MODE_STREAMING:
625 skb_queue_purge(&chan->tx_q);
632 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
634 struct l2cap_conn *conn = chan->conn;
635 struct l2cap_le_conn_rsp rsp;
638 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
639 result = L2CAP_CR_AUTHORIZATION;
641 result = L2CAP_CR_BAD_PSM;
643 l2cap_state_change(chan, BT_DISCONN);
645 rsp.dcid = cpu_to_le16(chan->scid);
646 rsp.mtu = cpu_to_le16(chan->imtu);
647 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
648 rsp.credits = cpu_to_le16(chan->rx_credits);
649 rsp.result = cpu_to_le16(result);
651 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
655 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
657 struct l2cap_conn *conn = chan->conn;
658 struct l2cap_conn_rsp rsp;
661 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
662 result = L2CAP_CR_SEC_BLOCK;
664 result = L2CAP_CR_BAD_PSM;
666 l2cap_state_change(chan, BT_DISCONN);
668 rsp.scid = cpu_to_le16(chan->dcid);
669 rsp.dcid = cpu_to_le16(chan->scid);
670 rsp.result = cpu_to_le16(result);
671 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
673 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
676 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
678 struct l2cap_conn *conn = chan->conn;
680 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
682 switch (chan->state) {
684 chan->ops->teardown(chan, 0);
689 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
690 * check for chan->psm.
692 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
693 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
694 l2cap_send_disconn_req(chan, reason);
696 l2cap_chan_del(chan, reason);
700 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
701 if (conn->hcon->type == ACL_LINK)
702 l2cap_chan_connect_reject(chan);
703 else if (conn->hcon->type == LE_LINK)
704 l2cap_chan_le_connect_reject(chan);
707 l2cap_chan_del(chan, reason);
712 l2cap_chan_del(chan, reason);
716 chan->ops->teardown(chan, 0);
721 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
723 switch (chan->chan_type) {
725 switch (chan->sec_level) {
726 case BT_SECURITY_HIGH:
727 return HCI_AT_DEDICATED_BONDING_MITM;
728 case BT_SECURITY_MEDIUM:
729 return HCI_AT_DEDICATED_BONDING;
731 return HCI_AT_NO_BONDING;
734 case L2CAP_CHAN_CONN_LESS:
735 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
736 if (chan->sec_level == BT_SECURITY_LOW)
737 chan->sec_level = BT_SECURITY_SDP;
739 if (chan->sec_level == BT_SECURITY_HIGH)
740 return HCI_AT_NO_BONDING_MITM;
742 return HCI_AT_NO_BONDING;
744 case L2CAP_CHAN_CONN_ORIENTED:
745 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
746 if (chan->sec_level == BT_SECURITY_LOW)
747 chan->sec_level = BT_SECURITY_SDP;
749 if (chan->sec_level == BT_SECURITY_HIGH)
750 return HCI_AT_NO_BONDING_MITM;
752 return HCI_AT_NO_BONDING;
756 switch (chan->sec_level) {
757 case BT_SECURITY_HIGH:
758 return HCI_AT_GENERAL_BONDING_MITM;
759 case BT_SECURITY_MEDIUM:
760 return HCI_AT_GENERAL_BONDING;
762 return HCI_AT_NO_BONDING;
768 /* Service level security */
769 int l2cap_chan_check_security(struct l2cap_chan *chan)
771 struct l2cap_conn *conn = chan->conn;
774 if (conn->hcon->type == LE_LINK)
775 return smp_conn_security(conn->hcon, chan->sec_level);
777 auth_type = l2cap_get_auth_type(chan);
779 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
782 static u8 l2cap_get_ident(struct l2cap_conn *conn)
786 /* Get next available identificator.
787 * 1 - 128 are used by kernel.
788 * 129 - 199 are reserved.
789 * 200 - 254 are used by utilities like l2ping, etc.
792 spin_lock(&conn->lock);
794 if (++conn->tx_ident > 128)
799 spin_unlock(&conn->lock);
804 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
807 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
810 BT_DBG("code 0x%2.2x", code);
815 if (lmp_no_flush_capable(conn->hcon->hdev))
816 flags = ACL_START_NO_FLUSH;
820 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
821 skb->priority = HCI_PRIO_MAX;
823 hci_send_acl(conn->hchan, skb, flags);
826 static bool __chan_is_moving(struct l2cap_chan *chan)
828 return chan->move_state != L2CAP_MOVE_STABLE &&
829 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
832 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
834 struct hci_conn *hcon = chan->conn->hcon;
837 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
840 if (chan->hs_hcon && !__chan_is_moving(chan)) {
842 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
849 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
850 lmp_no_flush_capable(hcon->hdev))
851 flags = ACL_START_NO_FLUSH;
855 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
856 hci_send_acl(chan->conn->hchan, skb, flags);
859 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
861 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
862 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
864 if (enh & L2CAP_CTRL_FRAME_TYPE) {
867 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
868 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
875 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
876 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
883 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
885 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
886 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
888 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
891 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
892 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
899 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
900 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
907 static inline void __unpack_control(struct l2cap_chan *chan,
910 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
911 __unpack_extended_control(get_unaligned_le32(skb->data),
912 &bt_cb(skb)->control);
913 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
915 __unpack_enhanced_control(get_unaligned_le16(skb->data),
916 &bt_cb(skb)->control);
917 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
921 static u32 __pack_extended_control(struct l2cap_ctrl *control)
925 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
926 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
928 if (control->sframe) {
929 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
930 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
931 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
933 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
934 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
940 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
944 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
945 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
947 if (control->sframe) {
948 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
949 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
950 packed |= L2CAP_CTRL_FRAME_TYPE;
952 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
953 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
959 static inline void __pack_control(struct l2cap_chan *chan,
960 struct l2cap_ctrl *control,
963 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
964 put_unaligned_le32(__pack_extended_control(control),
965 skb->data + L2CAP_HDR_SIZE);
967 put_unaligned_le16(__pack_enhanced_control(control),
968 skb->data + L2CAP_HDR_SIZE);
972 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
974 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
975 return L2CAP_EXT_HDR_SIZE;
977 return L2CAP_ENH_HDR_SIZE;
980 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
984 struct l2cap_hdr *lh;
985 int hlen = __ertm_hdr_size(chan);
987 if (chan->fcs == L2CAP_FCS_CRC16)
988 hlen += L2CAP_FCS_SIZE;
990 skb = bt_skb_alloc(hlen, GFP_KERNEL);
993 return ERR_PTR(-ENOMEM);
995 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
996 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
997 lh->cid = cpu_to_le16(chan->dcid);
999 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1000 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1002 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1004 if (chan->fcs == L2CAP_FCS_CRC16) {
1005 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1006 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1009 skb->priority = HCI_PRIO_MAX;
1013 static void l2cap_send_sframe(struct l2cap_chan *chan,
1014 struct l2cap_ctrl *control)
1016 struct sk_buff *skb;
1019 BT_DBG("chan %p, control %p", chan, control);
1021 if (!control->sframe)
1024 if (__chan_is_moving(chan))
1027 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1031 if (control->super == L2CAP_SUPER_RR)
1032 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1033 else if (control->super == L2CAP_SUPER_RNR)
1034 set_bit(CONN_RNR_SENT, &chan->conn_state);
1036 if (control->super != L2CAP_SUPER_SREJ) {
1037 chan->last_acked_seq = control->reqseq;
1038 __clear_ack_timer(chan);
1041 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1042 control->final, control->poll, control->super);
1044 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1045 control_field = __pack_extended_control(control);
1047 control_field = __pack_enhanced_control(control);
1049 skb = l2cap_create_sframe_pdu(chan, control_field);
1051 l2cap_do_send(chan, skb);
1054 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1056 struct l2cap_ctrl control;
1058 BT_DBG("chan %p, poll %d", chan, poll);
1060 memset(&control, 0, sizeof(control));
1062 control.poll = poll;
1064 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1065 control.super = L2CAP_SUPER_RNR;
1067 control.super = L2CAP_SUPER_RR;
1069 control.reqseq = chan->buffer_seq;
1070 l2cap_send_sframe(chan, &control);
1073 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1075 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1078 static bool __amp_capable(struct l2cap_chan *chan)
1080 struct l2cap_conn *conn = chan->conn;
1081 struct hci_dev *hdev;
1082 bool amp_available = false;
1084 if (!conn->hs_enabled)
1087 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1090 read_lock(&hci_dev_list_lock);
1091 list_for_each_entry(hdev, &hci_dev_list, list) {
1092 if (hdev->amp_type != AMP_TYPE_BREDR &&
1093 test_bit(HCI_UP, &hdev->flags)) {
1094 amp_available = true;
1098 read_unlock(&hci_dev_list_lock);
1100 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1101 return amp_available;
1106 static bool l2cap_check_efs(struct l2cap_chan *chan)
1108 /* Check EFS parameters */
1112 void l2cap_send_conn_req(struct l2cap_chan *chan)
1114 struct l2cap_conn *conn = chan->conn;
1115 struct l2cap_conn_req req;
1117 req.scid = cpu_to_le16(chan->scid);
1118 req.psm = chan->psm;
1120 chan->ident = l2cap_get_ident(conn);
1122 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1124 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1127 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1129 struct l2cap_create_chan_req req;
1130 req.scid = cpu_to_le16(chan->scid);
1131 req.psm = chan->psm;
1132 req.amp_id = amp_id;
1134 chan->ident = l2cap_get_ident(chan->conn);
1136 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1140 static void l2cap_move_setup(struct l2cap_chan *chan)
1142 struct sk_buff *skb;
1144 BT_DBG("chan %p", chan);
1146 if (chan->mode != L2CAP_MODE_ERTM)
1149 __clear_retrans_timer(chan);
1150 __clear_monitor_timer(chan);
1151 __clear_ack_timer(chan);
1153 chan->retry_count = 0;
1154 skb_queue_walk(&chan->tx_q, skb) {
1155 if (bt_cb(skb)->control.retries)
1156 bt_cb(skb)->control.retries = 1;
1161 chan->expected_tx_seq = chan->buffer_seq;
1163 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1164 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1165 l2cap_seq_list_clear(&chan->retrans_list);
1166 l2cap_seq_list_clear(&chan->srej_list);
1167 skb_queue_purge(&chan->srej_q);
1169 chan->tx_state = L2CAP_TX_STATE_XMIT;
1170 chan->rx_state = L2CAP_RX_STATE_MOVE;
1172 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1175 static void l2cap_move_done(struct l2cap_chan *chan)
1177 u8 move_role = chan->move_role;
1178 BT_DBG("chan %p", chan);
1180 chan->move_state = L2CAP_MOVE_STABLE;
1181 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1183 if (chan->mode != L2CAP_MODE_ERTM)
1186 switch (move_role) {
1187 case L2CAP_MOVE_ROLE_INITIATOR:
1188 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1189 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1191 case L2CAP_MOVE_ROLE_RESPONDER:
1192 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1197 static void l2cap_chan_ready(struct l2cap_chan *chan)
1199 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1200 chan->conf_state = 0;
1201 __clear_chan_timer(chan);
1203 chan->state = BT_CONNECTED;
1205 chan->ops->ready(chan);
1208 static void l2cap_le_connect(struct l2cap_chan *chan)
1210 struct l2cap_conn *conn = chan->conn;
1211 struct l2cap_le_conn_req req;
1213 req.psm = chan->psm;
1214 req.scid = cpu_to_le16(chan->scid);
1215 req.mtu = cpu_to_le16(chan->imtu);
1216 req.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
1217 req.credits = cpu_to_le16(chan->rx_credits);
1219 chan->ident = l2cap_get_ident(conn);
1221 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1225 static void l2cap_le_start(struct l2cap_chan *chan)
1227 struct l2cap_conn *conn = chan->conn;
1229 if (!smp_conn_security(conn->hcon, chan->sec_level))
1233 l2cap_chan_ready(chan);
1237 if (chan->state == BT_CONNECT)
1238 l2cap_le_connect(chan);
1241 static void l2cap_start_connection(struct l2cap_chan *chan)
1243 if (__amp_capable(chan)) {
1244 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1245 a2mp_discover_amp(chan);
1246 } else if (chan->conn->hcon->type == LE_LINK) {
1247 l2cap_le_start(chan);
1249 l2cap_send_conn_req(chan);
1253 static void l2cap_do_start(struct l2cap_chan *chan)
1255 struct l2cap_conn *conn = chan->conn;
1257 if (conn->hcon->type == LE_LINK) {
1258 l2cap_le_start(chan);
1262 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1263 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1266 if (l2cap_chan_check_security(chan) &&
1267 __l2cap_no_conn_pending(chan)) {
1268 l2cap_start_connection(chan);
1271 struct l2cap_info_req req;
1272 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1274 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1275 conn->info_ident = l2cap_get_ident(conn);
1277 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1279 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1284 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1286 u32 local_feat_mask = l2cap_feat_mask;
1288 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1291 case L2CAP_MODE_ERTM:
1292 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1293 case L2CAP_MODE_STREAMING:
1294 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1300 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1302 struct l2cap_conn *conn = chan->conn;
1303 struct l2cap_disconn_req req;
1308 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1309 __clear_retrans_timer(chan);
1310 __clear_monitor_timer(chan);
1311 __clear_ack_timer(chan);
1314 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1315 l2cap_state_change(chan, BT_DISCONN);
1319 req.dcid = cpu_to_le16(chan->dcid);
1320 req.scid = cpu_to_le16(chan->scid);
1321 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1324 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1327 /* ---- L2CAP connections ---- */
1328 static void l2cap_conn_start(struct l2cap_conn *conn)
1330 struct l2cap_chan *chan, *tmp;
1332 BT_DBG("conn %p", conn);
1334 mutex_lock(&conn->chan_lock);
1336 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1337 l2cap_chan_lock(chan);
1339 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1340 l2cap_chan_unlock(chan);
1344 if (chan->state == BT_CONNECT) {
1345 if (!l2cap_chan_check_security(chan) ||
1346 !__l2cap_no_conn_pending(chan)) {
1347 l2cap_chan_unlock(chan);
1351 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1352 && test_bit(CONF_STATE2_DEVICE,
1353 &chan->conf_state)) {
1354 l2cap_chan_close(chan, ECONNRESET);
1355 l2cap_chan_unlock(chan);
1359 l2cap_start_connection(chan);
1361 } else if (chan->state == BT_CONNECT2) {
1362 struct l2cap_conn_rsp rsp;
1364 rsp.scid = cpu_to_le16(chan->dcid);
1365 rsp.dcid = cpu_to_le16(chan->scid);
1367 if (l2cap_chan_check_security(chan)) {
1368 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1369 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1370 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1371 chan->ops->defer(chan);
1374 l2cap_state_change(chan, BT_CONFIG);
1375 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1376 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1379 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1380 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1383 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1386 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1387 rsp.result != L2CAP_CR_SUCCESS) {
1388 l2cap_chan_unlock(chan);
1392 set_bit(CONF_REQ_SENT, &chan->conf_state);
1393 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1394 l2cap_build_conf_req(chan, buf), buf);
1395 chan->num_conf_req++;
1398 l2cap_chan_unlock(chan);
1401 mutex_unlock(&conn->chan_lock);
1404 /* Find socket with cid and source/destination bdaddr.
1405 * Returns closest match, locked.
1407 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1411 struct l2cap_chan *c, *c1 = NULL;
1413 read_lock(&chan_list_lock);
1415 list_for_each_entry(c, &chan_list, global_l) {
1416 if (state && c->state != state)
1419 if (c->scid == cid) {
1420 int src_match, dst_match;
1421 int src_any, dst_any;
1424 src_match = !bacmp(&c->src, src);
1425 dst_match = !bacmp(&c->dst, dst);
1426 if (src_match && dst_match) {
1427 read_unlock(&chan_list_lock);
1432 src_any = !bacmp(&c->src, BDADDR_ANY);
1433 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1434 if ((src_match && dst_any) || (src_any && dst_match) ||
1435 (src_any && dst_any))
1440 read_unlock(&chan_list_lock);
1445 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1447 struct hci_conn *hcon = conn->hcon;
1448 struct l2cap_chan *chan, *pchan;
1453 /* Check if we have socket listening on cid */
1454 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1455 &hcon->src, &hcon->dst);
1459 /* Client ATT sockets should override the server one */
1460 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1463 dst_type = bdaddr_type(hcon, hcon->dst_type);
1465 /* If device is blocked, do not create a channel for it */
1466 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1469 l2cap_chan_lock(pchan);
1471 chan = pchan->ops->new_connection(pchan);
1475 chan->dcid = L2CAP_CID_ATT;
1477 bacpy(&chan->src, &hcon->src);
1478 bacpy(&chan->dst, &hcon->dst);
1479 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1480 chan->dst_type = dst_type;
1482 __l2cap_chan_add(conn, chan);
1485 l2cap_chan_unlock(pchan);
1488 static void l2cap_conn_ready(struct l2cap_conn *conn)
1490 struct l2cap_chan *chan;
1491 struct hci_conn *hcon = conn->hcon;
1493 BT_DBG("conn %p", conn);
1495 /* For outgoing pairing which doesn't necessarily have an
1496 * associated socket (e.g. mgmt_pair_device).
1498 if (hcon->out && hcon->type == LE_LINK)
1499 smp_conn_security(hcon, hcon->pending_sec_level);
1501 mutex_lock(&conn->chan_lock);
1503 if (hcon->type == LE_LINK)
1504 l2cap_le_conn_ready(conn);
1506 list_for_each_entry(chan, &conn->chan_l, list) {
1508 l2cap_chan_lock(chan);
1510 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1511 l2cap_chan_unlock(chan);
1515 if (hcon->type == LE_LINK) {
1516 l2cap_le_start(chan);
1517 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1518 l2cap_chan_ready(chan);
1520 } else if (chan->state == BT_CONNECT) {
1521 l2cap_do_start(chan);
1524 l2cap_chan_unlock(chan);
1527 mutex_unlock(&conn->chan_lock);
1530 /* Notify sockets that we cannot guaranty reliability anymore */
1531 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1533 struct l2cap_chan *chan;
1535 BT_DBG("conn %p", conn);
1537 mutex_lock(&conn->chan_lock);
1539 list_for_each_entry(chan, &conn->chan_l, list) {
1540 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1541 l2cap_chan_set_err(chan, err);
1544 mutex_unlock(&conn->chan_lock);
1547 static void l2cap_info_timeout(struct work_struct *work)
1549 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1552 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1553 conn->info_ident = 0;
1555 l2cap_conn_start(conn);
1560 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1561 * callback is called during registration. The ->remove callback is called
1562 * during unregistration.
1563 * An l2cap_user object can either be explicitly unregistered or when the
1564 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1565 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1566 * External modules must own a reference to the l2cap_conn object if they intend
1567 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1568 * any time if they don't.
1571 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1573 struct hci_dev *hdev = conn->hcon->hdev;
1576 /* We need to check whether l2cap_conn is registered. If it is not, we
1577 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1578 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1579 * relies on the parent hci_conn object to be locked. This itself relies
1580 * on the hci_dev object to be locked. So we must lock the hci device
1585 if (user->list.next || user->list.prev) {
1590 /* conn->hchan is NULL after l2cap_conn_del() was called */
1596 ret = user->probe(conn, user);
1600 list_add(&user->list, &conn->users);
1604 hci_dev_unlock(hdev);
1607 EXPORT_SYMBOL(l2cap_register_user);
1609 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1611 struct hci_dev *hdev = conn->hcon->hdev;
1615 if (!user->list.next || !user->list.prev)
1618 list_del(&user->list);
1619 user->list.next = NULL;
1620 user->list.prev = NULL;
1621 user->remove(conn, user);
1624 hci_dev_unlock(hdev);
1626 EXPORT_SYMBOL(l2cap_unregister_user);
1628 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1630 struct l2cap_user *user;
1632 while (!list_empty(&conn->users)) {
1633 user = list_first_entry(&conn->users, struct l2cap_user, list);
1634 list_del(&user->list);
1635 user->list.next = NULL;
1636 user->list.prev = NULL;
1637 user->remove(conn, user);
1641 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1643 struct l2cap_conn *conn = hcon->l2cap_data;
1644 struct l2cap_chan *chan, *l;
1649 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1651 kfree_skb(conn->rx_skb);
1653 l2cap_unregister_all_users(conn);
1655 mutex_lock(&conn->chan_lock);
1658 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1659 l2cap_chan_hold(chan);
1660 l2cap_chan_lock(chan);
1662 l2cap_chan_del(chan, err);
1664 l2cap_chan_unlock(chan);
1666 chan->ops->close(chan);
1667 l2cap_chan_put(chan);
1670 mutex_unlock(&conn->chan_lock);
1672 hci_chan_del(conn->hchan);
1674 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1675 cancel_delayed_work_sync(&conn->info_timer);
1677 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1678 cancel_delayed_work_sync(&conn->security_timer);
1679 smp_chan_destroy(conn);
1682 hcon->l2cap_data = NULL;
1684 l2cap_conn_put(conn);
1687 static void security_timeout(struct work_struct *work)
1689 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1690 security_timer.work);
1692 BT_DBG("conn %p", conn);
1694 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1695 smp_chan_destroy(conn);
1696 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1700 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1702 struct l2cap_conn *conn = hcon->l2cap_data;
1703 struct hci_chan *hchan;
1708 hchan = hci_chan_create(hcon);
1712 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1714 hci_chan_del(hchan);
1718 kref_init(&conn->ref);
1719 hcon->l2cap_data = conn;
1721 hci_conn_get(conn->hcon);
1722 conn->hchan = hchan;
1724 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1726 switch (hcon->type) {
1728 if (hcon->hdev->le_mtu) {
1729 conn->mtu = hcon->hdev->le_mtu;
1734 conn->mtu = hcon->hdev->acl_mtu;
1738 conn->feat_mask = 0;
1740 if (hcon->type == ACL_LINK)
1741 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1742 &hcon->hdev->dev_flags);
1744 spin_lock_init(&conn->lock);
1745 mutex_init(&conn->chan_lock);
1747 INIT_LIST_HEAD(&conn->chan_l);
1748 INIT_LIST_HEAD(&conn->users);
1750 if (hcon->type == LE_LINK)
1751 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1753 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1755 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1760 static void l2cap_conn_free(struct kref *ref)
1762 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1764 hci_conn_put(conn->hcon);
1768 void l2cap_conn_get(struct l2cap_conn *conn)
1770 kref_get(&conn->ref);
1772 EXPORT_SYMBOL(l2cap_conn_get);
1774 void l2cap_conn_put(struct l2cap_conn *conn)
1776 kref_put(&conn->ref, l2cap_conn_free);
1778 EXPORT_SYMBOL(l2cap_conn_put);
1780 /* ---- Socket interface ---- */
1782 /* Find socket with psm and source / destination bdaddr.
1783 * Returns closest match.
1785 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1790 struct l2cap_chan *c, *c1 = NULL;
1792 read_lock(&chan_list_lock);
1794 list_for_each_entry(c, &chan_list, global_l) {
1795 if (state && c->state != state)
1798 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1801 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1804 if (c->psm == psm) {
1805 int src_match, dst_match;
1806 int src_any, dst_any;
1809 src_match = !bacmp(&c->src, src);
1810 dst_match = !bacmp(&c->dst, dst);
1811 if (src_match && dst_match) {
1812 read_unlock(&chan_list_lock);
1817 src_any = !bacmp(&c->src, BDADDR_ANY);
1818 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1819 if ((src_match && dst_any) || (src_any && dst_match) ||
1820 (src_any && dst_any))
1825 read_unlock(&chan_list_lock);
1830 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1831 bdaddr_t *dst, u8 dst_type)
1833 struct l2cap_conn *conn;
1834 struct hci_conn *hcon;
1835 struct hci_dev *hdev;
1839 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1840 dst_type, __le16_to_cpu(psm));
1842 hdev = hci_get_route(dst, &chan->src);
1844 return -EHOSTUNREACH;
1848 l2cap_chan_lock(chan);
1850 /* PSM must be odd and lsb of upper byte must be 0 */
1851 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1852 chan->chan_type != L2CAP_CHAN_RAW) {
1857 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1862 switch (chan->mode) {
1863 case L2CAP_MODE_BASIC:
1864 case L2CAP_MODE_LE_FLOWCTL:
1866 case L2CAP_MODE_ERTM:
1867 case L2CAP_MODE_STREAMING:
1876 switch (chan->state) {
1880 /* Already connecting */
1885 /* Already connected */
1899 /* Set destination address and psm */
1900 bacpy(&chan->dst, dst);
1901 chan->dst_type = dst_type;
1906 auth_type = l2cap_get_auth_type(chan);
1908 if (bdaddr_type_is_le(dst_type))
1909 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1910 chan->sec_level, auth_type);
1912 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1913 chan->sec_level, auth_type);
1916 err = PTR_ERR(hcon);
1920 conn = l2cap_conn_add(hcon);
1922 hci_conn_drop(hcon);
1927 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1928 hci_conn_drop(hcon);
1933 /* Update source addr of the socket */
1934 bacpy(&chan->src, &hcon->src);
1935 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1937 l2cap_chan_unlock(chan);
1938 l2cap_chan_add(conn, chan);
1939 l2cap_chan_lock(chan);
1941 /* l2cap_chan_add takes its own ref so we can drop this one */
1942 hci_conn_drop(hcon);
1944 l2cap_state_change(chan, BT_CONNECT);
1945 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1947 if (hcon->state == BT_CONNECTED) {
1948 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1949 __clear_chan_timer(chan);
1950 if (l2cap_chan_check_security(chan))
1951 l2cap_state_change(chan, BT_CONNECTED);
1953 l2cap_do_start(chan);
1959 l2cap_chan_unlock(chan);
1960 hci_dev_unlock(hdev);
1965 static void l2cap_monitor_timeout(struct work_struct *work)
1967 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1968 monitor_timer.work);
1970 BT_DBG("chan %p", chan);
1972 l2cap_chan_lock(chan);
1975 l2cap_chan_unlock(chan);
1976 l2cap_chan_put(chan);
1980 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1982 l2cap_chan_unlock(chan);
1983 l2cap_chan_put(chan);
1986 static void l2cap_retrans_timeout(struct work_struct *work)
1988 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1989 retrans_timer.work);
1991 BT_DBG("chan %p", chan);
1993 l2cap_chan_lock(chan);
1996 l2cap_chan_unlock(chan);
1997 l2cap_chan_put(chan);
2001 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2002 l2cap_chan_unlock(chan);
2003 l2cap_chan_put(chan);
2006 static void l2cap_streaming_send(struct l2cap_chan *chan,
2007 struct sk_buff_head *skbs)
2009 struct sk_buff *skb;
2010 struct l2cap_ctrl *control;
2012 BT_DBG("chan %p, skbs %p", chan, skbs);
2014 if (__chan_is_moving(chan))
2017 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2019 while (!skb_queue_empty(&chan->tx_q)) {
2021 skb = skb_dequeue(&chan->tx_q);
2023 bt_cb(skb)->control.retries = 1;
2024 control = &bt_cb(skb)->control;
2026 control->reqseq = 0;
2027 control->txseq = chan->next_tx_seq;
2029 __pack_control(chan, control, skb);
2031 if (chan->fcs == L2CAP_FCS_CRC16) {
2032 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2033 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2036 l2cap_do_send(chan, skb);
2038 BT_DBG("Sent txseq %u", control->txseq);
2040 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2041 chan->frames_sent++;
2045 static int l2cap_ertm_send(struct l2cap_chan *chan)
2047 struct sk_buff *skb, *tx_skb;
2048 struct l2cap_ctrl *control;
2051 BT_DBG("chan %p", chan);
2053 if (chan->state != BT_CONNECTED)
2056 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2059 if (__chan_is_moving(chan))
2062 while (chan->tx_send_head &&
2063 chan->unacked_frames < chan->remote_tx_win &&
2064 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2066 skb = chan->tx_send_head;
2068 bt_cb(skb)->control.retries = 1;
2069 control = &bt_cb(skb)->control;
2071 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2074 control->reqseq = chan->buffer_seq;
2075 chan->last_acked_seq = chan->buffer_seq;
2076 control->txseq = chan->next_tx_seq;
2078 __pack_control(chan, control, skb);
2080 if (chan->fcs == L2CAP_FCS_CRC16) {
2081 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2082 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2085 /* Clone after data has been modified. Data is assumed to be
2086 read-only (for locking purposes) on cloned sk_buffs.
2088 tx_skb = skb_clone(skb, GFP_KERNEL);
2093 __set_retrans_timer(chan);
2095 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2096 chan->unacked_frames++;
2097 chan->frames_sent++;
2100 if (skb_queue_is_last(&chan->tx_q, skb))
2101 chan->tx_send_head = NULL;
2103 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2105 l2cap_do_send(chan, tx_skb);
2106 BT_DBG("Sent txseq %u", control->txseq);
2109 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2110 chan->unacked_frames, skb_queue_len(&chan->tx_q));
2115 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2117 struct l2cap_ctrl control;
2118 struct sk_buff *skb;
2119 struct sk_buff *tx_skb;
2122 BT_DBG("chan %p", chan);
2124 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2127 if (__chan_is_moving(chan))
2130 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2131 seq = l2cap_seq_list_pop(&chan->retrans_list);
2133 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2135 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2140 bt_cb(skb)->control.retries++;
2141 control = bt_cb(skb)->control;
2143 if (chan->max_tx != 0 &&
2144 bt_cb(skb)->control.retries > chan->max_tx) {
2145 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2146 l2cap_send_disconn_req(chan, ECONNRESET);
2147 l2cap_seq_list_clear(&chan->retrans_list);
2151 control.reqseq = chan->buffer_seq;
2152 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2157 if (skb_cloned(skb)) {
2158 /* Cloned sk_buffs are read-only, so we need a
2161 tx_skb = skb_copy(skb, GFP_KERNEL);
2163 tx_skb = skb_clone(skb, GFP_KERNEL);
2167 l2cap_seq_list_clear(&chan->retrans_list);
2171 /* Update skb contents */
2172 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2173 put_unaligned_le32(__pack_extended_control(&control),
2174 tx_skb->data + L2CAP_HDR_SIZE);
2176 put_unaligned_le16(__pack_enhanced_control(&control),
2177 tx_skb->data + L2CAP_HDR_SIZE);
2180 if (chan->fcs == L2CAP_FCS_CRC16) {
2181 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2182 put_unaligned_le16(fcs, skb_put(tx_skb,
2186 l2cap_do_send(chan, tx_skb);
2188 BT_DBG("Resent txseq %d", control.txseq);
2190 chan->last_acked_seq = chan->buffer_seq;
2194 static void l2cap_retransmit(struct l2cap_chan *chan,
2195 struct l2cap_ctrl *control)
2197 BT_DBG("chan %p, control %p", chan, control);
2199 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2200 l2cap_ertm_resend(chan);
2203 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2204 struct l2cap_ctrl *control)
2206 struct sk_buff *skb;
2208 BT_DBG("chan %p, control %p", chan, control);
2211 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2213 l2cap_seq_list_clear(&chan->retrans_list);
2215 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2218 if (chan->unacked_frames) {
2219 skb_queue_walk(&chan->tx_q, skb) {
2220 if (bt_cb(skb)->control.txseq == control->reqseq ||
2221 skb == chan->tx_send_head)
2225 skb_queue_walk_from(&chan->tx_q, skb) {
2226 if (skb == chan->tx_send_head)
2229 l2cap_seq_list_append(&chan->retrans_list,
2230 bt_cb(skb)->control.txseq);
2233 l2cap_ertm_resend(chan);
2237 static void l2cap_send_ack(struct l2cap_chan *chan)
2239 struct l2cap_ctrl control;
2240 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2241 chan->last_acked_seq);
2244 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2245 chan, chan->last_acked_seq, chan->buffer_seq);
2247 memset(&control, 0, sizeof(control));
2250 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2251 chan->rx_state == L2CAP_RX_STATE_RECV) {
2252 __clear_ack_timer(chan);
2253 control.super = L2CAP_SUPER_RNR;
2254 control.reqseq = chan->buffer_seq;
2255 l2cap_send_sframe(chan, &control);
2257 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2258 l2cap_ertm_send(chan);
2259 /* If any i-frames were sent, they included an ack */
2260 if (chan->buffer_seq == chan->last_acked_seq)
2264 /* Ack now if the window is 3/4ths full.
2265 * Calculate without mul or div
2267 threshold = chan->ack_win;
2268 threshold += threshold << 1;
2271 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2274 if (frames_to_ack >= threshold) {
2275 __clear_ack_timer(chan);
2276 control.super = L2CAP_SUPER_RR;
2277 control.reqseq = chan->buffer_seq;
2278 l2cap_send_sframe(chan, &control);
2283 __set_ack_timer(chan);
2287 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2288 struct msghdr *msg, int len,
2289 int count, struct sk_buff *skb)
2291 struct l2cap_conn *conn = chan->conn;
2292 struct sk_buff **frag;
2295 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2301 /* Continuation fragments (no L2CAP header) */
2302 frag = &skb_shinfo(skb)->frag_list;
2304 struct sk_buff *tmp;
2306 count = min_t(unsigned int, conn->mtu, len);
2308 tmp = chan->ops->alloc_skb(chan, count,
2309 msg->msg_flags & MSG_DONTWAIT);
2311 return PTR_ERR(tmp);
2315 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2318 (*frag)->priority = skb->priority;
2323 skb->len += (*frag)->len;
2324 skb->data_len += (*frag)->len;
2326 frag = &(*frag)->next;
2332 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2333 struct msghdr *msg, size_t len,
2336 struct l2cap_conn *conn = chan->conn;
2337 struct sk_buff *skb;
2338 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2339 struct l2cap_hdr *lh;
2341 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2342 __le16_to_cpu(chan->psm), len, priority);
2344 count = min_t(unsigned int, (conn->mtu - hlen), len);
2346 skb = chan->ops->alloc_skb(chan, count + hlen,
2347 msg->msg_flags & MSG_DONTWAIT);
2351 skb->priority = priority;
2353 /* Create L2CAP header */
2354 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2355 lh->cid = cpu_to_le16(chan->dcid);
2356 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2357 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2359 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2360 if (unlikely(err < 0)) {
2362 return ERR_PTR(err);
2367 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2368 struct msghdr *msg, size_t len,
2371 struct l2cap_conn *conn = chan->conn;
2372 struct sk_buff *skb;
2374 struct l2cap_hdr *lh;
2376 BT_DBG("chan %p len %zu", chan, len);
2378 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2380 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2381 msg->msg_flags & MSG_DONTWAIT);
2385 skb->priority = priority;
2387 /* Create L2CAP header */
2388 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2389 lh->cid = cpu_to_le16(chan->dcid);
2390 lh->len = cpu_to_le16(len);
2392 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2393 if (unlikely(err < 0)) {
2395 return ERR_PTR(err);
2400 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2401 struct msghdr *msg, size_t len,
2404 struct l2cap_conn *conn = chan->conn;
2405 struct sk_buff *skb;
2406 int err, count, hlen;
2407 struct l2cap_hdr *lh;
2409 BT_DBG("chan %p len %zu", chan, len);
2412 return ERR_PTR(-ENOTCONN);
2414 hlen = __ertm_hdr_size(chan);
2417 hlen += L2CAP_SDULEN_SIZE;
2419 if (chan->fcs == L2CAP_FCS_CRC16)
2420 hlen += L2CAP_FCS_SIZE;
2422 count = min_t(unsigned int, (conn->mtu - hlen), len);
2424 skb = chan->ops->alloc_skb(chan, count + hlen,
2425 msg->msg_flags & MSG_DONTWAIT);
2429 /* Create L2CAP header */
2430 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2431 lh->cid = cpu_to_le16(chan->dcid);
2432 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2434 /* Control header is populated later */
2435 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2436 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2438 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2441 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2443 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2444 if (unlikely(err < 0)) {
2446 return ERR_PTR(err);
2449 bt_cb(skb)->control.fcs = chan->fcs;
2450 bt_cb(skb)->control.retries = 0;
2454 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2455 struct sk_buff_head *seg_queue,
2456 struct msghdr *msg, size_t len)
2458 struct sk_buff *skb;
2463 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2465 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2466 * so fragmented skbs are not used. The HCI layer's handling
2467 * of fragmented skbs is not compatible with ERTM's queueing.
2470 /* PDU size is derived from the HCI MTU */
2471 pdu_len = chan->conn->mtu;
2473 /* Constrain PDU size for BR/EDR connections */
2475 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2477 /* Adjust for largest possible L2CAP overhead. */
2479 pdu_len -= L2CAP_FCS_SIZE;
2481 pdu_len -= __ertm_hdr_size(chan);
2483 /* Remote device may have requested smaller PDUs */
2484 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2486 if (len <= pdu_len) {
2487 sar = L2CAP_SAR_UNSEGMENTED;
2491 sar = L2CAP_SAR_START;
2493 pdu_len -= L2CAP_SDULEN_SIZE;
2497 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2500 __skb_queue_purge(seg_queue);
2501 return PTR_ERR(skb);
2504 bt_cb(skb)->control.sar = sar;
2505 __skb_queue_tail(seg_queue, skb);
2510 pdu_len += L2CAP_SDULEN_SIZE;
2513 if (len <= pdu_len) {
2514 sar = L2CAP_SAR_END;
2517 sar = L2CAP_SAR_CONTINUE;
2524 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2527 struct sk_buff *skb;
2529 struct sk_buff_head seg_queue;
2534 /* Connectionless channel */
2535 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2536 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2538 return PTR_ERR(skb);
2540 l2cap_do_send(chan, skb);
2544 switch (chan->mode) {
2545 case L2CAP_MODE_BASIC:
2546 case L2CAP_MODE_LE_FLOWCTL:
2547 /* Check outgoing MTU */
2548 if (len > chan->omtu)
2551 /* Create a basic PDU */
2552 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2554 return PTR_ERR(skb);
2556 l2cap_do_send(chan, skb);
2560 case L2CAP_MODE_ERTM:
2561 case L2CAP_MODE_STREAMING:
2562 /* Check outgoing MTU */
2563 if (len > chan->omtu) {
2568 __skb_queue_head_init(&seg_queue);
2570 /* Do segmentation before calling in to the state machine,
2571 * since it's possible to block while waiting for memory
2574 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2576 /* The channel could have been closed while segmenting,
2577 * check that it is still connected.
2579 if (chan->state != BT_CONNECTED) {
2580 __skb_queue_purge(&seg_queue);
2587 if (chan->mode == L2CAP_MODE_ERTM)
2588 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2590 l2cap_streaming_send(chan, &seg_queue);
2594 /* If the skbs were not queued for sending, they'll still be in
2595 * seg_queue and need to be purged.
2597 __skb_queue_purge(&seg_queue);
2601 BT_DBG("bad state %1.1x", chan->mode);
2608 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2610 struct l2cap_ctrl control;
2613 BT_DBG("chan %p, txseq %u", chan, txseq);
2615 memset(&control, 0, sizeof(control));
2617 control.super = L2CAP_SUPER_SREJ;
2619 for (seq = chan->expected_tx_seq; seq != txseq;
2620 seq = __next_seq(chan, seq)) {
2621 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2622 control.reqseq = seq;
2623 l2cap_send_sframe(chan, &control);
2624 l2cap_seq_list_append(&chan->srej_list, seq);
2628 chan->expected_tx_seq = __next_seq(chan, txseq);
2631 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2633 struct l2cap_ctrl control;
2635 BT_DBG("chan %p", chan);
2637 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2640 memset(&control, 0, sizeof(control));
2642 control.super = L2CAP_SUPER_SREJ;
2643 control.reqseq = chan->srej_list.tail;
2644 l2cap_send_sframe(chan, &control);
2647 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2649 struct l2cap_ctrl control;
2653 BT_DBG("chan %p, txseq %u", chan, txseq);
2655 memset(&control, 0, sizeof(control));
2657 control.super = L2CAP_SUPER_SREJ;
2659 /* Capture initial list head to allow only one pass through the list. */
2660 initial_head = chan->srej_list.head;
2663 seq = l2cap_seq_list_pop(&chan->srej_list);
2664 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2667 control.reqseq = seq;
2668 l2cap_send_sframe(chan, &control);
2669 l2cap_seq_list_append(&chan->srej_list, seq);
2670 } while (chan->srej_list.head != initial_head);
2673 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2675 struct sk_buff *acked_skb;
2678 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2680 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2683 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2684 chan->expected_ack_seq, chan->unacked_frames);
2686 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2687 ackseq = __next_seq(chan, ackseq)) {
2689 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2691 skb_unlink(acked_skb, &chan->tx_q);
2692 kfree_skb(acked_skb);
2693 chan->unacked_frames--;
2697 chan->expected_ack_seq = reqseq;
2699 if (chan->unacked_frames == 0)
2700 __clear_retrans_timer(chan);
2702 BT_DBG("unacked_frames %u", chan->unacked_frames);
2705 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2707 BT_DBG("chan %p", chan);
2709 chan->expected_tx_seq = chan->buffer_seq;
2710 l2cap_seq_list_clear(&chan->srej_list);
2711 skb_queue_purge(&chan->srej_q);
2712 chan->rx_state = L2CAP_RX_STATE_RECV;
2715 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2716 struct l2cap_ctrl *control,
2717 struct sk_buff_head *skbs, u8 event)
2719 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2723 case L2CAP_EV_DATA_REQUEST:
2724 if (chan->tx_send_head == NULL)
2725 chan->tx_send_head = skb_peek(skbs);
2727 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2728 l2cap_ertm_send(chan);
2730 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2731 BT_DBG("Enter LOCAL_BUSY");
2732 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2734 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2735 /* The SREJ_SENT state must be aborted if we are to
2736 * enter the LOCAL_BUSY state.
2738 l2cap_abort_rx_srej_sent(chan);
2741 l2cap_send_ack(chan);
2744 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2745 BT_DBG("Exit LOCAL_BUSY");
2746 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2748 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2749 struct l2cap_ctrl local_control;
2751 memset(&local_control, 0, sizeof(local_control));
2752 local_control.sframe = 1;
2753 local_control.super = L2CAP_SUPER_RR;
2754 local_control.poll = 1;
2755 local_control.reqseq = chan->buffer_seq;
2756 l2cap_send_sframe(chan, &local_control);
2758 chan->retry_count = 1;
2759 __set_monitor_timer(chan);
2760 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2763 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2764 l2cap_process_reqseq(chan, control->reqseq);
2766 case L2CAP_EV_EXPLICIT_POLL:
2767 l2cap_send_rr_or_rnr(chan, 1);
2768 chan->retry_count = 1;
2769 __set_monitor_timer(chan);
2770 __clear_ack_timer(chan);
2771 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2773 case L2CAP_EV_RETRANS_TO:
2774 l2cap_send_rr_or_rnr(chan, 1);
2775 chan->retry_count = 1;
2776 __set_monitor_timer(chan);
2777 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2779 case L2CAP_EV_RECV_FBIT:
2780 /* Nothing to process */
2787 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2788 struct l2cap_ctrl *control,
2789 struct sk_buff_head *skbs, u8 event)
2791 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2795 case L2CAP_EV_DATA_REQUEST:
2796 if (chan->tx_send_head == NULL)
2797 chan->tx_send_head = skb_peek(skbs);
2798 /* Queue data, but don't send. */
2799 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2801 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2802 BT_DBG("Enter LOCAL_BUSY");
2803 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2805 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2806 /* The SREJ_SENT state must be aborted if we are to
2807 * enter the LOCAL_BUSY state.
2809 l2cap_abort_rx_srej_sent(chan);
2812 l2cap_send_ack(chan);
2815 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2816 BT_DBG("Exit LOCAL_BUSY");
2817 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2819 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2820 struct l2cap_ctrl local_control;
2821 memset(&local_control, 0, sizeof(local_control));
2822 local_control.sframe = 1;
2823 local_control.super = L2CAP_SUPER_RR;
2824 local_control.poll = 1;
2825 local_control.reqseq = chan->buffer_seq;
2826 l2cap_send_sframe(chan, &local_control);
2828 chan->retry_count = 1;
2829 __set_monitor_timer(chan);
2830 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2833 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2834 l2cap_process_reqseq(chan, control->reqseq);
2838 case L2CAP_EV_RECV_FBIT:
2839 if (control && control->final) {
2840 __clear_monitor_timer(chan);
2841 if (chan->unacked_frames > 0)
2842 __set_retrans_timer(chan);
2843 chan->retry_count = 0;
2844 chan->tx_state = L2CAP_TX_STATE_XMIT;
2845 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2848 case L2CAP_EV_EXPLICIT_POLL:
2851 case L2CAP_EV_MONITOR_TO:
2852 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2853 l2cap_send_rr_or_rnr(chan, 1);
2854 __set_monitor_timer(chan);
2855 chan->retry_count++;
2857 l2cap_send_disconn_req(chan, ECONNABORTED);
2865 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2866 struct sk_buff_head *skbs, u8 event)
2868 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2869 chan, control, skbs, event, chan->tx_state);
2871 switch (chan->tx_state) {
2872 case L2CAP_TX_STATE_XMIT:
2873 l2cap_tx_state_xmit(chan, control, skbs, event);
2875 case L2CAP_TX_STATE_WAIT_F:
2876 l2cap_tx_state_wait_f(chan, control, skbs, event);
2884 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2885 struct l2cap_ctrl *control)
2887 BT_DBG("chan %p, control %p", chan, control);
2888 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2891 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2892 struct l2cap_ctrl *control)
2894 BT_DBG("chan %p, control %p", chan, control);
2895 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2898 /* Copy frame to all raw sockets on that connection */
2899 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2901 struct sk_buff *nskb;
2902 struct l2cap_chan *chan;
2904 BT_DBG("conn %p", conn);
2906 mutex_lock(&conn->chan_lock);
2908 list_for_each_entry(chan, &conn->chan_l, list) {
2909 if (chan->chan_type != L2CAP_CHAN_RAW)
2912 /* Don't send frame to the channel it came from */
2913 if (bt_cb(skb)->chan == chan)
2916 nskb = skb_clone(skb, GFP_KERNEL);
2919 if (chan->ops->recv(chan, nskb))
2923 mutex_unlock(&conn->chan_lock);
2926 /* ---- L2CAP signalling commands ---- */
2927 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2928 u8 ident, u16 dlen, void *data)
2930 struct sk_buff *skb, **frag;
2931 struct l2cap_cmd_hdr *cmd;
2932 struct l2cap_hdr *lh;
2935 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2936 conn, code, ident, dlen);
2938 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2941 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2942 count = min_t(unsigned int, conn->mtu, len);
2944 skb = bt_skb_alloc(count, GFP_KERNEL);
2948 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2949 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2951 if (conn->hcon->type == LE_LINK)
2952 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2954 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2956 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2959 cmd->len = cpu_to_le16(dlen);
2962 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2963 memcpy(skb_put(skb, count), data, count);
2969 /* Continuation fragments (no L2CAP header) */
2970 frag = &skb_shinfo(skb)->frag_list;
2972 count = min_t(unsigned int, conn->mtu, len);
2974 *frag = bt_skb_alloc(count, GFP_KERNEL);
2978 memcpy(skb_put(*frag, count), data, count);
2983 frag = &(*frag)->next;
2993 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2996 struct l2cap_conf_opt *opt = *ptr;
2999 len = L2CAP_CONF_OPT_SIZE + opt->len;
3007 *val = *((u8 *) opt->val);
3011 *val = get_unaligned_le16(opt->val);
3015 *val = get_unaligned_le32(opt->val);
3019 *val = (unsigned long) opt->val;
3023 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3027 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3029 struct l2cap_conf_opt *opt = *ptr;
3031 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3038 *((u8 *) opt->val) = val;
3042 put_unaligned_le16(val, opt->val);
3046 put_unaligned_le32(val, opt->val);
3050 memcpy(opt->val, (void *) val, len);
3054 *ptr += L2CAP_CONF_OPT_SIZE + len;
3057 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3059 struct l2cap_conf_efs efs;
3061 switch (chan->mode) {
3062 case L2CAP_MODE_ERTM:
3063 efs.id = chan->local_id;
3064 efs.stype = chan->local_stype;
3065 efs.msdu = cpu_to_le16(chan->local_msdu);
3066 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3067 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3068 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3071 case L2CAP_MODE_STREAMING:
3073 efs.stype = L2CAP_SERV_BESTEFFORT;
3074 efs.msdu = cpu_to_le16(chan->local_msdu);
3075 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3084 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3085 (unsigned long) &efs);
3088 static void l2cap_ack_timeout(struct work_struct *work)
3090 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3094 BT_DBG("chan %p", chan);
3096 l2cap_chan_lock(chan);
3098 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3099 chan->last_acked_seq);
3102 l2cap_send_rr_or_rnr(chan, 0);
3104 l2cap_chan_unlock(chan);
3105 l2cap_chan_put(chan);
3108 int l2cap_ertm_init(struct l2cap_chan *chan)
3112 chan->next_tx_seq = 0;
3113 chan->expected_tx_seq = 0;
3114 chan->expected_ack_seq = 0;
3115 chan->unacked_frames = 0;
3116 chan->buffer_seq = 0;
3117 chan->frames_sent = 0;
3118 chan->last_acked_seq = 0;
3120 chan->sdu_last_frag = NULL;
3123 skb_queue_head_init(&chan->tx_q);
3125 chan->local_amp_id = AMP_ID_BREDR;
3126 chan->move_id = AMP_ID_BREDR;
3127 chan->move_state = L2CAP_MOVE_STABLE;
3128 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3130 if (chan->mode != L2CAP_MODE_ERTM)
3133 chan->rx_state = L2CAP_RX_STATE_RECV;
3134 chan->tx_state = L2CAP_TX_STATE_XMIT;
3136 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3137 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3138 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3140 skb_queue_head_init(&chan->srej_q);
3142 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3146 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3148 l2cap_seq_list_free(&chan->srej_list);
3153 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3156 case L2CAP_MODE_STREAMING:
3157 case L2CAP_MODE_ERTM:
3158 if (l2cap_mode_supported(mode, remote_feat_mask))
3162 return L2CAP_MODE_BASIC;
3166 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3168 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3171 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3173 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3176 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3177 struct l2cap_conf_rfc *rfc)
3179 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3180 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3182 /* Class 1 devices have must have ERTM timeouts
3183 * exceeding the Link Supervision Timeout. The
3184 * default Link Supervision Timeout for AMP
3185 * controllers is 10 seconds.
3187 * Class 1 devices use 0xffffffff for their
3188 * best-effort flush timeout, so the clamping logic
3189 * will result in a timeout that meets the above
3190 * requirement. ERTM timeouts are 16-bit values, so
3191 * the maximum timeout is 65.535 seconds.
3194 /* Convert timeout to milliseconds and round */
3195 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3197 /* This is the recommended formula for class 2 devices
3198 * that start ERTM timers when packets are sent to the
3201 ertm_to = 3 * ertm_to + 500;
3203 if (ertm_to > 0xffff)
3206 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3207 rfc->monitor_timeout = rfc->retrans_timeout;
3209 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3210 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3214 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3216 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3217 __l2cap_ews_supported(chan->conn)) {
3218 /* use extended control field */
3219 set_bit(FLAG_EXT_CTRL, &chan->flags);
3220 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3222 chan->tx_win = min_t(u16, chan->tx_win,
3223 L2CAP_DEFAULT_TX_WINDOW);
3224 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3226 chan->ack_win = chan->tx_win;
3229 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3231 struct l2cap_conf_req *req = data;
3232 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3233 void *ptr = req->data;
3236 BT_DBG("chan %p", chan);
3238 if (chan->num_conf_req || chan->num_conf_rsp)
3241 switch (chan->mode) {
3242 case L2CAP_MODE_STREAMING:
3243 case L2CAP_MODE_ERTM:
3244 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3247 if (__l2cap_efs_supported(chan->conn))
3248 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3252 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3257 if (chan->imtu != L2CAP_DEFAULT_MTU)
3258 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3260 switch (chan->mode) {
3261 case L2CAP_MODE_BASIC:
3262 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3263 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3266 rfc.mode = L2CAP_MODE_BASIC;
3268 rfc.max_transmit = 0;
3269 rfc.retrans_timeout = 0;
3270 rfc.monitor_timeout = 0;
3271 rfc.max_pdu_size = 0;
3273 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3274 (unsigned long) &rfc);
3277 case L2CAP_MODE_ERTM:
3278 rfc.mode = L2CAP_MODE_ERTM;
3279 rfc.max_transmit = chan->max_tx;
3281 __l2cap_set_ertm_timeouts(chan, &rfc);
3283 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3284 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3286 rfc.max_pdu_size = cpu_to_le16(size);
3288 l2cap_txwin_setup(chan);
3290 rfc.txwin_size = min_t(u16, chan->tx_win,
3291 L2CAP_DEFAULT_TX_WINDOW);
3293 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3294 (unsigned long) &rfc);
3296 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3297 l2cap_add_opt_efs(&ptr, chan);
3299 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3300 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3303 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3304 if (chan->fcs == L2CAP_FCS_NONE ||
3305 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3306 chan->fcs = L2CAP_FCS_NONE;
3307 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3312 case L2CAP_MODE_STREAMING:
3313 l2cap_txwin_setup(chan);
3314 rfc.mode = L2CAP_MODE_STREAMING;
3316 rfc.max_transmit = 0;
3317 rfc.retrans_timeout = 0;
3318 rfc.monitor_timeout = 0;
3320 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3321 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3323 rfc.max_pdu_size = cpu_to_le16(size);
3325 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3326 (unsigned long) &rfc);
3328 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3329 l2cap_add_opt_efs(&ptr, chan);
3331 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3332 if (chan->fcs == L2CAP_FCS_NONE ||
3333 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3334 chan->fcs = L2CAP_FCS_NONE;
3335 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3341 req->dcid = cpu_to_le16(chan->dcid);
3342 req->flags = __constant_cpu_to_le16(0);
3347 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3349 struct l2cap_conf_rsp *rsp = data;
3350 void *ptr = rsp->data;
3351 void *req = chan->conf_req;
3352 int len = chan->conf_len;
3353 int type, hint, olen;
3355 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3356 struct l2cap_conf_efs efs;
3358 u16 mtu = L2CAP_DEFAULT_MTU;
3359 u16 result = L2CAP_CONF_SUCCESS;
3362 BT_DBG("chan %p", chan);
3364 while (len >= L2CAP_CONF_OPT_SIZE) {
3365 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3367 hint = type & L2CAP_CONF_HINT;
3368 type &= L2CAP_CONF_MASK;
3371 case L2CAP_CONF_MTU:
3375 case L2CAP_CONF_FLUSH_TO:
3376 chan->flush_to = val;
3379 case L2CAP_CONF_QOS:
3382 case L2CAP_CONF_RFC:
3383 if (olen == sizeof(rfc))
3384 memcpy(&rfc, (void *) val, olen);
3387 case L2CAP_CONF_FCS:
3388 if (val == L2CAP_FCS_NONE)
3389 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3392 case L2CAP_CONF_EFS:
3394 if (olen == sizeof(efs))
3395 memcpy(&efs, (void *) val, olen);
3398 case L2CAP_CONF_EWS:
3399 if (!chan->conn->hs_enabled)
3400 return -ECONNREFUSED;
3402 set_bit(FLAG_EXT_CTRL, &chan->flags);
3403 set_bit(CONF_EWS_RECV, &chan->conf_state);
3404 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3405 chan->remote_tx_win = val;
3412 result = L2CAP_CONF_UNKNOWN;
3413 *((u8 *) ptr++) = type;
3418 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3421 switch (chan->mode) {
3422 case L2CAP_MODE_STREAMING:
3423 case L2CAP_MODE_ERTM:
3424 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3425 chan->mode = l2cap_select_mode(rfc.mode,
3426 chan->conn->feat_mask);
3431 if (__l2cap_efs_supported(chan->conn))
3432 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3434 return -ECONNREFUSED;
3437 if (chan->mode != rfc.mode)
3438 return -ECONNREFUSED;
3444 if (chan->mode != rfc.mode) {
3445 result = L2CAP_CONF_UNACCEPT;
3446 rfc.mode = chan->mode;
3448 if (chan->num_conf_rsp == 1)
3449 return -ECONNREFUSED;
3451 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3452 (unsigned long) &rfc);
3455 if (result == L2CAP_CONF_SUCCESS) {
3456 /* Configure output options and let the other side know
3457 * which ones we don't like. */
3459 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3460 result = L2CAP_CONF_UNACCEPT;
3463 set_bit(CONF_MTU_DONE, &chan->conf_state);
3465 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3468 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3469 efs.stype != L2CAP_SERV_NOTRAFIC &&
3470 efs.stype != chan->local_stype) {
3472 result = L2CAP_CONF_UNACCEPT;
3474 if (chan->num_conf_req >= 1)
3475 return -ECONNREFUSED;
3477 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3479 (unsigned long) &efs);
3481 /* Send PENDING Conf Rsp */
3482 result = L2CAP_CONF_PENDING;
3483 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3488 case L2CAP_MODE_BASIC:
3489 chan->fcs = L2CAP_FCS_NONE;
3490 set_bit(CONF_MODE_DONE, &chan->conf_state);
3493 case L2CAP_MODE_ERTM:
3494 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3495 chan->remote_tx_win = rfc.txwin_size;
3497 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3499 chan->remote_max_tx = rfc.max_transmit;
3501 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3502 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3503 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3504 rfc.max_pdu_size = cpu_to_le16(size);
3505 chan->remote_mps = size;
3507 __l2cap_set_ertm_timeouts(chan, &rfc);
3509 set_bit(CONF_MODE_DONE, &chan->conf_state);
3511 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3512 sizeof(rfc), (unsigned long) &rfc);
3514 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3515 chan->remote_id = efs.id;
3516 chan->remote_stype = efs.stype;
3517 chan->remote_msdu = le16_to_cpu(efs.msdu);
3518 chan->remote_flush_to =
3519 le32_to_cpu(efs.flush_to);
3520 chan->remote_acc_lat =
3521 le32_to_cpu(efs.acc_lat);
3522 chan->remote_sdu_itime =
3523 le32_to_cpu(efs.sdu_itime);
3524 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3526 (unsigned long) &efs);
3530 case L2CAP_MODE_STREAMING:
3531 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3532 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3533 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3534 rfc.max_pdu_size = cpu_to_le16(size);
3535 chan->remote_mps = size;
3537 set_bit(CONF_MODE_DONE, &chan->conf_state);
3539 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3540 (unsigned long) &rfc);
3545 result = L2CAP_CONF_UNACCEPT;
3547 memset(&rfc, 0, sizeof(rfc));
3548 rfc.mode = chan->mode;
3551 if (result == L2CAP_CONF_SUCCESS)
3552 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3554 rsp->scid = cpu_to_le16(chan->dcid);
3555 rsp->result = cpu_to_le16(result);
3556 rsp->flags = __constant_cpu_to_le16(0);
3561 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3562 void *data, u16 *result)
3564 struct l2cap_conf_req *req = data;
3565 void *ptr = req->data;
3568 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3569 struct l2cap_conf_efs efs;
3571 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3573 while (len >= L2CAP_CONF_OPT_SIZE) {
3574 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3577 case L2CAP_CONF_MTU:
3578 if (val < L2CAP_DEFAULT_MIN_MTU) {
3579 *result = L2CAP_CONF_UNACCEPT;
3580 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3583 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3586 case L2CAP_CONF_FLUSH_TO:
3587 chan->flush_to = val;
3588 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3592 case L2CAP_CONF_RFC:
3593 if (olen == sizeof(rfc))
3594 memcpy(&rfc, (void *)val, olen);
3596 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3597 rfc.mode != chan->mode)
3598 return -ECONNREFUSED;
3602 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3603 sizeof(rfc), (unsigned long) &rfc);
3606 case L2CAP_CONF_EWS:
3607 chan->ack_win = min_t(u16, val, chan->ack_win);
3608 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3612 case L2CAP_CONF_EFS:
3613 if (olen == sizeof(efs))
3614 memcpy(&efs, (void *)val, olen);
3616 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3617 efs.stype != L2CAP_SERV_NOTRAFIC &&
3618 efs.stype != chan->local_stype)
3619 return -ECONNREFUSED;
3621 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3622 (unsigned long) &efs);
3625 case L2CAP_CONF_FCS:
3626 if (*result == L2CAP_CONF_PENDING)
3627 if (val == L2CAP_FCS_NONE)
3628 set_bit(CONF_RECV_NO_FCS,
3634 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3635 return -ECONNREFUSED;
3637 chan->mode = rfc.mode;
3639 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3641 case L2CAP_MODE_ERTM:
3642 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3643 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3644 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3645 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3646 chan->ack_win = min_t(u16, chan->ack_win,
3649 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3650 chan->local_msdu = le16_to_cpu(efs.msdu);
3651 chan->local_sdu_itime =
3652 le32_to_cpu(efs.sdu_itime);
3653 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3654 chan->local_flush_to =
3655 le32_to_cpu(efs.flush_to);
3659 case L2CAP_MODE_STREAMING:
3660 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3664 req->dcid = cpu_to_le16(chan->dcid);
3665 req->flags = __constant_cpu_to_le16(0);
3670 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3671 u16 result, u16 flags)
3673 struct l2cap_conf_rsp *rsp = data;
3674 void *ptr = rsp->data;
3676 BT_DBG("chan %p", chan);
3678 rsp->scid = cpu_to_le16(chan->dcid);
3679 rsp->result = cpu_to_le16(result);
3680 rsp->flags = cpu_to_le16(flags);
3685 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3687 struct l2cap_le_conn_rsp rsp;
3688 struct l2cap_conn *conn = chan->conn;
3690 BT_DBG("chan %p", chan);
3692 rsp.dcid = cpu_to_le16(chan->scid);
3693 rsp.mtu = cpu_to_le16(chan->imtu);
3694 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
3695 rsp.credits = cpu_to_le16(chan->rx_credits);
3696 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3698 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3702 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3704 struct l2cap_conn_rsp rsp;
3705 struct l2cap_conn *conn = chan->conn;
3709 rsp.scid = cpu_to_le16(chan->dcid);
3710 rsp.dcid = cpu_to_le16(chan->scid);
3711 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3712 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3715 rsp_code = L2CAP_CREATE_CHAN_RSP;
3717 rsp_code = L2CAP_CONN_RSP;
3719 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3721 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3723 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3726 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3727 l2cap_build_conf_req(chan, buf), buf);
3728 chan->num_conf_req++;
3731 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3735 /* Use sane default values in case a misbehaving remote device
3736 * did not send an RFC or extended window size option.
3738 u16 txwin_ext = chan->ack_win;
3739 struct l2cap_conf_rfc rfc = {
3741 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3742 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3743 .max_pdu_size = cpu_to_le16(chan->imtu),
3744 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3747 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3749 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3752 while (len >= L2CAP_CONF_OPT_SIZE) {
3753 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3756 case L2CAP_CONF_RFC:
3757 if (olen == sizeof(rfc))
3758 memcpy(&rfc, (void *)val, olen);
3760 case L2CAP_CONF_EWS:
3767 case L2CAP_MODE_ERTM:
3768 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3769 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3770 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3771 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3772 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3774 chan->ack_win = min_t(u16, chan->ack_win,
3777 case L2CAP_MODE_STREAMING:
3778 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3782 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3783 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3786 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3788 if (cmd_len < sizeof(*rej))
3791 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3794 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3795 cmd->ident == conn->info_ident) {
3796 cancel_delayed_work(&conn->info_timer);
3798 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3799 conn->info_ident = 0;
3801 l2cap_conn_start(conn);
3807 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3808 struct l2cap_cmd_hdr *cmd,
3809 u8 *data, u8 rsp_code, u8 amp_id)
3811 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3812 struct l2cap_conn_rsp rsp;
3813 struct l2cap_chan *chan = NULL, *pchan;
3814 int result, status = L2CAP_CS_NO_INFO;
3816 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3817 __le16 psm = req->psm;
3819 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3821 /* Check if we have socket listening on psm */
3822 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3823 &conn->hcon->dst, ACL_LINK);
3825 result = L2CAP_CR_BAD_PSM;
3829 mutex_lock(&conn->chan_lock);
3830 l2cap_chan_lock(pchan);
3832 /* Check if the ACL is secure enough (if not SDP) */
3833 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3834 !hci_conn_check_link_mode(conn->hcon)) {
3835 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3836 result = L2CAP_CR_SEC_BLOCK;
3840 result = L2CAP_CR_NO_MEM;
3842 /* Check if we already have channel with that dcid */
3843 if (__l2cap_get_chan_by_dcid(conn, scid))
3846 chan = pchan->ops->new_connection(pchan);
3850 /* For certain devices (ex: HID mouse), support for authentication,
3851 * pairing and bonding is optional. For such devices, inorder to avoid
3852 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3853 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3855 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3857 bacpy(&chan->src, &conn->hcon->src);
3858 bacpy(&chan->dst, &conn->hcon->dst);
3859 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3860 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3863 chan->local_amp_id = amp_id;
3865 __l2cap_chan_add(conn, chan);
3869 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3871 chan->ident = cmd->ident;
3873 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3874 if (l2cap_chan_check_security(chan)) {
3875 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3876 l2cap_state_change(chan, BT_CONNECT2);
3877 result = L2CAP_CR_PEND;
3878 status = L2CAP_CS_AUTHOR_PEND;
3879 chan->ops->defer(chan);
3881 /* Force pending result for AMP controllers.
3882 * The connection will succeed after the
3883 * physical link is up.
3885 if (amp_id == AMP_ID_BREDR) {
3886 l2cap_state_change(chan, BT_CONFIG);
3887 result = L2CAP_CR_SUCCESS;
3889 l2cap_state_change(chan, BT_CONNECT2);
3890 result = L2CAP_CR_PEND;
3892 status = L2CAP_CS_NO_INFO;
3895 l2cap_state_change(chan, BT_CONNECT2);
3896 result = L2CAP_CR_PEND;
3897 status = L2CAP_CS_AUTHEN_PEND;
3900 l2cap_state_change(chan, BT_CONNECT2);
3901 result = L2CAP_CR_PEND;
3902 status = L2CAP_CS_NO_INFO;
3906 l2cap_chan_unlock(pchan);
3907 mutex_unlock(&conn->chan_lock);
3910 rsp.scid = cpu_to_le16(scid);
3911 rsp.dcid = cpu_to_le16(dcid);
3912 rsp.result = cpu_to_le16(result);
3913 rsp.status = cpu_to_le16(status);
3914 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3916 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3917 struct l2cap_info_req info;
3918 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3920 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3921 conn->info_ident = l2cap_get_ident(conn);
3923 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3925 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3926 sizeof(info), &info);
3929 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3930 result == L2CAP_CR_SUCCESS) {
3932 set_bit(CONF_REQ_SENT, &chan->conf_state);
3933 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3934 l2cap_build_conf_req(chan, buf), buf);
3935 chan->num_conf_req++;
3941 static int l2cap_connect_req(struct l2cap_conn *conn,
3942 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3944 struct hci_dev *hdev = conn->hcon->hdev;
3945 struct hci_conn *hcon = conn->hcon;
3947 if (cmd_len < sizeof(struct l2cap_conn_req))
3951 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3952 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3953 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3954 hcon->dst_type, 0, NULL, 0,
3956 hci_dev_unlock(hdev);
3958 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3962 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3963 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3966 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3967 u16 scid, dcid, result, status;
3968 struct l2cap_chan *chan;
3972 if (cmd_len < sizeof(*rsp))
3975 scid = __le16_to_cpu(rsp->scid);
3976 dcid = __le16_to_cpu(rsp->dcid);
3977 result = __le16_to_cpu(rsp->result);
3978 status = __le16_to_cpu(rsp->status);
3980 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3981 dcid, scid, result, status);
3983 mutex_lock(&conn->chan_lock);
3986 chan = __l2cap_get_chan_by_scid(conn, scid);
3992 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4001 l2cap_chan_lock(chan);
4004 case L2CAP_CR_SUCCESS:
4005 l2cap_state_change(chan, BT_CONFIG);
4008 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4010 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4013 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4014 l2cap_build_conf_req(chan, req), req);
4015 chan->num_conf_req++;
4019 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4023 l2cap_chan_del(chan, ECONNREFUSED);
4027 l2cap_chan_unlock(chan);
4030 mutex_unlock(&conn->chan_lock);
4035 static inline void set_default_fcs(struct l2cap_chan *chan)
4037 /* FCS is enabled only in ERTM or streaming mode, if one or both
4040 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4041 chan->fcs = L2CAP_FCS_NONE;
4042 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4043 chan->fcs = L2CAP_FCS_CRC16;
4046 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4047 u8 ident, u16 flags)
4049 struct l2cap_conn *conn = chan->conn;
4051 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4054 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4055 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4057 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4058 l2cap_build_conf_rsp(chan, data,
4059 L2CAP_CONF_SUCCESS, flags), data);
4062 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4065 struct l2cap_cmd_rej_cid rej;
4067 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4068 rej.scid = __cpu_to_le16(scid);
4069 rej.dcid = __cpu_to_le16(dcid);
4071 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4074 static inline int l2cap_config_req(struct l2cap_conn *conn,
4075 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4078 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4081 struct l2cap_chan *chan;
4084 if (cmd_len < sizeof(*req))
4087 dcid = __le16_to_cpu(req->dcid);
4088 flags = __le16_to_cpu(req->flags);
4090 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4092 chan = l2cap_get_chan_by_scid(conn, dcid);
4094 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4098 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4099 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4104 /* Reject if config buffer is too small. */
4105 len = cmd_len - sizeof(*req);
4106 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4107 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4108 l2cap_build_conf_rsp(chan, rsp,
4109 L2CAP_CONF_REJECT, flags), rsp);
4114 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4115 chan->conf_len += len;
4117 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4118 /* Incomplete config. Send empty response. */
4119 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4120 l2cap_build_conf_rsp(chan, rsp,
4121 L2CAP_CONF_SUCCESS, flags), rsp);
4125 /* Complete config. */
4126 len = l2cap_parse_conf_req(chan, rsp);
4128 l2cap_send_disconn_req(chan, ECONNRESET);
4132 chan->ident = cmd->ident;
4133 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4134 chan->num_conf_rsp++;
4136 /* Reset config buffer. */
4139 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4142 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4143 set_default_fcs(chan);
4145 if (chan->mode == L2CAP_MODE_ERTM ||
4146 chan->mode == L2CAP_MODE_STREAMING)
4147 err = l2cap_ertm_init(chan);
4150 l2cap_send_disconn_req(chan, -err);
4152 l2cap_chan_ready(chan);
4157 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4159 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4160 l2cap_build_conf_req(chan, buf), buf);
4161 chan->num_conf_req++;
4164 /* Got Conf Rsp PENDING from remote side and asume we sent
4165 Conf Rsp PENDING in the code above */
4166 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4167 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4169 /* check compatibility */
4171 /* Send rsp for BR/EDR channel */
4173 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4175 chan->ident = cmd->ident;
4179 l2cap_chan_unlock(chan);
4183 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4184 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4187 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4188 u16 scid, flags, result;
4189 struct l2cap_chan *chan;
4190 int len = cmd_len - sizeof(*rsp);
4193 if (cmd_len < sizeof(*rsp))
4196 scid = __le16_to_cpu(rsp->scid);
4197 flags = __le16_to_cpu(rsp->flags);
4198 result = __le16_to_cpu(rsp->result);
4200 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4203 chan = l2cap_get_chan_by_scid(conn, scid);
4208 case L2CAP_CONF_SUCCESS:
4209 l2cap_conf_rfc_get(chan, rsp->data, len);
4210 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4213 case L2CAP_CONF_PENDING:
4214 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4216 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4219 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4222 l2cap_send_disconn_req(chan, ECONNRESET);
4226 if (!chan->hs_hcon) {
4227 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4230 if (l2cap_check_efs(chan)) {
4231 amp_create_logical_link(chan);
4232 chan->ident = cmd->ident;
4238 case L2CAP_CONF_UNACCEPT:
4239 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4242 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4243 l2cap_send_disconn_req(chan, ECONNRESET);
4247 /* throw out any old stored conf requests */
4248 result = L2CAP_CONF_SUCCESS;
4249 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4252 l2cap_send_disconn_req(chan, ECONNRESET);
4256 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4257 L2CAP_CONF_REQ, len, req);
4258 chan->num_conf_req++;
4259 if (result != L2CAP_CONF_SUCCESS)
4265 l2cap_chan_set_err(chan, ECONNRESET);
4267 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4268 l2cap_send_disconn_req(chan, ECONNRESET);
4272 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4275 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4277 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4278 set_default_fcs(chan);
4280 if (chan->mode == L2CAP_MODE_ERTM ||
4281 chan->mode == L2CAP_MODE_STREAMING)
4282 err = l2cap_ertm_init(chan);
4285 l2cap_send_disconn_req(chan, -err);
4287 l2cap_chan_ready(chan);
4291 l2cap_chan_unlock(chan);
4295 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4296 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4299 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4300 struct l2cap_disconn_rsp rsp;
4302 struct l2cap_chan *chan;
4304 if (cmd_len != sizeof(*req))
4307 scid = __le16_to_cpu(req->scid);
4308 dcid = __le16_to_cpu(req->dcid);
4310 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4312 mutex_lock(&conn->chan_lock);
4314 chan = __l2cap_get_chan_by_scid(conn, dcid);
4316 mutex_unlock(&conn->chan_lock);
4317 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4321 l2cap_chan_lock(chan);
4323 rsp.dcid = cpu_to_le16(chan->scid);
4324 rsp.scid = cpu_to_le16(chan->dcid);
4325 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4327 chan->ops->set_shutdown(chan);
4329 l2cap_chan_hold(chan);
4330 l2cap_chan_del(chan, ECONNRESET);
4332 l2cap_chan_unlock(chan);
4334 chan->ops->close(chan);
4335 l2cap_chan_put(chan);
4337 mutex_unlock(&conn->chan_lock);
4342 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4343 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4346 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4348 struct l2cap_chan *chan;
4350 if (cmd_len != sizeof(*rsp))
4353 scid = __le16_to_cpu(rsp->scid);
4354 dcid = __le16_to_cpu(rsp->dcid);
4356 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4358 mutex_lock(&conn->chan_lock);
4360 chan = __l2cap_get_chan_by_scid(conn, scid);
4362 mutex_unlock(&conn->chan_lock);
4366 l2cap_chan_lock(chan);
4368 l2cap_chan_hold(chan);
4369 l2cap_chan_del(chan, 0);
4371 l2cap_chan_unlock(chan);
4373 chan->ops->close(chan);
4374 l2cap_chan_put(chan);
4376 mutex_unlock(&conn->chan_lock);
4381 static inline int l2cap_information_req(struct l2cap_conn *conn,
4382 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4385 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4388 if (cmd_len != sizeof(*req))
4391 type = __le16_to_cpu(req->type);
4393 BT_DBG("type 0x%4.4x", type);
4395 if (type == L2CAP_IT_FEAT_MASK) {
4397 u32 feat_mask = l2cap_feat_mask;
4398 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4399 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4400 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4402 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4404 if (conn->hs_enabled)
4405 feat_mask |= L2CAP_FEAT_EXT_FLOW
4406 | L2CAP_FEAT_EXT_WINDOW;
4408 put_unaligned_le32(feat_mask, rsp->data);
4409 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4411 } else if (type == L2CAP_IT_FIXED_CHAN) {
4413 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4415 if (conn->hs_enabled)
4416 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4418 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4420 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4421 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4422 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4423 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4426 struct l2cap_info_rsp rsp;
4427 rsp.type = cpu_to_le16(type);
4428 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4429 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4436 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4437 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4440 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4443 if (cmd_len < sizeof(*rsp))
4446 type = __le16_to_cpu(rsp->type);
4447 result = __le16_to_cpu(rsp->result);
4449 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4451 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4452 if (cmd->ident != conn->info_ident ||
4453 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4456 cancel_delayed_work(&conn->info_timer);
4458 if (result != L2CAP_IR_SUCCESS) {
4459 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4460 conn->info_ident = 0;
4462 l2cap_conn_start(conn);
4468 case L2CAP_IT_FEAT_MASK:
4469 conn->feat_mask = get_unaligned_le32(rsp->data);
4471 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4472 struct l2cap_info_req req;
4473 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4475 conn->info_ident = l2cap_get_ident(conn);
4477 l2cap_send_cmd(conn, conn->info_ident,
4478 L2CAP_INFO_REQ, sizeof(req), &req);
4480 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4481 conn->info_ident = 0;
4483 l2cap_conn_start(conn);
4487 case L2CAP_IT_FIXED_CHAN:
4488 conn->fixed_chan_mask = rsp->data[0];
4489 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4490 conn->info_ident = 0;
4492 l2cap_conn_start(conn);
4499 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4500 struct l2cap_cmd_hdr *cmd,
4501 u16 cmd_len, void *data)
4503 struct l2cap_create_chan_req *req = data;
4504 struct l2cap_create_chan_rsp rsp;
4505 struct l2cap_chan *chan;
4506 struct hci_dev *hdev;
4509 if (cmd_len != sizeof(*req))
4512 if (!conn->hs_enabled)
4515 psm = le16_to_cpu(req->psm);
4516 scid = le16_to_cpu(req->scid);
4518 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4520 /* For controller id 0 make BR/EDR connection */
4521 if (req->amp_id == AMP_ID_BREDR) {
4522 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4527 /* Validate AMP controller id */
4528 hdev = hci_dev_get(req->amp_id);
4532 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4537 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4540 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4541 struct hci_conn *hs_hcon;
4543 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4547 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4552 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4554 mgr->bredr_chan = chan;
4555 chan->hs_hcon = hs_hcon;
4556 chan->fcs = L2CAP_FCS_NONE;
4557 conn->mtu = hdev->block_mtu;
4566 rsp.scid = cpu_to_le16(scid);
4567 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4568 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4570 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4576 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4578 struct l2cap_move_chan_req req;
4581 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4583 ident = l2cap_get_ident(chan->conn);
4584 chan->ident = ident;
4586 req.icid = cpu_to_le16(chan->scid);
4587 req.dest_amp_id = dest_amp_id;
4589 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4592 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4595 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4597 struct l2cap_move_chan_rsp rsp;
4599 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4601 rsp.icid = cpu_to_le16(chan->dcid);
4602 rsp.result = cpu_to_le16(result);
4604 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4608 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4610 struct l2cap_move_chan_cfm cfm;
4612 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4614 chan->ident = l2cap_get_ident(chan->conn);
4616 cfm.icid = cpu_to_le16(chan->scid);
4617 cfm.result = cpu_to_le16(result);
4619 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4622 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4625 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4627 struct l2cap_move_chan_cfm cfm;
4629 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4631 cfm.icid = cpu_to_le16(icid);
4632 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4634 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4638 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4641 struct l2cap_move_chan_cfm_rsp rsp;
4643 BT_DBG("icid 0x%4.4x", icid);
4645 rsp.icid = cpu_to_le16(icid);
4646 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4649 static void __release_logical_link(struct l2cap_chan *chan)
4651 chan->hs_hchan = NULL;
4652 chan->hs_hcon = NULL;
4654 /* Placeholder - release the logical link */
4657 static void l2cap_logical_fail(struct l2cap_chan *chan)
4659 /* Logical link setup failed */
4660 if (chan->state != BT_CONNECTED) {
4661 /* Create channel failure, disconnect */
4662 l2cap_send_disconn_req(chan, ECONNRESET);
4666 switch (chan->move_role) {
4667 case L2CAP_MOVE_ROLE_RESPONDER:
4668 l2cap_move_done(chan);
4669 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4671 case L2CAP_MOVE_ROLE_INITIATOR:
4672 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4673 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4674 /* Remote has only sent pending or
4675 * success responses, clean up
4677 l2cap_move_done(chan);
4680 /* Other amp move states imply that the move
4681 * has already aborted
4683 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4688 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4689 struct hci_chan *hchan)
4691 struct l2cap_conf_rsp rsp;
4693 chan->hs_hchan = hchan;
4694 chan->hs_hcon->l2cap_data = chan->conn;
4696 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4698 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4701 set_default_fcs(chan);
4703 err = l2cap_ertm_init(chan);
4705 l2cap_send_disconn_req(chan, -err);
4707 l2cap_chan_ready(chan);
4711 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4712 struct hci_chan *hchan)
4714 chan->hs_hcon = hchan->conn;
4715 chan->hs_hcon->l2cap_data = chan->conn;
4717 BT_DBG("move_state %d", chan->move_state);
4719 switch (chan->move_state) {
4720 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4721 /* Move confirm will be sent after a success
4722 * response is received
4724 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4726 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4727 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4728 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4729 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4730 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4731 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4732 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4733 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4734 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4738 /* Move was not in expected state, free the channel */
4739 __release_logical_link(chan);
4741 chan->move_state = L2CAP_MOVE_STABLE;
4745 /* Call with chan locked */
4746 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4749 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4752 l2cap_logical_fail(chan);
4753 __release_logical_link(chan);
4757 if (chan->state != BT_CONNECTED) {
4758 /* Ignore logical link if channel is on BR/EDR */
4759 if (chan->local_amp_id != AMP_ID_BREDR)
4760 l2cap_logical_finish_create(chan, hchan);
4762 l2cap_logical_finish_move(chan, hchan);
4766 void l2cap_move_start(struct l2cap_chan *chan)
4768 BT_DBG("chan %p", chan);
4770 if (chan->local_amp_id == AMP_ID_BREDR) {
4771 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4773 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4774 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4775 /* Placeholder - start physical link setup */
4777 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4778 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4780 l2cap_move_setup(chan);
4781 l2cap_send_move_chan_req(chan, 0);
4785 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4786 u8 local_amp_id, u8 remote_amp_id)
4788 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4789 local_amp_id, remote_amp_id);
4791 chan->fcs = L2CAP_FCS_NONE;
4793 /* Outgoing channel on AMP */
4794 if (chan->state == BT_CONNECT) {
4795 if (result == L2CAP_CR_SUCCESS) {
4796 chan->local_amp_id = local_amp_id;
4797 l2cap_send_create_chan_req(chan, remote_amp_id);
4799 /* Revert to BR/EDR connect */
4800 l2cap_send_conn_req(chan);
4806 /* Incoming channel on AMP */
4807 if (__l2cap_no_conn_pending(chan)) {
4808 struct l2cap_conn_rsp rsp;
4810 rsp.scid = cpu_to_le16(chan->dcid);
4811 rsp.dcid = cpu_to_le16(chan->scid);
4813 if (result == L2CAP_CR_SUCCESS) {
4814 /* Send successful response */
4815 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4816 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4818 /* Send negative response */
4819 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4820 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4823 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4826 if (result == L2CAP_CR_SUCCESS) {
4827 l2cap_state_change(chan, BT_CONFIG);
4828 set_bit(CONF_REQ_SENT, &chan->conf_state);
4829 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4831 l2cap_build_conf_req(chan, buf), buf);
4832 chan->num_conf_req++;
4837 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4840 l2cap_move_setup(chan);
4841 chan->move_id = local_amp_id;
4842 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4844 l2cap_send_move_chan_req(chan, remote_amp_id);
4847 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4849 struct hci_chan *hchan = NULL;
4851 /* Placeholder - get hci_chan for logical link */
4854 if (hchan->state == BT_CONNECTED) {
4855 /* Logical link is ready to go */
4856 chan->hs_hcon = hchan->conn;
4857 chan->hs_hcon->l2cap_data = chan->conn;
4858 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4859 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4861 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4863 /* Wait for logical link to be ready */
4864 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4867 /* Logical link not available */
4868 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4872 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4874 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4876 if (result == -EINVAL)
4877 rsp_result = L2CAP_MR_BAD_ID;
4879 rsp_result = L2CAP_MR_NOT_ALLOWED;
4881 l2cap_send_move_chan_rsp(chan, rsp_result);
4884 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4885 chan->move_state = L2CAP_MOVE_STABLE;
4887 /* Restart data transmission */
4888 l2cap_ertm_send(chan);
4891 /* Invoke with locked chan */
4892 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4894 u8 local_amp_id = chan->local_amp_id;
4895 u8 remote_amp_id = chan->remote_amp_id;
4897 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4898 chan, result, local_amp_id, remote_amp_id);
4900 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4901 l2cap_chan_unlock(chan);
4905 if (chan->state != BT_CONNECTED) {
4906 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4907 } else if (result != L2CAP_MR_SUCCESS) {
4908 l2cap_do_move_cancel(chan, result);
4910 switch (chan->move_role) {
4911 case L2CAP_MOVE_ROLE_INITIATOR:
4912 l2cap_do_move_initiate(chan, local_amp_id,
4915 case L2CAP_MOVE_ROLE_RESPONDER:
4916 l2cap_do_move_respond(chan, result);
4919 l2cap_do_move_cancel(chan, result);
4925 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4926 struct l2cap_cmd_hdr *cmd,
4927 u16 cmd_len, void *data)
4929 struct l2cap_move_chan_req *req = data;
4930 struct l2cap_move_chan_rsp rsp;
4931 struct l2cap_chan *chan;
4933 u16 result = L2CAP_MR_NOT_ALLOWED;
4935 if (cmd_len != sizeof(*req))
4938 icid = le16_to_cpu(req->icid);
4940 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4942 if (!conn->hs_enabled)
4945 chan = l2cap_get_chan_by_dcid(conn, icid);
4947 rsp.icid = cpu_to_le16(icid);
4948 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4949 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4954 chan->ident = cmd->ident;
4956 if (chan->scid < L2CAP_CID_DYN_START ||
4957 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4958 (chan->mode != L2CAP_MODE_ERTM &&
4959 chan->mode != L2CAP_MODE_STREAMING)) {
4960 result = L2CAP_MR_NOT_ALLOWED;
4961 goto send_move_response;
4964 if (chan->local_amp_id == req->dest_amp_id) {
4965 result = L2CAP_MR_SAME_ID;
4966 goto send_move_response;
4969 if (req->dest_amp_id != AMP_ID_BREDR) {
4970 struct hci_dev *hdev;
4971 hdev = hci_dev_get(req->dest_amp_id);
4972 if (!hdev || hdev->dev_type != HCI_AMP ||
4973 !test_bit(HCI_UP, &hdev->flags)) {
4977 result = L2CAP_MR_BAD_ID;
4978 goto send_move_response;
4983 /* Detect a move collision. Only send a collision response
4984 * if this side has "lost", otherwise proceed with the move.
4985 * The winner has the larger bd_addr.
4987 if ((__chan_is_moving(chan) ||
4988 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4989 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4990 result = L2CAP_MR_COLLISION;
4991 goto send_move_response;
4994 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4995 l2cap_move_setup(chan);
4996 chan->move_id = req->dest_amp_id;
4999 if (req->dest_amp_id == AMP_ID_BREDR) {
5000 /* Moving to BR/EDR */
5001 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5002 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5003 result = L2CAP_MR_PEND;
5005 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5006 result = L2CAP_MR_SUCCESS;
5009 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5010 /* Placeholder - uncomment when amp functions are available */
5011 /*amp_accept_physical(chan, req->dest_amp_id);*/
5012 result = L2CAP_MR_PEND;
5016 l2cap_send_move_chan_rsp(chan, result);
5018 l2cap_chan_unlock(chan);
5023 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5025 struct l2cap_chan *chan;
5026 struct hci_chan *hchan = NULL;
5028 chan = l2cap_get_chan_by_scid(conn, icid);
5030 l2cap_send_move_chan_cfm_icid(conn, icid);
5034 __clear_chan_timer(chan);
5035 if (result == L2CAP_MR_PEND)
5036 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5038 switch (chan->move_state) {
5039 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5040 /* Move confirm will be sent when logical link
5043 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5045 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5046 if (result == L2CAP_MR_PEND) {
5048 } else if (test_bit(CONN_LOCAL_BUSY,
5049 &chan->conn_state)) {
5050 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5052 /* Logical link is up or moving to BR/EDR,
5055 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5056 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5059 case L2CAP_MOVE_WAIT_RSP:
5061 if (result == L2CAP_MR_SUCCESS) {
5062 /* Remote is ready, send confirm immediately
5063 * after logical link is ready
5065 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5067 /* Both logical link and move success
5068 * are required to confirm
5070 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5073 /* Placeholder - get hci_chan for logical link */
5075 /* Logical link not available */
5076 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5080 /* If the logical link is not yet connected, do not
5081 * send confirmation.
5083 if (hchan->state != BT_CONNECTED)
5086 /* Logical link is already ready to go */
5088 chan->hs_hcon = hchan->conn;
5089 chan->hs_hcon->l2cap_data = chan->conn;
5091 if (result == L2CAP_MR_SUCCESS) {
5092 /* Can confirm now */
5093 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5095 /* Now only need move success
5098 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5101 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5104 /* Any other amp move state means the move failed. */
5105 chan->move_id = chan->local_amp_id;
5106 l2cap_move_done(chan);
5107 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5110 l2cap_chan_unlock(chan);
5113 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5116 struct l2cap_chan *chan;
5118 chan = l2cap_get_chan_by_ident(conn, ident);
5120 /* Could not locate channel, icid is best guess */
5121 l2cap_send_move_chan_cfm_icid(conn, icid);
5125 __clear_chan_timer(chan);
5127 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5128 if (result == L2CAP_MR_COLLISION) {
5129 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5131 /* Cleanup - cancel move */
5132 chan->move_id = chan->local_amp_id;
5133 l2cap_move_done(chan);
5137 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5139 l2cap_chan_unlock(chan);
5142 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5143 struct l2cap_cmd_hdr *cmd,
5144 u16 cmd_len, void *data)
5146 struct l2cap_move_chan_rsp *rsp = data;
5149 if (cmd_len != sizeof(*rsp))
5152 icid = le16_to_cpu(rsp->icid);
5153 result = le16_to_cpu(rsp->result);
5155 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5157 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5158 l2cap_move_continue(conn, icid, result);
5160 l2cap_move_fail(conn, cmd->ident, icid, result);
5165 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5166 struct l2cap_cmd_hdr *cmd,
5167 u16 cmd_len, void *data)
5169 struct l2cap_move_chan_cfm *cfm = data;
5170 struct l2cap_chan *chan;
5173 if (cmd_len != sizeof(*cfm))
5176 icid = le16_to_cpu(cfm->icid);
5177 result = le16_to_cpu(cfm->result);
5179 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5181 chan = l2cap_get_chan_by_dcid(conn, icid);
5183 /* Spec requires a response even if the icid was not found */
5184 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5188 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5189 if (result == L2CAP_MC_CONFIRMED) {
5190 chan->local_amp_id = chan->move_id;
5191 if (chan->local_amp_id == AMP_ID_BREDR)
5192 __release_logical_link(chan);
5194 chan->move_id = chan->local_amp_id;
5197 l2cap_move_done(chan);
5200 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5202 l2cap_chan_unlock(chan);
5207 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5208 struct l2cap_cmd_hdr *cmd,
5209 u16 cmd_len, void *data)
5211 struct l2cap_move_chan_cfm_rsp *rsp = data;
5212 struct l2cap_chan *chan;
5215 if (cmd_len != sizeof(*rsp))
5218 icid = le16_to_cpu(rsp->icid);
5220 BT_DBG("icid 0x%4.4x", icid);
5222 chan = l2cap_get_chan_by_scid(conn, icid);
5226 __clear_chan_timer(chan);
5228 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5229 chan->local_amp_id = chan->move_id;
5231 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5232 __release_logical_link(chan);
5234 l2cap_move_done(chan);
5237 l2cap_chan_unlock(chan);
5242 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5247 if (min > max || min < 6 || max > 3200)
5250 if (to_multiplier < 10 || to_multiplier > 3200)
5253 if (max >= to_multiplier * 8)
5256 max_latency = (to_multiplier * 8 / max) - 1;
5257 if (latency > 499 || latency > max_latency)
5263 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5264 struct l2cap_cmd_hdr *cmd,
5265 u16 cmd_len, u8 *data)
5267 struct hci_conn *hcon = conn->hcon;
5268 struct l2cap_conn_param_update_req *req;
5269 struct l2cap_conn_param_update_rsp rsp;
5270 u16 min, max, latency, to_multiplier;
5273 if (!(hcon->link_mode & HCI_LM_MASTER))
5276 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5279 req = (struct l2cap_conn_param_update_req *) data;
5280 min = __le16_to_cpu(req->min);
5281 max = __le16_to_cpu(req->max);
5282 latency = __le16_to_cpu(req->latency);
5283 to_multiplier = __le16_to_cpu(req->to_multiplier);
5285 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5286 min, max, latency, to_multiplier);
5288 memset(&rsp, 0, sizeof(rsp));
5290 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5292 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5294 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5296 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5300 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5305 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5306 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5309 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5310 u16 dcid, mtu, mps, credits, result;
5311 struct l2cap_chan *chan;
5314 if (cmd_len < sizeof(*rsp))
5317 dcid = __le16_to_cpu(rsp->dcid);
5318 mtu = __le16_to_cpu(rsp->mtu);
5319 mps = __le16_to_cpu(rsp->mps);
5320 credits = __le16_to_cpu(rsp->credits);
5321 result = __le16_to_cpu(rsp->result);
5323 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5326 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5327 dcid, mtu, mps, credits, result);
5329 mutex_lock(&conn->chan_lock);
5331 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5339 l2cap_chan_lock(chan);
5342 case L2CAP_CR_SUCCESS:
5346 chan->remote_mps = mps;
5347 chan->tx_credits = credits;
5348 l2cap_chan_ready(chan);
5352 l2cap_chan_del(chan, ECONNREFUSED);
5356 l2cap_chan_unlock(chan);
5359 mutex_unlock(&conn->chan_lock);
5364 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5365 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5370 switch (cmd->code) {
5371 case L2CAP_COMMAND_REJ:
5372 l2cap_command_rej(conn, cmd, cmd_len, data);
5375 case L2CAP_CONN_REQ:
5376 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5379 case L2CAP_CONN_RSP:
5380 case L2CAP_CREATE_CHAN_RSP:
5381 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5384 case L2CAP_CONF_REQ:
5385 err = l2cap_config_req(conn, cmd, cmd_len, data);
5388 case L2CAP_CONF_RSP:
5389 l2cap_config_rsp(conn, cmd, cmd_len, data);
5392 case L2CAP_DISCONN_REQ:
5393 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5396 case L2CAP_DISCONN_RSP:
5397 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5400 case L2CAP_ECHO_REQ:
5401 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5404 case L2CAP_ECHO_RSP:
5407 case L2CAP_INFO_REQ:
5408 err = l2cap_information_req(conn, cmd, cmd_len, data);
5411 case L2CAP_INFO_RSP:
5412 l2cap_information_rsp(conn, cmd, cmd_len, data);
5415 case L2CAP_CREATE_CHAN_REQ:
5416 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5419 case L2CAP_MOVE_CHAN_REQ:
5420 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5423 case L2CAP_MOVE_CHAN_RSP:
5424 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5427 case L2CAP_MOVE_CHAN_CFM:
5428 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5431 case L2CAP_MOVE_CHAN_CFM_RSP:
5432 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5436 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5444 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5445 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5448 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5449 struct l2cap_le_conn_rsp rsp;
5450 struct l2cap_chan *chan, *pchan;
5451 u16 dcid, scid, credits, mtu, mps;
5455 if (cmd_len != sizeof(*req))
5458 scid = __le16_to_cpu(req->scid);
5459 mtu = __le16_to_cpu(req->mtu);
5460 mps = __le16_to_cpu(req->mps);
5465 if (mtu < 23 || mps < 23)
5468 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5471 /* Check if we have socket listening on psm */
5472 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5473 &conn->hcon->dst, LE_LINK);
5475 result = L2CAP_CR_BAD_PSM;
5480 mutex_lock(&conn->chan_lock);
5481 l2cap_chan_lock(pchan);
5483 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5484 result = L2CAP_CR_AUTHENTICATION;
5486 goto response_unlock;
5489 /* Check if we already have channel with that dcid */
5490 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5491 result = L2CAP_CR_NO_MEM;
5493 goto response_unlock;
5496 chan = pchan->ops->new_connection(pchan);
5498 result = L2CAP_CR_NO_MEM;
5499 goto response_unlock;
5502 bacpy(&chan->src, &conn->hcon->src);
5503 bacpy(&chan->dst, &conn->hcon->dst);
5504 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5505 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5509 chan->remote_mps = mps;
5510 chan->tx_credits = __le16_to_cpu(req->credits);
5512 __l2cap_chan_add(conn, chan);
5514 credits = chan->rx_credits;
5516 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5518 chan->ident = cmd->ident;
5520 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5521 l2cap_state_change(chan, BT_CONNECT2);
5522 result = L2CAP_CR_PEND;
5523 chan->ops->defer(chan);
5525 l2cap_chan_ready(chan);
5526 result = L2CAP_CR_SUCCESS;
5530 l2cap_chan_unlock(pchan);
5531 mutex_unlock(&conn->chan_lock);
5533 if (result == L2CAP_CR_PEND)
5538 rsp.mtu = cpu_to_le16(chan->imtu);
5539 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
5545 rsp.dcid = cpu_to_le16(dcid);
5546 rsp.credits = cpu_to_le16(credits);
5547 rsp.result = cpu_to_le16(result);
5549 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5554 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5555 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5560 switch (cmd->code) {
5561 case L2CAP_COMMAND_REJ:
5564 case L2CAP_CONN_PARAM_UPDATE_REQ:
5565 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5568 case L2CAP_CONN_PARAM_UPDATE_RSP:
5571 case L2CAP_LE_CONN_RSP:
5572 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5575 case L2CAP_LE_CONN_REQ:
5576 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5579 case L2CAP_DISCONN_REQ:
5580 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5583 case L2CAP_DISCONN_RSP:
5584 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5588 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5596 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5597 struct sk_buff *skb)
5599 struct hci_conn *hcon = conn->hcon;
5600 struct l2cap_cmd_hdr *cmd;
5604 if (hcon->type != LE_LINK)
5607 if (skb->len < L2CAP_CMD_HDR_SIZE)
5610 cmd = (void *) skb->data;
5611 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5613 len = le16_to_cpu(cmd->len);
5615 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5617 if (len != skb->len || !cmd->ident) {
5618 BT_DBG("corrupted command");
5622 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5624 struct l2cap_cmd_rej_unk rej;
5626 BT_ERR("Wrong link type (%d)", err);
5628 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5629 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5637 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5638 struct sk_buff *skb)
5640 struct hci_conn *hcon = conn->hcon;
5641 u8 *data = skb->data;
5643 struct l2cap_cmd_hdr cmd;
5646 l2cap_raw_recv(conn, skb);
5648 if (hcon->type != ACL_LINK)
5651 while (len >= L2CAP_CMD_HDR_SIZE) {
5653 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5654 data += L2CAP_CMD_HDR_SIZE;
5655 len -= L2CAP_CMD_HDR_SIZE;
5657 cmd_len = le16_to_cpu(cmd.len);
5659 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5662 if (cmd_len > len || !cmd.ident) {
5663 BT_DBG("corrupted command");
5667 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5669 struct l2cap_cmd_rej_unk rej;
5671 BT_ERR("Wrong link type (%d)", err);
5673 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5674 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5686 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5688 u16 our_fcs, rcv_fcs;
5691 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5692 hdr_size = L2CAP_EXT_HDR_SIZE;
5694 hdr_size = L2CAP_ENH_HDR_SIZE;
5696 if (chan->fcs == L2CAP_FCS_CRC16) {
5697 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5698 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5699 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5701 if (our_fcs != rcv_fcs)
5707 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5709 struct l2cap_ctrl control;
5711 BT_DBG("chan %p", chan);
5713 memset(&control, 0, sizeof(control));
5716 control.reqseq = chan->buffer_seq;
5717 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5719 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5720 control.super = L2CAP_SUPER_RNR;
5721 l2cap_send_sframe(chan, &control);
5724 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5725 chan->unacked_frames > 0)
5726 __set_retrans_timer(chan);
5728 /* Send pending iframes */
5729 l2cap_ertm_send(chan);
5731 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5732 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5733 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5736 control.super = L2CAP_SUPER_RR;
5737 l2cap_send_sframe(chan, &control);
5741 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5742 struct sk_buff **last_frag)
5744 /* skb->len reflects data in skb as well as all fragments
5745 * skb->data_len reflects only data in fragments
5747 if (!skb_has_frag_list(skb))
5748 skb_shinfo(skb)->frag_list = new_frag;
5750 new_frag->next = NULL;
5752 (*last_frag)->next = new_frag;
5753 *last_frag = new_frag;
5755 skb->len += new_frag->len;
5756 skb->data_len += new_frag->len;
5757 skb->truesize += new_frag->truesize;
5760 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5761 struct l2cap_ctrl *control)
5765 switch (control->sar) {
5766 case L2CAP_SAR_UNSEGMENTED:
5770 err = chan->ops->recv(chan, skb);
5773 case L2CAP_SAR_START:
5777 chan->sdu_len = get_unaligned_le16(skb->data);
5778 skb_pull(skb, L2CAP_SDULEN_SIZE);
5780 if (chan->sdu_len > chan->imtu) {
5785 if (skb->len >= chan->sdu_len)
5789 chan->sdu_last_frag = skb;
5795 case L2CAP_SAR_CONTINUE:
5799 append_skb_frag(chan->sdu, skb,
5800 &chan->sdu_last_frag);
5803 if (chan->sdu->len >= chan->sdu_len)
5813 append_skb_frag(chan->sdu, skb,
5814 &chan->sdu_last_frag);
5817 if (chan->sdu->len != chan->sdu_len)
5820 err = chan->ops->recv(chan, chan->sdu);
5823 /* Reassembly complete */
5825 chan->sdu_last_frag = NULL;
5833 kfree_skb(chan->sdu);
5835 chan->sdu_last_frag = NULL;
5842 static int l2cap_resegment(struct l2cap_chan *chan)
5848 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5852 if (chan->mode != L2CAP_MODE_ERTM)
5855 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5856 l2cap_tx(chan, NULL, NULL, event);
5859 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5862 /* Pass sequential frames to l2cap_reassemble_sdu()
5863 * until a gap is encountered.
5866 BT_DBG("chan %p", chan);
5868 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5869 struct sk_buff *skb;
5870 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5871 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5873 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5878 skb_unlink(skb, &chan->srej_q);
5879 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5880 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5885 if (skb_queue_empty(&chan->srej_q)) {
5886 chan->rx_state = L2CAP_RX_STATE_RECV;
5887 l2cap_send_ack(chan);
5893 static void l2cap_handle_srej(struct l2cap_chan *chan,
5894 struct l2cap_ctrl *control)
5896 struct sk_buff *skb;
5898 BT_DBG("chan %p, control %p", chan, control);
5900 if (control->reqseq == chan->next_tx_seq) {
5901 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5902 l2cap_send_disconn_req(chan, ECONNRESET);
5906 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5909 BT_DBG("Seq %d not available for retransmission",
5914 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5915 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5916 l2cap_send_disconn_req(chan, ECONNRESET);
5920 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5922 if (control->poll) {
5923 l2cap_pass_to_tx(chan, control);
5925 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5926 l2cap_retransmit(chan, control);
5927 l2cap_ertm_send(chan);
5929 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5930 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5931 chan->srej_save_reqseq = control->reqseq;
5934 l2cap_pass_to_tx_fbit(chan, control);
5936 if (control->final) {
5937 if (chan->srej_save_reqseq != control->reqseq ||
5938 !test_and_clear_bit(CONN_SREJ_ACT,
5940 l2cap_retransmit(chan, control);
5942 l2cap_retransmit(chan, control);
5943 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5944 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5945 chan->srej_save_reqseq = control->reqseq;
5951 static void l2cap_handle_rej(struct l2cap_chan *chan,
5952 struct l2cap_ctrl *control)
5954 struct sk_buff *skb;
5956 BT_DBG("chan %p, control %p", chan, control);
5958 if (control->reqseq == chan->next_tx_seq) {
5959 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5960 l2cap_send_disconn_req(chan, ECONNRESET);
5964 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5966 if (chan->max_tx && skb &&
5967 bt_cb(skb)->control.retries >= chan->max_tx) {
5968 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5969 l2cap_send_disconn_req(chan, ECONNRESET);
5973 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5975 l2cap_pass_to_tx(chan, control);
5977 if (control->final) {
5978 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5979 l2cap_retransmit_all(chan, control);
5981 l2cap_retransmit_all(chan, control);
5982 l2cap_ertm_send(chan);
5983 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5984 set_bit(CONN_REJ_ACT, &chan->conn_state);
5988 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5990 BT_DBG("chan %p, txseq %d", chan, txseq);
5992 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5993 chan->expected_tx_seq);
5995 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5996 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5998 /* See notes below regarding "double poll" and
6001 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6002 BT_DBG("Invalid/Ignore - after SREJ");
6003 return L2CAP_TXSEQ_INVALID_IGNORE;
6005 BT_DBG("Invalid - in window after SREJ sent");
6006 return L2CAP_TXSEQ_INVALID;
6010 if (chan->srej_list.head == txseq) {
6011 BT_DBG("Expected SREJ");
6012 return L2CAP_TXSEQ_EXPECTED_SREJ;
6015 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6016 BT_DBG("Duplicate SREJ - txseq already stored");
6017 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6020 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6021 BT_DBG("Unexpected SREJ - not requested");
6022 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6026 if (chan->expected_tx_seq == txseq) {
6027 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6029 BT_DBG("Invalid - txseq outside tx window");
6030 return L2CAP_TXSEQ_INVALID;
6033 return L2CAP_TXSEQ_EXPECTED;
6037 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6038 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6039 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6040 return L2CAP_TXSEQ_DUPLICATE;
6043 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6044 /* A source of invalid packets is a "double poll" condition,
6045 * where delays cause us to send multiple poll packets. If
6046 * the remote stack receives and processes both polls,
6047 * sequence numbers can wrap around in such a way that a
6048 * resent frame has a sequence number that looks like new data
6049 * with a sequence gap. This would trigger an erroneous SREJ
6052 * Fortunately, this is impossible with a tx window that's
6053 * less than half of the maximum sequence number, which allows
6054 * invalid frames to be safely ignored.
6056 * With tx window sizes greater than half of the tx window
6057 * maximum, the frame is invalid and cannot be ignored. This
6058 * causes a disconnect.
6061 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6062 BT_DBG("Invalid/Ignore - txseq outside tx window");
6063 return L2CAP_TXSEQ_INVALID_IGNORE;
6065 BT_DBG("Invalid - txseq outside tx window");
6066 return L2CAP_TXSEQ_INVALID;
6069 BT_DBG("Unexpected - txseq indicates missing frames");
6070 return L2CAP_TXSEQ_UNEXPECTED;
6074 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6075 struct l2cap_ctrl *control,
6076 struct sk_buff *skb, u8 event)
6079 bool skb_in_use = false;
6081 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6085 case L2CAP_EV_RECV_IFRAME:
6086 switch (l2cap_classify_txseq(chan, control->txseq)) {
6087 case L2CAP_TXSEQ_EXPECTED:
6088 l2cap_pass_to_tx(chan, control);
6090 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6091 BT_DBG("Busy, discarding expected seq %d",
6096 chan->expected_tx_seq = __next_seq(chan,
6099 chan->buffer_seq = chan->expected_tx_seq;
6102 err = l2cap_reassemble_sdu(chan, skb, control);
6106 if (control->final) {
6107 if (!test_and_clear_bit(CONN_REJ_ACT,
6108 &chan->conn_state)) {
6110 l2cap_retransmit_all(chan, control);
6111 l2cap_ertm_send(chan);
6115 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6116 l2cap_send_ack(chan);
6118 case L2CAP_TXSEQ_UNEXPECTED:
6119 l2cap_pass_to_tx(chan, control);
6121 /* Can't issue SREJ frames in the local busy state.
6122 * Drop this frame, it will be seen as missing
6123 * when local busy is exited.
6125 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6126 BT_DBG("Busy, discarding unexpected seq %d",
6131 /* There was a gap in the sequence, so an SREJ
6132 * must be sent for each missing frame. The
6133 * current frame is stored for later use.
6135 skb_queue_tail(&chan->srej_q, skb);
6137 BT_DBG("Queued %p (queue len %d)", skb,
6138 skb_queue_len(&chan->srej_q));
6140 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6141 l2cap_seq_list_clear(&chan->srej_list);
6142 l2cap_send_srej(chan, control->txseq);
6144 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6146 case L2CAP_TXSEQ_DUPLICATE:
6147 l2cap_pass_to_tx(chan, control);
6149 case L2CAP_TXSEQ_INVALID_IGNORE:
6151 case L2CAP_TXSEQ_INVALID:
6153 l2cap_send_disconn_req(chan, ECONNRESET);
6157 case L2CAP_EV_RECV_RR:
6158 l2cap_pass_to_tx(chan, control);
6159 if (control->final) {
6160 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6162 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6163 !__chan_is_moving(chan)) {
6165 l2cap_retransmit_all(chan, control);
6168 l2cap_ertm_send(chan);
6169 } else if (control->poll) {
6170 l2cap_send_i_or_rr_or_rnr(chan);
6172 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6173 &chan->conn_state) &&
6174 chan->unacked_frames)
6175 __set_retrans_timer(chan);
6177 l2cap_ertm_send(chan);
6180 case L2CAP_EV_RECV_RNR:
6181 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6182 l2cap_pass_to_tx(chan, control);
6183 if (control && control->poll) {
6184 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6185 l2cap_send_rr_or_rnr(chan, 0);
6187 __clear_retrans_timer(chan);
6188 l2cap_seq_list_clear(&chan->retrans_list);
6190 case L2CAP_EV_RECV_REJ:
6191 l2cap_handle_rej(chan, control);
6193 case L2CAP_EV_RECV_SREJ:
6194 l2cap_handle_srej(chan, control);
6200 if (skb && !skb_in_use) {
6201 BT_DBG("Freeing %p", skb);
6208 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6209 struct l2cap_ctrl *control,
6210 struct sk_buff *skb, u8 event)
6213 u16 txseq = control->txseq;
6214 bool skb_in_use = false;
6216 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6220 case L2CAP_EV_RECV_IFRAME:
6221 switch (l2cap_classify_txseq(chan, txseq)) {
6222 case L2CAP_TXSEQ_EXPECTED:
6223 /* Keep frame for reassembly later */
6224 l2cap_pass_to_tx(chan, control);
6225 skb_queue_tail(&chan->srej_q, skb);
6227 BT_DBG("Queued %p (queue len %d)", skb,
6228 skb_queue_len(&chan->srej_q));
6230 chan->expected_tx_seq = __next_seq(chan, txseq);
6232 case L2CAP_TXSEQ_EXPECTED_SREJ:
6233 l2cap_seq_list_pop(&chan->srej_list);
6235 l2cap_pass_to_tx(chan, control);
6236 skb_queue_tail(&chan->srej_q, skb);
6238 BT_DBG("Queued %p (queue len %d)", skb,
6239 skb_queue_len(&chan->srej_q));
6241 err = l2cap_rx_queued_iframes(chan);
6246 case L2CAP_TXSEQ_UNEXPECTED:
6247 /* Got a frame that can't be reassembled yet.
6248 * Save it for later, and send SREJs to cover
6249 * the missing frames.
6251 skb_queue_tail(&chan->srej_q, skb);
6253 BT_DBG("Queued %p (queue len %d)", skb,
6254 skb_queue_len(&chan->srej_q));
6256 l2cap_pass_to_tx(chan, control);
6257 l2cap_send_srej(chan, control->txseq);
6259 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6260 /* This frame was requested with an SREJ, but
6261 * some expected retransmitted frames are
6262 * missing. Request retransmission of missing
6265 skb_queue_tail(&chan->srej_q, skb);
6267 BT_DBG("Queued %p (queue len %d)", skb,
6268 skb_queue_len(&chan->srej_q));
6270 l2cap_pass_to_tx(chan, control);
6271 l2cap_send_srej_list(chan, control->txseq);
6273 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6274 /* We've already queued this frame. Drop this copy. */
6275 l2cap_pass_to_tx(chan, control);
6277 case L2CAP_TXSEQ_DUPLICATE:
6278 /* Expecting a later sequence number, so this frame
6279 * was already received. Ignore it completely.
6282 case L2CAP_TXSEQ_INVALID_IGNORE:
6284 case L2CAP_TXSEQ_INVALID:
6286 l2cap_send_disconn_req(chan, ECONNRESET);
6290 case L2CAP_EV_RECV_RR:
6291 l2cap_pass_to_tx(chan, control);
6292 if (control->final) {
6293 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6295 if (!test_and_clear_bit(CONN_REJ_ACT,
6296 &chan->conn_state)) {
6298 l2cap_retransmit_all(chan, control);
6301 l2cap_ertm_send(chan);
6302 } else if (control->poll) {
6303 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6304 &chan->conn_state) &&
6305 chan->unacked_frames) {
6306 __set_retrans_timer(chan);
6309 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6310 l2cap_send_srej_tail(chan);
6312 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6313 &chan->conn_state) &&
6314 chan->unacked_frames)
6315 __set_retrans_timer(chan);
6317 l2cap_send_ack(chan);
6320 case L2CAP_EV_RECV_RNR:
6321 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6322 l2cap_pass_to_tx(chan, control);
6323 if (control->poll) {
6324 l2cap_send_srej_tail(chan);
6326 struct l2cap_ctrl rr_control;
6327 memset(&rr_control, 0, sizeof(rr_control));
6328 rr_control.sframe = 1;
6329 rr_control.super = L2CAP_SUPER_RR;
6330 rr_control.reqseq = chan->buffer_seq;
6331 l2cap_send_sframe(chan, &rr_control);
6335 case L2CAP_EV_RECV_REJ:
6336 l2cap_handle_rej(chan, control);
6338 case L2CAP_EV_RECV_SREJ:
6339 l2cap_handle_srej(chan, control);
6343 if (skb && !skb_in_use) {
6344 BT_DBG("Freeing %p", skb);
6351 static int l2cap_finish_move(struct l2cap_chan *chan)
6353 BT_DBG("chan %p", chan);
6355 chan->rx_state = L2CAP_RX_STATE_RECV;
6358 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6360 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6362 return l2cap_resegment(chan);
6365 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6366 struct l2cap_ctrl *control,
6367 struct sk_buff *skb, u8 event)
6371 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6377 l2cap_process_reqseq(chan, control->reqseq);
6379 if (!skb_queue_empty(&chan->tx_q))
6380 chan->tx_send_head = skb_peek(&chan->tx_q);
6382 chan->tx_send_head = NULL;
6384 /* Rewind next_tx_seq to the point expected
6387 chan->next_tx_seq = control->reqseq;
6388 chan->unacked_frames = 0;
6390 err = l2cap_finish_move(chan);
6394 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6395 l2cap_send_i_or_rr_or_rnr(chan);
6397 if (event == L2CAP_EV_RECV_IFRAME)
6400 return l2cap_rx_state_recv(chan, control, NULL, event);
6403 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6404 struct l2cap_ctrl *control,
6405 struct sk_buff *skb, u8 event)
6409 if (!control->final)
6412 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6414 chan->rx_state = L2CAP_RX_STATE_RECV;
6415 l2cap_process_reqseq(chan, control->reqseq);
6417 if (!skb_queue_empty(&chan->tx_q))
6418 chan->tx_send_head = skb_peek(&chan->tx_q);
6420 chan->tx_send_head = NULL;
6422 /* Rewind next_tx_seq to the point expected
6425 chan->next_tx_seq = control->reqseq;
6426 chan->unacked_frames = 0;
6429 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6431 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6433 err = l2cap_resegment(chan);
6436 err = l2cap_rx_state_recv(chan, control, skb, event);
6441 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6443 /* Make sure reqseq is for a packet that has been sent but not acked */
6446 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6447 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6450 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6451 struct sk_buff *skb, u8 event)
6455 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6456 control, skb, event, chan->rx_state);
6458 if (__valid_reqseq(chan, control->reqseq)) {
6459 switch (chan->rx_state) {
6460 case L2CAP_RX_STATE_RECV:
6461 err = l2cap_rx_state_recv(chan, control, skb, event);
6463 case L2CAP_RX_STATE_SREJ_SENT:
6464 err = l2cap_rx_state_srej_sent(chan, control, skb,
6467 case L2CAP_RX_STATE_WAIT_P:
6468 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6470 case L2CAP_RX_STATE_WAIT_F:
6471 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6478 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6479 control->reqseq, chan->next_tx_seq,
6480 chan->expected_ack_seq);
6481 l2cap_send_disconn_req(chan, ECONNRESET);
6487 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6488 struct sk_buff *skb)
6492 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6495 if (l2cap_classify_txseq(chan, control->txseq) ==
6496 L2CAP_TXSEQ_EXPECTED) {
6497 l2cap_pass_to_tx(chan, control);
6499 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6500 __next_seq(chan, chan->buffer_seq));
6502 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6504 l2cap_reassemble_sdu(chan, skb, control);
6507 kfree_skb(chan->sdu);
6510 chan->sdu_last_frag = NULL;
6514 BT_DBG("Freeing %p", skb);
6519 chan->last_acked_seq = control->txseq;
6520 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6525 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6527 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6531 __unpack_control(chan, skb);
6536 * We can just drop the corrupted I-frame here.
6537 * Receiver will miss it and start proper recovery
6538 * procedures and ask for retransmission.
6540 if (l2cap_check_fcs(chan, skb))
6543 if (!control->sframe && control->sar == L2CAP_SAR_START)
6544 len -= L2CAP_SDULEN_SIZE;
6546 if (chan->fcs == L2CAP_FCS_CRC16)
6547 len -= L2CAP_FCS_SIZE;
6549 if (len > chan->mps) {
6550 l2cap_send_disconn_req(chan, ECONNRESET);
6554 if (!control->sframe) {
6557 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6558 control->sar, control->reqseq, control->final,
6561 /* Validate F-bit - F=0 always valid, F=1 only
6562 * valid in TX WAIT_F
6564 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6567 if (chan->mode != L2CAP_MODE_STREAMING) {
6568 event = L2CAP_EV_RECV_IFRAME;
6569 err = l2cap_rx(chan, control, skb, event);
6571 err = l2cap_stream_rx(chan, control, skb);
6575 l2cap_send_disconn_req(chan, ECONNRESET);
6577 const u8 rx_func_to_event[4] = {
6578 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6579 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6582 /* Only I-frames are expected in streaming mode */
6583 if (chan->mode == L2CAP_MODE_STREAMING)
6586 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6587 control->reqseq, control->final, control->poll,
6591 BT_ERR("Trailing bytes: %d in sframe", len);
6592 l2cap_send_disconn_req(chan, ECONNRESET);
6596 /* Validate F and P bits */
6597 if (control->final && (control->poll ||
6598 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6601 event = rx_func_to_event[control->super];
6602 if (l2cap_rx(chan, control, skb, event))
6603 l2cap_send_disconn_req(chan, ECONNRESET);
6613 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6614 struct sk_buff *skb)
6616 struct l2cap_chan *chan;
6618 chan = l2cap_get_chan_by_scid(conn, cid);
6620 if (cid == L2CAP_CID_A2MP) {
6621 chan = a2mp_channel_create(conn, skb);
6627 l2cap_chan_lock(chan);
6629 BT_DBG("unknown cid 0x%4.4x", cid);
6630 /* Drop packet and return */
6636 BT_DBG("chan %p, len %d", chan, skb->len);
6638 if (chan->state != BT_CONNECTED)
6641 switch (chan->mode) {
6642 case L2CAP_MODE_LE_FLOWCTL:
6643 case L2CAP_MODE_BASIC:
6644 /* If socket recv buffers overflows we drop data here
6645 * which is *bad* because L2CAP has to be reliable.
6646 * But we don't have any other choice. L2CAP doesn't
6647 * provide flow control mechanism. */
6649 if (chan->imtu < skb->len)
6652 if (!chan->ops->recv(chan, skb))
6656 case L2CAP_MODE_ERTM:
6657 case L2CAP_MODE_STREAMING:
6658 l2cap_data_rcv(chan, skb);
6662 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6670 l2cap_chan_unlock(chan);
6673 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6674 struct sk_buff *skb)
6676 struct hci_conn *hcon = conn->hcon;
6677 struct l2cap_chan *chan;
6679 if (hcon->type != ACL_LINK)
6682 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6687 BT_DBG("chan %p, len %d", chan, skb->len);
6689 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6692 if (chan->imtu < skb->len)
6695 /* Store remote BD_ADDR and PSM for msg_name */
6696 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6697 bt_cb(skb)->psm = psm;
6699 if (!chan->ops->recv(chan, skb))
6706 static void l2cap_att_channel(struct l2cap_conn *conn,
6707 struct sk_buff *skb)
6709 struct hci_conn *hcon = conn->hcon;
6710 struct l2cap_chan *chan;
6712 if (hcon->type != LE_LINK)
6715 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6716 &hcon->src, &hcon->dst);
6720 BT_DBG("chan %p, len %d", chan, skb->len);
6722 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6725 if (chan->imtu < skb->len)
6728 if (!chan->ops->recv(chan, skb))
6735 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6737 struct l2cap_hdr *lh = (void *) skb->data;
6741 skb_pull(skb, L2CAP_HDR_SIZE);
6742 cid = __le16_to_cpu(lh->cid);
6743 len = __le16_to_cpu(lh->len);
6745 if (len != skb->len) {
6750 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6753 case L2CAP_CID_SIGNALING:
6754 l2cap_sig_channel(conn, skb);
6757 case L2CAP_CID_CONN_LESS:
6758 psm = get_unaligned((__le16 *) skb->data);
6759 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6760 l2cap_conless_channel(conn, psm, skb);
6764 l2cap_att_channel(conn, skb);
6767 case L2CAP_CID_LE_SIGNALING:
6768 l2cap_le_sig_channel(conn, skb);
6772 if (smp_sig_channel(conn, skb))
6773 l2cap_conn_del(conn->hcon, EACCES);
6777 l2cap_data_channel(conn, cid, skb);
6782 /* ---- L2CAP interface with lower layer (HCI) ---- */
6784 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6786 int exact = 0, lm1 = 0, lm2 = 0;
6787 struct l2cap_chan *c;
6789 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6791 /* Find listening sockets and check their link_mode */
6792 read_lock(&chan_list_lock);
6793 list_for_each_entry(c, &chan_list, global_l) {
6794 if (c->state != BT_LISTEN)
6797 if (!bacmp(&c->src, &hdev->bdaddr)) {
6798 lm1 |= HCI_LM_ACCEPT;
6799 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6800 lm1 |= HCI_LM_MASTER;
6802 } else if (!bacmp(&c->src, BDADDR_ANY)) {
6803 lm2 |= HCI_LM_ACCEPT;
6804 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6805 lm2 |= HCI_LM_MASTER;
6808 read_unlock(&chan_list_lock);
6810 return exact ? lm1 : lm2;
6813 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6815 struct l2cap_conn *conn;
6817 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6820 conn = l2cap_conn_add(hcon);
6822 l2cap_conn_ready(conn);
6824 l2cap_conn_del(hcon, bt_to_errno(status));
6828 int l2cap_disconn_ind(struct hci_conn *hcon)
6830 struct l2cap_conn *conn = hcon->l2cap_data;
6832 BT_DBG("hcon %p", hcon);
6835 return HCI_ERROR_REMOTE_USER_TERM;
6836 return conn->disc_reason;
6839 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6841 BT_DBG("hcon %p reason %d", hcon, reason);
6843 l2cap_conn_del(hcon, bt_to_errno(reason));
6846 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6848 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6851 if (encrypt == 0x00) {
6852 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6853 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6854 } else if (chan->sec_level == BT_SECURITY_HIGH)
6855 l2cap_chan_close(chan, ECONNREFUSED);
6857 if (chan->sec_level == BT_SECURITY_MEDIUM)
6858 __clear_chan_timer(chan);
6862 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6864 struct l2cap_conn *conn = hcon->l2cap_data;
6865 struct l2cap_chan *chan;
6870 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6872 if (hcon->type == LE_LINK) {
6873 if (!status && encrypt)
6874 smp_distribute_keys(conn, 0);
6875 cancel_delayed_work(&conn->security_timer);
6878 mutex_lock(&conn->chan_lock);
6880 list_for_each_entry(chan, &conn->chan_l, list) {
6881 l2cap_chan_lock(chan);
6883 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6884 state_to_string(chan->state));
6886 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6887 l2cap_chan_unlock(chan);
6891 if (chan->scid == L2CAP_CID_ATT) {
6892 if (!status && encrypt) {
6893 chan->sec_level = hcon->sec_level;
6894 l2cap_chan_ready(chan);
6897 l2cap_chan_unlock(chan);
6901 if (!__l2cap_no_conn_pending(chan)) {
6902 l2cap_chan_unlock(chan);
6906 if (!status && (chan->state == BT_CONNECTED ||
6907 chan->state == BT_CONFIG)) {
6908 chan->ops->resume(chan);
6909 l2cap_check_encryption(chan, encrypt);
6910 l2cap_chan_unlock(chan);
6914 if (chan->state == BT_CONNECT) {
6916 l2cap_start_connection(chan);
6918 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6919 } else if (chan->state == BT_CONNECT2) {
6920 struct l2cap_conn_rsp rsp;
6924 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6925 res = L2CAP_CR_PEND;
6926 stat = L2CAP_CS_AUTHOR_PEND;
6927 chan->ops->defer(chan);
6929 l2cap_state_change(chan, BT_CONFIG);
6930 res = L2CAP_CR_SUCCESS;
6931 stat = L2CAP_CS_NO_INFO;
6934 l2cap_state_change(chan, BT_DISCONN);
6935 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6936 res = L2CAP_CR_SEC_BLOCK;
6937 stat = L2CAP_CS_NO_INFO;
6940 rsp.scid = cpu_to_le16(chan->dcid);
6941 rsp.dcid = cpu_to_le16(chan->scid);
6942 rsp.result = cpu_to_le16(res);
6943 rsp.status = cpu_to_le16(stat);
6944 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6947 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6948 res == L2CAP_CR_SUCCESS) {
6950 set_bit(CONF_REQ_SENT, &chan->conf_state);
6951 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6953 l2cap_build_conf_req(chan, buf),
6955 chan->num_conf_req++;
6959 l2cap_chan_unlock(chan);
6962 mutex_unlock(&conn->chan_lock);
6967 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6969 struct l2cap_conn *conn = hcon->l2cap_data;
6970 struct l2cap_hdr *hdr;
6973 /* For AMP controller do not create l2cap conn */
6974 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6978 conn = l2cap_conn_add(hcon);
6983 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6987 case ACL_START_NO_FLUSH:
6990 BT_ERR("Unexpected start frame (len %d)", skb->len);
6991 kfree_skb(conn->rx_skb);
6992 conn->rx_skb = NULL;
6994 l2cap_conn_unreliable(conn, ECOMM);
6997 /* Start fragment always begin with Basic L2CAP header */
6998 if (skb->len < L2CAP_HDR_SIZE) {
6999 BT_ERR("Frame is too short (len %d)", skb->len);
7000 l2cap_conn_unreliable(conn, ECOMM);
7004 hdr = (struct l2cap_hdr *) skb->data;
7005 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7007 if (len == skb->len) {
7008 /* Complete frame received */
7009 l2cap_recv_frame(conn, skb);
7013 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7015 if (skb->len > len) {
7016 BT_ERR("Frame is too long (len %d, expected len %d)",
7018 l2cap_conn_unreliable(conn, ECOMM);
7022 /* Allocate skb for the complete frame (with header) */
7023 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7027 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7029 conn->rx_len = len - skb->len;
7033 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7035 if (!conn->rx_len) {
7036 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7037 l2cap_conn_unreliable(conn, ECOMM);
7041 if (skb->len > conn->rx_len) {
7042 BT_ERR("Fragment is too long (len %d, expected %d)",
7043 skb->len, conn->rx_len);
7044 kfree_skb(conn->rx_skb);
7045 conn->rx_skb = NULL;
7047 l2cap_conn_unreliable(conn, ECOMM);
7051 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7053 conn->rx_len -= skb->len;
7055 if (!conn->rx_len) {
7056 /* Complete frame received. l2cap_recv_frame
7057 * takes ownership of the skb so set the global
7058 * rx_skb pointer to NULL first.
7060 struct sk_buff *rx_skb = conn->rx_skb;
7061 conn->rx_skb = NULL;
7062 l2cap_recv_frame(conn, rx_skb);
7072 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7074 struct l2cap_chan *c;
7076 read_lock(&chan_list_lock);
7078 list_for_each_entry(c, &chan_list, global_l) {
7079 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7081 c->state, __le16_to_cpu(c->psm),
7082 c->scid, c->dcid, c->imtu, c->omtu,
7083 c->sec_level, c->mode);
7086 read_unlock(&chan_list_lock);
7091 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7093 return single_open(file, l2cap_debugfs_show, inode->i_private);
7096 static const struct file_operations l2cap_debugfs_fops = {
7097 .open = l2cap_debugfs_open,
7099 .llseek = seq_lseek,
7100 .release = single_release,
7103 static struct dentry *l2cap_debugfs;
7105 int __init l2cap_init(void)
7109 err = l2cap_init_sockets();
7113 if (IS_ERR_OR_NULL(bt_debugfs))
7116 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7117 NULL, &l2cap_debugfs_fops);
7122 void l2cap_exit(void)
7124 debugfs_remove(l2cap_debugfs);
7125 l2cap_cleanup_sockets();
7128 module_param(disable_ertm, bool, 0644);
7129 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");