2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/* Upper bound on credits in LE credit-based flow control (16-bit field). */
45 #define LE_FLOWCTL_MAX_CREDITS 65535
/* Locally supported L2CAP feature mask. */
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
/* Bitmap of locally supported fixed channels (signalling + connectionless). */
50 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global list of all L2CAP channels, protected by chan_list_lock. */
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
/* Module-wide defaults for LE credit-based channels (credits and MPS). */
55 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
56 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
/* Forward declarations for helpers defined later in this file. */
58 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
59 u8 code, u8 ident, u16 dlen, void *data);
60 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
62 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
63 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
65 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
66 struct sk_buff_head *skbs, u8 event);
/* Map an HCI address @type to a bdaddr type.  For LE links a public HCI
 * address becomes BDADDR_LE_PUBLIC and anything else BDADDR_LE_RANDOM.
 * NOTE(review): the non-LE (BR/EDR) return path is not visible here —
 * confirm against the full source.
 */
68 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
70 if (hcon->type == LE_LINK) {
71 if (type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
74 return BDADDR_LE_RANDOM;
80 /* ---- L2CAP channels ---- */
/* Look up a channel on @conn by destination CID.  Walks the per-connection
 * channel list; caller must hold conn->chan_lock (see the locked wrappers
 * below).  Returned channel is not locked.
 */
82 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
87 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID.  Caller must hold
 * conn->chan_lock; returned channel is not locked.
 */
94 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
99 list_for_each_entry(c, &conn->chan_l, list) {
106 /* Find channel with given SCID.
107 * Returns locked channel. */
108 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
111 struct l2cap_chan *c;
/* conn->chan_lock protects the channel list during the lookup. */
113 mutex_lock(&conn->chan_lock);
114 c = __l2cap_get_chan_by_scid(conn, cid);
117 mutex_unlock(&conn->chan_lock);
122 /* Find channel with given DCID.
123 * Returns locked channel.
 */
125 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
128 struct l2cap_chan *c;
/* conn->chan_lock protects the channel list during the lookup. */
130 mutex_lock(&conn->chan_lock);
131 c = __l2cap_get_chan_by_dcid(conn, cid);
134 mutex_unlock(&conn->chan_lock);
/* Look up a channel on @conn by the signalling command identifier it
 * last used.  Caller must hold conn->chan_lock.
 */
139 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
142 struct l2cap_chan *c;
144 list_for_each_entry(c, &conn->chan_l, list) {
145 if (c->ident == ident)
/* Locked variant of __l2cap_get_chan_by_ident(): takes conn->chan_lock
 * around the lookup.
 */
151 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
154 struct l2cap_chan *c;
156 mutex_lock(&conn->chan_lock);
157 c = __l2cap_get_chan_by_ident(conn, ident);
160 mutex_unlock(&conn->chan_lock);
/* Find a channel in the global list bound to source PSM @psm and source
 * address @src.  Caller must hold chan_list_lock.
 */
165 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
167 struct l2cap_chan *c;
169 list_for_each_entry(c, &chan_list, global_l) {
170 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind @chan to a PSM on source address @src.  A non-zero @psm is
 * rejected if already bound on that address; a zero @psm triggers a
 * scan of the dynamic range (odd values 0x1001..0x10ff, step 2) for a
 * free one.  Runs under the global channel-list write lock.
 */
176 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
180 write_lock(&chan_list_lock);
/* Requested PSM already in use on this source address. */
182 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* No PSM requested: allocate the first free dynamic PSM. */
195 for (p = 0x1001; p < 0x1100; p += 2)
196 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
197 chan->psm = cpu_to_le16(p);
198 chan->sport = cpu_to_le16(p);
205 write_unlock(&chan_list_lock);
/* Record a fixed source CID for @chan under the global channel-list
 * write lock.
 */
209 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
211 write_lock(&chan_list_lock);
215 write_unlock(&chan_list_lock);
/* Return the first unused dynamic source CID on @conn.  LE links use
 * the (smaller) LE dynamic CID range.
 */
220 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
224 if (conn->hcon->type == LE_LINK)
225 dyn_end = L2CAP_CID_LE_DYN_END;
227 dyn_end = L2CAP_CID_DYN_END;
229 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move @chan to @state and notify the channel owner through the
 * state_change callback with no error.
 */
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
240 state_to_string(state));
243 chan->ops->state_change(chan, state, 0);
/* Change the channel state and report @err to the owner in a single
 * state_change callback.  NOTE(review): the state assignment itself is
 * elided from this excerpt — confirm against the full source.
 */
246 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
250 chan->ops->state_change(chan, chan->state, err);
/* Report @err to the channel owner without changing the channel state
 * (the current state is passed back unchanged).
 */
253 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
255 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending or no retransmission timeout is configured.
 */
258 static void __set_retrans_timer(struct l2cap_chan *chan)
260 if (!delayed_work_pending(&chan->monitor_timer) &&
261 chan->retrans_timeout) {
262 l2cap_set_timer(chan, &chan->retrans_timer,
263 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer; the retransmission timer is cleared
 * first since the two are mutually exclusive.
 */
267 static void __set_monitor_timer(struct l2cap_chan *chan)
269 __clear_retrans_timer(chan);
270 if (chan->monitor_timeout) {
271 l2cap_set_timer(chan, &chan->monitor_timer,
272 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of @head for the skb whose control block carries transmit
 * sequence number @seq.
 */
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
289 /* ---- L2CAP sequence number lists ---- */
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 and removed from the head in constant time, without further memory allocations.
/* Allocate and reset a sequence-number list able to hold @size entries.
 * The backing array is rounded up to a power of two so that
 * (seq & mask) indexes it directly.
 */
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
302 size_t alloc_size, i;
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
 */
308 alloc_size = roundup_pow_of_two(size);
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Start empty: head/tail cleared and every slot marked unused. */
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array of a sequence-number list. */
323 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
325 kfree(seq_list->list);
/* O(1) membership test: a slot that is not L2CAP_SEQ_LIST_CLEAR is on
 * the list.
 */
328 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
331 /* Constant-time check for list membership */
332 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the sequence number at the head of the list.  Each
 * slot stores the link to the next element; the L2CAP_SEQ_LIST_TAIL
 * sentinel marks the last element, so popping it empties the list.
 */
335 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
337 u16 seq = seq_list->head;
338 u16 mask = seq_list->mask;
/* Advance head via the stored link, then clear the vacated slot. */
340 seq_list->head = seq_list->list[seq & mask];
341 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
343 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Reset the list to empty; no-op when it already is. */
351 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
355 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
358 for (i = 0; i <= seq_list->mask; i++)
359 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq to the tail of the list in constant time.  A duplicate
 * entry is ignored; appending to an empty list also sets the head.
 */
365 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
367 u16 mask = seq_list->mask;
369 /* All appends happen in constant time */
/* Already a member: nothing to do. */
371 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
374 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
375 seq_list->head = seq;
/* Link the previous tail to @seq and mark @seq as the terminator. */
377 seq_list->list[seq_list->tail & mask] = seq;
379 seq_list->tail = seq;
380 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state, then drop the reference the
 * pending timer held.
 */
383 static void l2cap_chan_timeout(struct work_struct *work)
385 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
387 struct l2cap_conn *conn = chan->conn;
390 BT_DBG("chan %p state %s", chan, state_to_string(chan->state))
392 mutex_lock(&conn->chan_lock);
393 l2cap_chan_lock(chan);
/* Established or connecting (non-SDP) channels report ECONNREFUSED. */
395 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
396 reason = ECONNREFUSED;
397 else if (chan->state == BT_CONNECT &&
398 chan->sec_level != BT_SECURITY_SDP)
399 reason = ECONNREFUSED;
403 l2cap_chan_close(chan, reason);
405 l2cap_chan_unlock(chan);
407 chan->ops->close(chan);
408 mutex_unlock(&conn->chan_lock);
/* Balances the reference held while the timer was pending. */
410 l2cap_chan_put(chan);
/* Allocate and initialise a new channel in BT_OPEN state, add it to the
 * global channel list, and return it with a single kref held.
 */
413 struct l2cap_chan *l2cap_chan_create(void)
415 struct l2cap_chan *chan;
417 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
421 mutex_init(&chan->lock);
423 write_lock(&chan_list_lock);
424 list_add(&chan->global_l, &chan_list);
425 write_unlock(&chan_list_lock);
/* Channel timeout handling runs from this delayed work item. */
427 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
429 chan->state = BT_OPEN;
431 kref_init(&chan->kref);
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
436 BT_DBG("chan %p", chan);
/* kref release callback: unlink the channel from the global list.
 * NOTE(review): the final kfree is elided from this excerpt — confirm.
 */
441 static void l2cap_chan_destroy(struct kref *kref)
443 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
445 BT_DBG("chan %p", chan);
447 write_lock(&chan_list_lock);
448 list_del(&chan->global_l);
449 write_unlock(&chan_list_lock);
/* Take an additional reference on the channel. */
454 void l2cap_chan_hold(struct l2cap_chan *c)
456 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a channel reference; l2cap_chan_destroy() runs at zero. */
461 void l2cap_chan_put(struct l2cap_chan *c)
463 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
465 kref_put(&c->kref, l2cap_chan_destroy);
/* Initialise @chan with default FCS, ERTM window/retry, security and
 * timeout parameters, clearing any previous configuration state.
 */
468 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
470 chan->fcs = L2CAP_FCS_CRC16;
471 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
472 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
473 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Until negotiation, assume the remote mirrors our defaults. */
474 chan->remote_max_tx = chan->max_tx;
475 chan->remote_tx_win = chan->tx_win;
476 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
477 chan->sec_level = BT_SECURITY_LOW;
478 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
479 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
480 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
481 chan->conf_state = 0;
483 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Reset LE credit-based flow-control state: no TX credits until the
 * peer grants some; local RX credits and MPS come from the module-wide
 * defaults (MPS capped by the channel's input MTU).
 */
486 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
489 chan->sdu_last_frag = NULL;
491 chan->tx_credits = 0;
492 chan->rx_credits = le_max_credits;
493 chan->mps = min_t(u16, chan->imtu, le_default_mps);
495 skb_queue_head_init(&chan->tx_q);
/* Attach @chan to @conn: allocate/assign CIDs and MTU according to the
 * channel type, set default extended-flow-spec parameters, take
 * channel and hci_conn references, and link the channel into the
 * connection's list.  Caller holds conn->chan_lock.
 */
498 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
500 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
501 __le16_to_cpu(chan->psm), chan->dcid);
503 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
507 switch (chan->chan_type) {
508 case L2CAP_CHAN_CONN_ORIENTED:
509 /* Alloc CID for connection-oriented socket */
510 chan->scid = l2cap_alloc_cid(conn);
511 if (conn->hcon->type == ACL_LINK)
512 chan->omtu = L2CAP_DEFAULT_MTU;
515 case L2CAP_CHAN_CONN_LESS:
516 /* Connectionless socket */
517 chan->scid = L2CAP_CID_CONN_LESS;
518 chan->dcid = L2CAP_CID_CONN_LESS;
519 chan->omtu = L2CAP_DEFAULT_MTU;
522 case L2CAP_CHAN_FIXED:
523 /* Caller will set CID and CID specific MTU values */
527 /* Raw socket can send/recv signalling messages only */
528 chan->scid = L2CAP_CID_SIGNALING;
529 chan->dcid = L2CAP_CID_SIGNALING;
530 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort extended flow specification. */
533 chan->local_id = L2CAP_BESTEFFORT_ID;
534 chan->local_stype = L2CAP_SERV_BESTEFFORT;
535 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
536 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
537 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
538 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
/* Reference released by l2cap_chan_del(). */
540 l2cap_chan_hold(chan);
542 hci_conn_hold(conn->hcon);
544 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
547 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
549 mutex_lock(&conn->chan_lock);
550 __l2cap_chan_add(conn, chan);
551 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear it down with @err: stop
 * the channel timer, unlink from the connection, drop the references
 * taken by __l2cap_chan_add(), disconnect any AMP logical link, and
 * purge mode-specific queues and ERTM state.
 */
554 void l2cap_chan_del(struct l2cap_chan *chan, int err)
556 struct l2cap_conn *conn = chan->conn;
558 __clear_chan_timer(chan);
560 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
563 struct amp_mgr *mgr = conn->hcon->amp_mgr;
564 /* Delete from channel list */
565 list_del(&chan->list);
/* Balances the hold taken in __l2cap_chan_add(). */
567 l2cap_chan_put(chan);
/* A2MP channels do not hold an hci_conn reference. */
571 if (chan->scid != L2CAP_CID_A2MP)
572 hci_conn_drop(conn->hcon);
574 if (mgr && mgr->bredr_chan == chan)
575 mgr->bredr_chan = NULL;
/* Tear down the high-speed logical link, if one exists. */
578 if (chan->hs_hchan) {
579 struct hci_chan *hs_hchan = chan->hs_hchan;
581 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
582 amp_disconnect_logical_link(hs_hchan);
585 chan->ops->teardown(chan, err);
587 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Mode-specific cleanup of queued data and ERTM bookkeeping. */
591 case L2CAP_MODE_BASIC:
594 case L2CAP_MODE_LE_FLOWCTL:
595 skb_queue_purge(&chan->tx_q);
598 case L2CAP_MODE_ERTM:
599 __clear_retrans_timer(chan);
600 __clear_monitor_timer(chan);
601 __clear_ack_timer(chan);
603 skb_queue_purge(&chan->srej_q);
605 l2cap_seq_list_free(&chan->srej_list);
606 l2cap_seq_list_free(&chan->retrans_list);
610 case L2CAP_MODE_STREAMING:
611 skb_queue_purge(&chan->tx_q);
/* Propagate the connection's (possibly updated) destination address and
 * address type to every channel on it.
 */
618 void l2cap_conn_update_id_addr(struct hci_conn *hcon)
620 struct l2cap_conn *conn = hcon->l2cap_data;
621 struct l2cap_chan *chan;
623 mutex_lock(&conn->chan_lock);
625 list_for_each_entry(chan, &conn->chan_l, list) {
626 l2cap_chan_lock(chan);
627 bacpy(&chan->dst, &hcon->dst);
628 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
629 l2cap_chan_unlock(chan);
632 mutex_unlock(&conn->chan_lock);
/* Reject a pending LE credit-based connection: reply with
 * "authorization" when setup was deferred, otherwise "bad PSM", and
 * move the channel to BT_DISCONN.
 */
635 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
637 struct l2cap_conn *conn = chan->conn;
638 struct l2cap_le_conn_rsp rsp;
641 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
642 result = L2CAP_CR_AUTHORIZATION;
644 result = L2CAP_CR_BAD_PSM;
646 l2cap_state_change(chan, BT_DISCONN);
/* The response echoes our CID and local MTU/MPS/credit parameters. */
648 rsp.dcid = cpu_to_le16(chan->scid);
649 rsp.mtu = cpu_to_le16(chan->imtu);
650 rsp.mps = cpu_to_le16(chan->mps);
651 rsp.credits = cpu_to_le16(chan->rx_credits);
652 rsp.result = cpu_to_le16(result);
654 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Reject a pending BR/EDR connection request: "security block" when
 * setup was deferred, otherwise "bad PSM", and move the channel to
 * BT_DISCONN.
 */
658 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
660 struct l2cap_conn *conn = chan->conn;
661 struct l2cap_conn_rsp rsp;
664 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
665 result = L2CAP_CR_SEC_BLOCK;
667 result = L2CAP_CR_BAD_PSM;
669 l2cap_state_change(chan, BT_DISCONN);
671 rsp.scid = cpu_to_le16(chan->dcid);
672 rsp.dcid = cpu_to_le16(chan->scid);
673 rsp.result = cpu_to_le16(result);
674 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
676 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close @chan according to its current state: send a disconnect request
 * or a connect reject where the protocol requires one, otherwise just
 * delete / tear down locally with @reason.
 */
679 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
681 struct l2cap_conn *conn = chan->conn;
683 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
685 switch (chan->state) {
687 chan->ops->teardown(chan, 0);
/* Connected/configuring: request a disconnect and start the timer. */
692 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
693 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
694 l2cap_send_disconn_req(chan, reason);
696 l2cap_chan_del(chan, reason);
/* Incoming connection not yet accepted: reject per link type. */
700 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
701 if (conn->hcon->type == ACL_LINK)
702 l2cap_chan_connect_reject(chan);
703 else if (conn->hcon->type == LE_LINK)
704 l2cap_chan_le_connect_reject(chan);
707 l2cap_chan_del(chan, reason);
712 l2cap_chan_del(chan, reason);
716 chan->ops->teardown(chan, 0);
/* Derive the HCI authentication requirement from the channel type and
 * its requested security level.  SDP (and 3DSP connectionless) traffic
 * at BT_SECURITY_LOW is demoted to BT_SECURITY_SDP so it never
 * triggers pairing; high/FIPS levels request MITM protection.
 */
721 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
723 switch (chan->chan_type) {
725 switch (chan->sec_level) {
726 case BT_SECURITY_HIGH:
727 case BT_SECURITY_FIPS:
728 return HCI_AT_DEDICATED_BONDING_MITM;
729 case BT_SECURITY_MEDIUM:
730 return HCI_AT_DEDICATED_BONDING;
732 return HCI_AT_NO_BONDING;
735 case L2CAP_CHAN_CONN_LESS:
736 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
737 if (chan->sec_level == BT_SECURITY_LOW)
738 chan->sec_level = BT_SECURITY_SDP;
740 if (chan->sec_level == BT_SECURITY_HIGH ||
741 chan->sec_level == BT_SECURITY_FIPS)
742 return HCI_AT_NO_BONDING_MITM;
744 return HCI_AT_NO_BONDING;
746 case L2CAP_CHAN_CONN_ORIENTED:
747 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
748 if (chan->sec_level == BT_SECURITY_LOW)
749 chan->sec_level = BT_SECURITY_SDP;
751 if (chan->sec_level == BT_SECURITY_HIGH ||
752 chan->sec_level == BT_SECURITY_FIPS)
753 return HCI_AT_NO_BONDING_MITM;
755 return HCI_AT_NO_BONDING;
/* Default: general bonding, escalated per security level. */
759 switch (chan->sec_level) {
760 case BT_SECURITY_HIGH:
761 case BT_SECURITY_FIPS:
762 return HCI_AT_GENERAL_BONDING_MITM;
763 case BT_SECURITY_MEDIUM:
764 return HCI_AT_GENERAL_BONDING;
766 return HCI_AT_NO_BONDING;
772 /* Service level security */
/* Enforce the channel's security level on the underlying link: SMP for
 * LE links, HCI authentication/encryption (with the derived auth type)
 * for BR/EDR.
 */
773 int l2cap_chan_check_security(struct l2cap_chan *chan)
775 struct l2cap_conn *conn = chan->conn;
778 if (conn->hcon->type == LE_LINK)
779 return smp_conn_security(conn->hcon, chan->sec_level);
781 auth_type = l2cap_get_auth_type(chan);
783 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Return the next signalling command identifier for this connection,
 * cycling within the kernel-reserved 1..128 range under conn->lock.
 */
786 static u8 l2cap_get_ident(struct l2cap_conn *conn)
790 /* Get next available identificator.
791 * 1 - 128 are used by kernel.
792 * 129 - 199 are reserved.
793 * 200 - 254 are used by utilities like l2ping, etc.
 */
796 spin_lock(&conn->lock);
798 if (++conn->tx_ident > 128)
803 spin_unlock(&conn->lock);
/* Build a signalling command skb and transmit it on the connection's
 * HCI channel at maximum priority.
 */
808 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
811 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
814 BT_DBG("code 0x%2.2x", code);
/* Use a non-flushable ACL start when the controller supports it. */
819 if (lmp_no_flush_capable(conn->hcon->hdev))
820 flags = ACL_START_NO_FLUSH;
824 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
825 skb->priority = HCI_PRIO_MAX;
827 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress, i.e. the move state is
 * anything other than stable or wait-prepare.
 */
830 static bool __chan_is_moving(struct l2cap_chan *chan)
832 return chan->move_state != L2CAP_MOVE_STABLE &&
833 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb on the channel: route to the high-speed (AMP)
 * HCI channel when one is active and no move is in flight, otherwise
 * send on the regular ACL link with flush/active flags derived from
 * the channel flags and controller capabilities.
 */
836 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
838 struct hci_conn *hcon = chan->conn->hcon;
841 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
844 if (chan->hs_hcon && !__chan_is_moving(chan)) {
846 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
/* Non-flushable start when the channel isn't flushable and the
 * controller supports it.
 */
853 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
854 lmp_no_flush_capable(hcon->hdev))
855 flags = ACL_START_NO_FLUSH;
859 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
860 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into @control.  S-frames
 * (frame-type bit set) carry poll/supervise bits; I-frames carry SAR
 * and the transmit sequence number.
 */
863 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
865 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
866 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
868 if (enh & L2CAP_CTRL_FRAME_TYPE) {
871 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
872 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
879 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
880 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field into @control; same layout
 * logic as the enhanced variant but with the wider bit positions.
 */
887 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
889 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
890 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
892 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
895 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
896 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
903 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
904 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Strip and decode the control field at the front of @skb into its
 * control block, choosing extended (32-bit) or enhanced (16-bit)
 * format based on the channel's FLAG_EXT_CTRL flag.
 */
911 static inline void __unpack_control(struct l2cap_chan *chan,
914 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
915 __unpack_extended_control(get_unaligned_le32(skb->data),
916 &bt_cb(skb)->control);
917 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
919 __unpack_enhanced_control(get_unaligned_le16(skb->data),
920 &bt_cb(skb)->control);
921 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode @control into a 32-bit extended control field — the inverse of
 * __unpack_extended_control().
 */
925 static u32 __pack_extended_control(struct l2cap_ctrl *control)
929 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
930 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
932 if (control->sframe) {
933 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
934 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
935 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
937 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
938 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode @control into a 16-bit enhanced control field — the inverse of
 * __unpack_enhanced_control().
 */
944 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
948 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
949 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
951 if (control->sframe) {
952 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
953 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
954 packed |= L2CAP_CTRL_FRAME_TYPE;
956 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
957 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field for @control into @skb directly after
 * the basic L2CAP header, in the format selected by FLAG_EXT_CTRL.
 */
963 static inline void __pack_control(struct l2cap_chan *chan,
964 struct l2cap_ctrl *control,
967 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
968 put_unaligned_le32(__pack_extended_control(control),
969 skb->data + L2CAP_HDR_SIZE);
971 put_unaligned_le16(__pack_enhanced_control(control),
972 skb->data + L2CAP_HDR_SIZE);
/* Header size for ERTM PDUs: larger when extended control is in use. */
976 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
978 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
979 return L2CAP_EXT_HDR_SIZE;
981 return L2CAP_ENH_HDR_SIZE;
/* Allocate and fill an S-frame PDU: basic L2CAP header, the packed
 * @control field, and an optional CRC16 FCS trailer.  Returns an
 * ERR_PTR on allocation failure.
 */
984 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
988 struct l2cap_hdr *lh;
989 int hlen = __ertm_hdr_size(chan);
991 if (chan->fcs == L2CAP_FCS_CRC16)
992 hlen += L2CAP_FCS_SIZE;
994 skb = bt_skb_alloc(hlen, GFP_KERNEL);
997 return ERR_PTR(-ENOMEM);
/* Basic header: payload length excludes the basic header itself. */
999 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1000 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1001 lh->cid = cpu_to_le16(chan->dcid);
1003 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1004 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1006 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers everything written so far. */
1008 if (chan->fcs == L2CAP_FCS_CRC16) {
1009 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1010 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1013 skb->priority = HCI_PRIO_MAX;
/* Build and transmit the supervisory frame described by @control,
 * updating RNR bookkeeping and acknowledgement state as side effects.
 * Ignores non-S-frame control blocks and suppresses sending while an
 * AMP channel move is in flight.
 */
1017 static void l2cap_send_sframe(struct l2cap_chan *chan,
1018 struct l2cap_ctrl *control)
1020 struct sk_buff *skb;
1023 BT_DBG("chan %p, control %p", chan, control);
1025 if (!control->sframe)
1028 if (__chan_is_moving(chan))
1031 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
/* Track whether our last supervisory frame signalled receiver-not-ready. */
1035 if (control->super == L2CAP_SUPER_RR)
1036 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1037 else if (control->super == L2CAP_SUPER_RNR)
1038 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* Non-SREJ frames acknowledge reqseq, so the ack timer can stop. */
1040 if (control->super != L2CAP_SUPER_SREJ) {
1041 chan->last_acked_seq = control->reqseq;
1042 __clear_ack_timer(chan);
1045 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1046 control->final, control->poll, control->super);
1048 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1049 control_field = __pack_extended_control(control);
1051 control_field = __pack_enhanced_control(control);
1053 skb = l2cap_create_sframe_pdu(chan, control_field);
1055 l2cap_do_send(chan, skb);
/* Send an RR (receiver ready) S-frame — or RNR when the local side is
 * busy — acknowledging buffer_seq, with the poll bit set as requested.
 */
1058 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1060 struct l2cap_ctrl control;
1062 BT_DBG("chan %p, poll %d", chan, poll);
1064 memset(&control, 0, sizeof(control));
1066 control.poll = poll;
1068 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1069 control.super = L2CAP_SUPER_RNR;
1071 control.super = L2CAP_SUPER_RR;
1073 control.reqseq = chan->buffer_seq;
1074 l2cap_send_sframe(chan, &control);
/* True when no connect request is currently outstanding for @chan. */
1077 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1079 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: high-speed
 * must be enabled on the connection, the peer must advertise the A2MP
 * fixed channel, and a powered non-BR/EDR AMP controller must be
 * registered.  Availability is honoured only when the channel policy
 * prefers AMP.
 */
1082 static bool __amp_capable(struct l2cap_chan *chan)
1084 struct l2cap_conn *conn = chan->conn;
1085 struct hci_dev *hdev;
1086 bool amp_available = false;
1088 if (!conn->hs_enabled)
1091 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
/* Scan registered controllers for a powered AMP device. */
1094 read_lock(&hci_dev_list_lock);
1095 list_for_each_entry(hdev, &hci_dev_list, list) {
1096 if (hdev->amp_type != AMP_TYPE_BREDR &&
1097 test_bit(HCI_UP, &hdev->flags)) {
1098 amp_available = true;
1102 read_unlock(&hci_dev_list_lock);
1104 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1105 return amp_available;
/* Validate the channel's extended flow specification parameters.
 * NOTE(review): the function body is elided in this excerpt — confirm
 * its behavior against the full source.
 */
1110 static bool l2cap_check_efs(struct l2cap_chan *chan)
1112 /* Check EFS parameters */
/* Send an L2CAP connect request carrying the channel's PSM and source
 * CID, and mark the connect as pending until the response arrives.
 */
1116 void l2cap_send_conn_req(struct l2cap_chan *chan)
1118 struct l2cap_conn *conn = chan->conn;
1119 struct l2cap_conn_req req;
1121 req.scid = cpu_to_le16(chan->scid);
1122 req.psm = chan->psm;
1124 chan->ident = l2cap_get_ident(conn);
1126 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1128 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send a Create Channel request targeting AMP controller @amp_id. */
1131 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1133 struct l2cap_create_chan_req req;
1134 req.scid = cpu_to_le16(chan->scid);
1135 req.psm = chan->psm;
1136 req.amp_id = amp_id;
1138 chan->ident = l2cap_get_ident(chan->conn);
1140 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, reset
 * retry and sequence bookkeeping, drop SREJ state, and park the TX/RX
 * state machines until the move completes.  No-op for non-ERTM modes.
 */
1144 static void l2cap_move_setup(struct l2cap_chan *chan)
1146 struct sk_buff *skb;
1148 BT_DBG("chan %p", chan);
1150 if (chan->mode != L2CAP_MODE_ERTM)
1153 __clear_retrans_timer(chan);
1154 __clear_monitor_timer(chan);
1155 __clear_ack_timer(chan);
1157 chan->retry_count = 0;
/* Frames that were already transmitted are marked as retried once. */
1158 skb_queue_walk(&chan->tx_q, skb) {
1159 if (bt_cb(skb)->control.retries)
1160 bt_cb(skb)->control.retries = 1;
1165 chan->expected_tx_seq = chan->buffer_seq;
1167 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1168 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1169 l2cap_seq_list_clear(&chan->retrans_list);
1170 l2cap_seq_list_clear(&chan->srej_list);
1171 skb_queue_purge(&chan->srej_q);
1173 chan->tx_state = L2CAP_TX_STATE_XMIT;
1174 chan->rx_state = L2CAP_RX_STATE_MOVE;
/* Treat the remote as busy until the move handshake finishes. */
1176 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP channel move: return the move state machine to stable
 * and, for ERTM channels, run the poll/final handshake appropriate to
 * the role we played in the move.
 */
1179 static void l2cap_move_done(struct l2cap_chan *chan)
1181 u8 move_role = chan->move_role;
1182 BT_DBG("chan %p", chan);
1184 chan->move_state = L2CAP_MOVE_STABLE;
1185 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1187 if (chan->mode != L2CAP_MODE_ERTM)
1190 switch (move_role) {
/* Initiator polls the peer and waits for the final bit. */
1191 case L2CAP_MOVE_ROLE_INITIATOR:
1192 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1193 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1195 case L2CAP_MOVE_ROLE_RESPONDER:
1196 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel connected and notify its owner.  An LE flow-control
 * channel with no TX credits starts out suspended until the peer
 * grants credits.
 */
1201 static void l2cap_chan_ready(struct l2cap_chan *chan)
1203 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1204 chan->conf_state = 0;
1205 __clear_chan_timer(chan);
1207 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1208 chan->ops->suspend(chan);
1210 chan->state = BT_CONNECTED;
1212 chan->ops->ready(chan);
/* Send an LE credit-based connection request, at most once per channel
 * (FLAG_LE_CONN_REQ_SENT guards against duplicates).
 */
1215 static void l2cap_le_connect(struct l2cap_chan *chan)
1217 struct l2cap_conn *conn = chan->conn;
1218 struct l2cap_le_conn_req req;
1220 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
/* Advertise our PSM, CID, MTU, MPS and initial RX credits. */
1223 req.psm = chan->psm;
1224 req.scid = cpu_to_le16(chan->scid);
1225 req.mtu = cpu_to_le16(chan->imtu);
1226 req.mps = cpu_to_le16(chan->mps);
1227 req.credits = cpu_to_le16(chan->rx_credits);
1229 chan->ident = l2cap_get_ident(conn);
1231 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Advance an LE channel once link security is satisfied: either mark
 * it ready, or issue the LE connect request if still connecting.
 */
1235 static void l2cap_le_start(struct l2cap_chan *chan)
1237 struct l2cap_conn *conn = chan->conn;
1239 if (!smp_conn_security(conn->hcon, chan->sec_level))
1243 l2cap_chan_ready(chan);
1247 if (chan->state == BT_CONNECT)
1248 l2cap_le_connect(chan);
/* Kick off channel establishment: discover AMP controllers for an
 * AMP-capable channel, otherwise take the LE or BR/EDR connect path.
 */
1251 static void l2cap_start_connection(struct l2cap_chan *chan)
1253 if (__amp_capable(chan)) {
1254 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1255 a2mp_discover_amp(chan);
1256 } else if (chan->conn->hcon->type == LE_LINK) {
1257 l2cap_le_start(chan);
1259 l2cap_send_conn_req(chan);
/* Start connecting the channel.  LE links go straight to the LE path.
 * On BR/EDR the feature-mask information exchange must have completed
 * first; if it has not even been initiated, send the information
 * request (with timeout) and defer the connect until the response.
 */
1263 static void l2cap_do_start(struct l2cap_chan *chan)
1265 struct l2cap_conn *conn = chan->conn;
1267 if (conn->hcon->type == LE_LINK) {
1268 l2cap_le_start(chan);
1272 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1273 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
/* Exchange done: connect if security allows and none is pending. */
1276 if (l2cap_chan_check_security(chan) &&
1277 __l2cap_no_conn_pending(chan)) {
1278 l2cap_start_connection(chan);
1281 struct l2cap_info_req req;
1282 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1284 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1285 conn->info_ident = l2cap_get_ident(conn);
1287 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1289 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check whether channel @mode is permitted by both the local feature
 * mask and the remote's advertised @feat_mask.
 */
1294 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1296 u32 local_feat_mask = l2cap_feat_mask;
1298 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1301 case L2CAP_MODE_ERTM:
1302 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1303 case L2CAP_MODE_STREAMING:
1304 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Disconnect the channel: stop ERTM timers, send a disconnect request
 * (skipped for A2MP channels, which only change state), and move to
 * BT_DISCONN reporting @err to the owner.
 */
1310 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1312 struct l2cap_conn *conn = chan->conn;
1313 struct l2cap_disconn_req req;
1318 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1319 __clear_retrans_timer(chan);
1320 __clear_monitor_timer(chan);
1321 __clear_ack_timer(chan);
/* A2MP channels have no disconnect request on the wire. */
1324 if (chan->scid == L2CAP_CID_A2MP) {
1325 l2cap_state_change(chan, BT_DISCONN);
1329 req.dcid = cpu_to_le16(chan->dcid);
1330 req.scid = cpu_to_le16(chan->scid);
1331 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1334 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1337 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and advance its
 * connection state machine: start connecting BT_CONNECT channels whose
 * security is satisfied (closing those whose mode the peer cannot
 * support), and answer pending incoming requests (BT_CONNECT2) with
 * success, authorization-pending, or authentication-pending responses,
 * sending the first configure request on success.
 */
1338 static void l2cap_conn_start(struct l2cap_conn *conn)
1340 struct l2cap_chan *chan, *tmp;
1342 BT_DBG("conn %p", conn);
1344 mutex_lock(&conn->chan_lock);
1346 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1347 l2cap_chan_lock(chan);
1349 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1350 l2cap_chan_unlock(chan);
1354 if (chan->state == BT_CONNECT) {
/* Wait for security, and avoid duplicate connect requests. */
1355 if (!l2cap_chan_check_security(chan) ||
1356 !__l2cap_no_conn_pending(chan)) {
1357 l2cap_chan_unlock(chan);
/* Peer lacks the required mode and we cannot fall back: close. */
1361 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1362 && test_bit(CONF_STATE2_DEVICE,
1363 &chan->conf_state)) {
1364 l2cap_chan_close(chan, ECONNRESET);
1365 l2cap_chan_unlock(chan);
1369 l2cap_start_connection(chan);
1371 } else if (chan->state == BT_CONNECT2) {
1372 struct l2cap_conn_rsp rsp;
1374 rsp.scid = cpu_to_le16(chan->dcid);
1375 rsp.dcid = cpu_to_le16(chan->scid);
1377 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: tell the peer authorization is pending. */
1378 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1379 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1380 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1381 chan->ops->defer(chan);
1384 l2cap_state_change(chan, BT_CONFIG);
1385 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1386 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1389 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1390 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1393 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a successful, not-yet-configured channel proceeds to config. */
1396 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1397 rsp.result != L2CAP_CR_SUCCESS) {
1398 l2cap_chan_unlock(chan);
1402 set_bit(CONF_REQ_SENT, &chan->conf_state);
1403 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1404 l2cap_build_conf_req(chan, buf), buf);
1405 chan->num_conf_req++;
1408 l2cap_chan_unlock(chan);
1411 mutex_unlock(&conn->chan_lock);
1414 /* Find socket with cid and source/destination bdaddr.
1415 * Returns closest match, locked.
 */
/* Preference order: exact src+dst match wins immediately; otherwise the
 * closest wildcard (BDADDR_ANY) match found during the scan is kept.
 */
1417 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1421 struct l2cap_chan *c, *c1 = NULL;
1423 read_lock(&chan_list_lock);
1425 list_for_each_entry(c, &chan_list, global_l) {
1426 if (state && c->state != state)
1429 if (c->scid == cid) {
1430 int src_match, dst_match;
1431 int src_any, dst_any;
1434 src_match = !bacmp(&c->src, src);
1435 dst_match = !bacmp(&c->dst, dst);
1436 if (src_match && dst_match) {
1437 read_unlock(&chan_list_lock);
/* Fall back to wildcard matches on either address. */
1442 src_any = !bacmp(&c->src, BDADDR_ANY);
1443 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1444 if ((src_match && dst_any) || (src_any && dst_match) ||
1445 (src_any && dst_any))
1450 read_unlock(&chan_list_lock);
/* Handle a newly-ready LE link: register it with 6LoWPAN, then, if a
 * channel is listening on the ATT fixed CID for this address pair,
 * create a child channel for the connection — unless a client ATT
 * channel already exists for the peer or the device is blacklisted.
 */
1455 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1457 struct hci_conn *hcon = conn->hcon;
1458 struct l2cap_chan *chan, *pchan;
1463 bt_6lowpan_add_conn(conn);
1465 /* Check if we have socket listening on cid */
1466 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1467 &hcon->src, &hcon->dst);
1471 /* Client ATT sockets should override the server one */
1472 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1475 dst_type = bdaddr_type(hcon, hcon->dst_type);
1477 /* If device is blocked, do not create a channel for it */
1478 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1481 l2cap_chan_lock(pchan);
/* Ask the listening channel to spawn a new child channel. */
1483 chan = pchan->ops->new_connection(pchan);
1487 bacpy(&chan->src, &hcon->src);
1488 bacpy(&chan->dst, &hcon->dst);
1489 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1490 chan->dst_type = dst_type;
1492 __l2cap_chan_add(conn, chan);
1495 l2cap_chan_unlock(pchan);
/* Called once the underlying HCI connection is up: kick off security on
 * outgoing LE pairings, run LE channel setup, then walk every channel on
 * the connection and advance its state machine (skipping the A2MP fixed
 * channel).  Finally schedule processing of any PDUs that arrived while
 * the connection was being set up.
 */
1498 static void l2cap_conn_ready(struct l2cap_conn *conn)
1500 struct l2cap_chan *chan;
1501 struct hci_conn *hcon = conn->hcon;
1503 BT_DBG("conn %p", conn);
1505 /* For outgoing pairing which doesn't necessarily have an
1506 * associated socket (e.g. mgmt_pair_device).
1508 if (hcon->out && hcon->type == LE_LINK)
1509 smp_conn_security(hcon, hcon->pending_sec_level);
1511 mutex_lock(&conn->chan_lock);
1513 if (hcon->type == LE_LINK)
1514 l2cap_le_conn_ready(conn);
1516 list_for_each_entry(chan, &conn->chan_l, list) {
1518 l2cap_chan_lock(chan);
/* A2MP fixed channel manages itself; leave it alone. */
1520 if (chan->scid == L2CAP_CID_A2MP) {
1521 l2cap_chan_unlock(chan);
1525 if (hcon->type == LE_LINK) {
1526 l2cap_le_start(chan);
1527 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1528 l2cap_chan_ready(chan);
1530 } else if (chan->state == BT_CONNECT) {
1531 l2cap_do_start(chan);
1534 l2cap_chan_unlock(chan);
1537 mutex_unlock(&conn->chan_lock);
/* Drain PDUs queued while the connection was still coming up. */
1539 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1542 /* Notify sockets that we cannot guarantee reliability anymore */
1543 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1545 struct l2cap_chan *chan;
1547 BT_DBG("conn %p", conn);
1549 mutex_lock(&conn->chan_lock);
/* Only channels that demanded reliable delivery get the error. */
1551 list_for_each_entry(chan, &conn->chan_l, list) {
1552 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1553 l2cap_chan_set_err(chan, err);
1556 mutex_unlock(&conn->chan_lock);
/* Information-request timeout: give up waiting for the peer's feature
 * mask, mark the exchange done, and start pending channels anyway.
 */
1559 static void l2cap_info_timeout(struct work_struct *work)
1561 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1564 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1565 conn->info_ident = 0;
1567 l2cap_conn_start(conn);
1572 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1573 * callback is called during registration. The ->remove callback is called
1574 * during unregistration.
1575 * An l2cap_user object can either be explicitly unregistered or when the
1576 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1577 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1578 * External modules must own a reference to the l2cap_conn object if they intend
1579 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1580 * any time if they don't.
/* Register an l2cap_user on a connection; rejects double registration
 * (detected via non-NULL list pointers) and propagates ->probe failures.
 */
1583 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1585 struct hci_dev *hdev = conn->hcon->hdev;
1588 /* We need to check whether l2cap_conn is registered. If it is not, we
1589 * must not register the l2cap_user. l2cap_conn_del() unregisters
1590 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1591 * relies on the parent hci_conn object to be locked. This itself relies
1592 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean this user is already registered. */
1597 if (user->list.next || user->list.prev) {
1602 /* conn->hchan is NULL after l2cap_conn_del() was called */
1608 ret = user->probe(conn, user);
1612 list_add(&user->list, &conn->users);
1616 hci_dev_unlock(hdev);
1619 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister an l2cap_user: unlink it, reset its list pointers (the
 * registered/unregistered marker) and invoke its ->remove callback,
 * all under the hci_dev lock.
 */
1621 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1623 struct hci_dev *hdev = conn->hcon->hdev;
/* NULL list pointers mean the user was never registered; bail out. */
1627 if (!user->list.next || !user->list.prev)
1630 list_del(&user->list);
1631 user->list.next = NULL;
1632 user->list.prev = NULL;
1633 user->remove(conn, user);
1636 hci_dev_unlock(hdev);
1638 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach and notify every registered l2cap_user on the connection;
 * used during connection teardown.
 */
1640 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1642 struct l2cap_user *user;
1644 while (!list_empty(&conn->users)) {
1645 user = list_first_entry(&conn->users, struct l2cap_user, list);
1646 list_del(&user->list);
/* Clear the pointers so a later unregister call sees "not registered". */
1647 user->list.next = NULL;
1648 user->list.prev = NULL;
1649 user->remove(conn, user);
/* Tear down an L2CAP connection: free pending RX state, cancel the RX
 * worker, detach users, close every channel with the given error, drop
 * the HCI channel, cancel timers, destroy pending SMP state and finally
 * release the connection reference.
 */
1653 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1655 struct l2cap_conn *conn = hcon->l2cap_data;
1656 struct l2cap_chan *chan, *l;
1661 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1663 kfree_skb(conn->rx_skb);
1665 skb_queue_purge(&conn->pending_rx);
1667 /* We can not call flush_work(&conn->pending_rx_work) here since we
1668 * might block if we are running on a worker from the same workqueue
1669 * pending_rx_work is waiting on.
1671 if (work_pending(&conn->pending_rx_work))
1672 cancel_work_sync(&conn->pending_rx_work);
1674 l2cap_unregister_all_users(conn);
1676 mutex_lock(&conn->chan_lock);
/* Hold each channel across close so the chan_del doesn't free it under us. */
1679 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1680 l2cap_chan_hold(chan);
1681 l2cap_chan_lock(chan);
1683 l2cap_chan_del(chan, err);
1685 l2cap_chan_unlock(chan);
1687 chan->ops->close(chan);
1688 l2cap_chan_put(chan);
1691 mutex_unlock(&conn->chan_lock);
1693 hci_chan_del(conn->hchan);
1695 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1696 cancel_delayed_work_sync(&conn->info_timer);
1698 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1699 cancel_delayed_work_sync(&conn->security_timer)
1700 smp_chan_destroy(conn);
1703 hcon->l2cap_data = NULL;
1705 l2cap_conn_put(conn);
/* SMP security-procedure timeout: if pairing was still pending, destroy
 * the SMP context and drop the whole connection with ETIMEDOUT.
 */
1708 static void security_timeout(struct work_struct *work)
1710 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1711 security_timer.work);
1713 BT_DBG("conn %p", conn);
1715 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1716 smp_chan_destroy(conn);
1717 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn.  NOTE(review): the kfree(conn) line appears elided
 * from this excerpt.
 */
1721 static void l2cap_conn_free(struct kref *ref)
1723 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1725 hci_conn_put(conn->hcon);
/* Take a reference on the connection object. */
1729 void l2cap_conn_get(struct l2cap_conn *conn)
1731 kref_get(&conn->ref);
1733 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; l2cap_conn_free() runs when the count hits zero. */
1735 void l2cap_conn_put(struct l2cap_conn *conn)
1737 kref_put(&conn->ref, l2cap_conn_free);
1739 EXPORT_SYMBOL(l2cap_conn_put);
1741 /* ---- Socket interface ---- */
1743 /* Find socket with psm and source / destination bdaddr.
1744 * Returns closest match.
/* Like l2cap_global_chan_by_scid() but keyed on PSM, and additionally
 * filters out channels whose source address type doesn't fit the link
 * type (BR/EDR vs LE).  Exact src/dst match wins; wildcard matches are
 * the fallback.  NOTE(review): continue/return lines appear elided.
 */
1746 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1751 struct l2cap_chan *c, *c1 = NULL;
1753 read_lock(&chan_list_lock);
1755 list_for_each_entry(c, &chan_list, global_l) {
1756 if (state && c->state != state)
/* ACL links need a BR/EDR source; LE links need a non-BR/EDR one. */
1759 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1762 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1765 if (c->psm == psm) {
1766 int src_match, dst_match;
1767 int src_any, dst_any;
1770 src_match = !bacmp(&c->src, src);
1771 dst_match = !bacmp(&c->dst, dst);
1772 if (src_match && dst_match) {
1773 read_unlock(&chan_list_lock);
1778 src_any = !bacmp(&c->src, BDADDR_ANY);
1779 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1780 if ((src_match && dst_any) || (src_any && dst_match) ||
1781 (src_any && dst_any))
1786 read_unlock(&chan_list_lock);
/* ERTM monitor-timer expiry: feed the MONITOR_TO event into the TX
 * state machine under the channel lock, then drop the timer's channel
 * reference.  NOTE(review): the early-exit condition between the two
 * unlock/put pairs appears elided from this excerpt.
 */
1791 static void l2cap_monitor_timeout(struct work_struct *work)
1793 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1794 monitor_timer.work);
1796 BT_DBG("chan %p", chan);
1798 l2cap_chan_lock(chan);
1801 l2cap_chan_unlock(chan);
1802 l2cap_chan_put(chan);
1806 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1808 l2cap_chan_unlock(chan);
1809 l2cap_chan_put(chan);
/* ERTM retransmission-timer expiry: feed the RETRANS_TO event into the
 * TX state machine under the channel lock, then drop the timer's
 * channel reference.  Mirrors l2cap_monitor_timeout().
 */
1812 static void l2cap_retrans_timeout(struct work_struct *work)
1814 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1815 retrans_timer.work);
1817 BT_DBG("chan %p", chan);
1819 l2cap_chan_lock(chan);
1822 l2cap_chan_unlock(chan);
1823 l2cap_chan_put(chan);
1827 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1828 l2cap_chan_unlock(chan);
1829 l2cap_chan_put(chan);
/* Streaming-mode transmit: append the segmented SDU to the channel's TX
 * queue and send every queued frame immediately — no acknowledgements
 * or retransmission in this mode.  Each frame gets a control field and,
 * if configured, a trailing CRC16 FCS.
 */
1832 static void l2cap_streaming_send(struct l2cap_chan *chan,
1833 struct sk_buff_head *skbs)
1835 struct sk_buff *skb;
1836 struct l2cap_ctrl *control;
1838 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
1840 if (__chan_is_moving(chan))
1843 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1845 while (!skb_queue_empty(&chan->tx_q)) {
1847 skb = skb_dequeue(&chan->tx_q);
1849 bt_cb(skb)->control.retries = 1;
1850 control = &bt_cb(skb)->control;
1852 control->reqseq = 0;
1853 control->txseq = chan->next_tx_seq;
1855 __pack_control(chan, control, skb);
1857 if (chan->fcs == L2CAP_FCS_CRC16) {
1858 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1859 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1862 l2cap_do_send(chan, skb);
1864 BT_DBG("Sent txseq %u", control->txseq);
1866 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1867 chan->frames_sent++;
/* ERTM transmit loop: send queued I-frames while the remote TX window
 * has room and the TX state machine is in XMIT.  Each frame carries the
 * current reqseq (piggy-backed ack), gets an FCS if configured, and is
 * sent as a clone so the original stays queued for retransmission.
 * Returns the number of frames sent (via 'sent', declaration elided).
 */
1871 static int l2cap_ertm_send(struct l2cap_chan *chan)
1873 struct sk_buff *skb, *tx_skb;
1874 struct l2cap_ctrl *control;
1877 BT_DBG("chan %p", chan);
1879 if (chan->state != BT_CONNECTED)
1882 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1885 if (__chan_is_moving(chan))
1888 while (chan->tx_send_head &&
1889 chan->unacked_frames < chan->remote_tx_win &&
1890 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1892 skb = chan->tx_send_head;
1894 bt_cb(skb)->control.retries = 1;
1895 control = &bt_cb(skb)->control;
1897 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Sending an I-frame implicitly acks everything up to buffer_seq. */
1900 control->reqseq = chan->buffer_seq;
1901 chan->last_acked_seq = chan->buffer_seq;
1902 control->txseq = chan->next_tx_seq;
1904 __pack_control(chan, control, skb);
1906 if (chan->fcs == L2CAP_FCS_CRC16) {
1907 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1908 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1911 /* Clone after data has been modified. Data is assumed to be
1912 read-only (for locking purposes) on cloned sk_buffs.
1914 tx_skb = skb_clone(skb, GFP_KERNEL);
1919 __set_retrans_timer(chan);
1921 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1922 chan->unacked_frames++;
1923 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, if any. */
1926 if (skb_queue_is_last(&chan->tx_q, skb))
1927 chan->tx_send_head = NULL;
1929 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1931 l2cap_do_send(chan, tx_skb);
1932 BT_DBG("Sent txseq %u", control->txseq);
1935 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1936 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.  Each
 * frame's retry count is checked against max_tx (disconnecting on
 * exhaustion), its control field is refreshed with the current reqseq
 * and F-bit, and the FCS is recomputed.  Cloned skbs must be copied
 * first since their data is treated as read-only.
 */
1941 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1943 struct l2cap_ctrl control;
1944 struct sk_buff *skb;
1945 struct sk_buff *tx_skb;
1948 BT_DBG("chan %p", chan);
1950 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1953 if (__chan_is_moving(chan))
1956 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1957 seq = l2cap_seq_list_pop(&chan->retrans_list);
1959 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1961 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1966 bt_cb(skb)->control.retries++;
1967 control = bt_cb(skb)->control;
/* max_tx == 0 means unlimited retries. */
1969 if (chan->max_tx != 0 &&
1970 bt_cb(skb)->control.retries > chan->max_tx) {
1971 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1972 l2cap_send_disconn_req(chan, ECONNRESET);
1973 l2cap_seq_list_clear(&chan->retrans_list);
1977 control.reqseq = chan->buffer_seq;
1978 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1983 if (skb_cloned(skb)) {
1984 /* Cloned sk_buffs are read-only, so we need a
1987 tx_skb = skb_copy(skb, GFP_KERNEL);
1989 tx_skb = skb_clone(skb, GFP_KERNEL);
1993 l2cap_seq_list_clear(&chan->retrans_list);
1997 /* Update skb contents */
1998 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1999 put_unaligned_le32(__pack_extended_control(&control),
2000 tx_skb->data + L2CAP_HDR_SIZE);
2002 put_unaligned_le16(__pack_enhanced_control(&control),
2003 tx_skb->data + L2CAP_HDR_SIZE);
2006 if (chan->fcs == L2CAP_FCS_CRC16) {
2007 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2008 put_unaligned_le16(fcs, skb_put(tx_skb,
2012 l2cap_do_send(chan, tx_skb);
2014 BT_DBG("Resent txseq %d", control.txseq);
2016 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq. */
2020 static void l2cap_retransmit(struct l2cap_chan *chan,
2021 struct l2cap_ctrl *control)
2023 BT_DBG("chan %p, control %p", chan, control);
2025 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2026 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting from control->reqseq: find
 * that frame in the TX queue, then append all txseqs from there up to
 * (but not including) tx_send_head to the retransmission list.
 */
2029 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2030 struct l2cap_ctrl *control)
2032 struct sk_buff *skb;
2034 BT_DBG("chan %p, control %p", chan, control);
2037 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2039 l2cap_seq_list_clear(&chan->retrans_list);
2041 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2044 if (chan->unacked_frames) {
/* First walk: locate the frame with reqseq (or stop at tx_send_head). */
2045 skb_queue_walk(&chan->tx_q, skb) {
2046 if (bt_cb(skb)->control.txseq == control->reqseq ||
2047 skb == chan->tx_send_head)
/* Second walk: queue everything already sent for retransmission. */
2051 skb_queue_walk_from(&chan->tx_q, skb) {
2052 if (skb == chan->tx_send_head)
2055 l2cap_seq_list_append(&chan->retrans_list,
2056 bt_cb(skb)->control.txseq);
2059 l2cap_ertm_resend(chan);
/* Decide how to acknowledge received I-frames: send RNR when locally
 * busy, piggy-back the ack on outgoing I-frames when possible, send an
 * explicit RR once ~3/4 of the ack window is pending, or otherwise just
 * (re)arm the ack timer.
 */
2063 static void l2cap_send_ack(struct l2cap_chan *chan)
2065 struct l2cap_ctrl control;
2066 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2067 chan->last_acked_seq);
2070 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2071 chan, chan->last_acked_seq, chan->buffer_seq);
2073 memset(&control, 0, sizeof(control));
2076 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2077 chan->rx_state == L2CAP_RX_STATE_RECV) {
2078 __clear_ack_timer(chan);
2079 control.super = L2CAP_SUPER_RNR;
2080 control.reqseq = chan->buffer_seq;
2081 l2cap_send_sframe(chan, &control);
2083 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2084 l2cap_ertm_send(chan);
2085 /* If any i-frames were sent, they included an ack */
2086 if (chan->buffer_seq == chan->last_acked_seq)
2090 /* Ack now if the window is 3/4ths full.
2091 * Calculate without mul or div
2093 threshold = chan->ack_win;
/* threshold = ack_win * 3 (shift-add); a later elided shift halves it. */
2094 threshold += threshold << 1;
2097 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2100 if (frames_to_ack >= threshold) {
2101 __clear_ack_timer(chan);
2102 control.super = L2CAP_SUPER_RR;
2103 control.reqseq = chan->buffer_seq;
2104 l2cap_send_sframe(chan, &control);
2109 __set_ack_timer(chan);
/* Copy user data from msg's iovec into skb, spilling the remainder into
 * continuation fragments chained on skb's frag_list, each at most
 * conn->mtu bytes.  The parent skb's len/data_len are kept in sync.
 */
2113 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2114 struct msghdr *msg, int len,
2115 int count, struct sk_buff *skb)
2117 struct l2cap_conn *conn = chan->conn;
2118 struct sk_buff **frag;
/* First chunk goes directly into the head skb. */
2121 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2127 /* Continuation fragments (no L2CAP header) */
2128 frag = &skb_shinfo(skb)->frag_list;
2130 struct sk_buff *tmp;
2132 count = min_t(unsigned int, conn->mtu, len);
2134 tmp = chan->ops->alloc_skb(chan, count,
2135 msg->msg_flags & MSG_DONTWAIT);
2137 return PTR_ERR(tmp);
2141 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2144 (*frag)->priority = skb->priority;
2149 skb->len += (*frag)->len;
2150 skb->data_len += (*frag)->len;
2152 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload
 * copied from the caller's iovec.  Returns the skb or an ERR_PTR.
 */
2158 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2159 struct msghdr *msg, size_t len,
2162 struct l2cap_conn *conn = chan->conn;
2163 struct sk_buff *skb;
2164 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2165 struct l2cap_hdr *lh;
2167 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2168 __le16_to_cpu(chan->psm), len, priority);
2170 count = min_t(unsigned int, (conn->mtu - hlen), len);
2172 skb = chan->ops->alloc_skb(chan, count + hlen,
2173 msg->msg_flags & MSG_DONTWAIT);
2177 skb->priority = priority;
2179 /* Create L2CAP header */
2180 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2181 lh->cid = cpu_to_le16(chan->dcid);
/* Length covers the payload plus the 2-byte PSM field. */
2182 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2184 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2185 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2186 if (unlikely(err < 0)) {
2188 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * payload from the caller's iovec.  Returns the skb or an ERR_PTR.
 */
2193 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2194 struct msghdr *msg, size_t len,
2197 struct l2cap_conn *conn = chan->conn;
2198 struct sk_buff *skb;
2200 struct l2cap_hdr *lh;
2202 BT_DBG("chan %p len %zu", chan, len);
2204 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2206 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2207 msg->msg_flags & MSG_DONTWAIT);
2211 skb->priority = priority;
2213 /* Create L2CAP header */
2214 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2215 lh->cid = cpu_to_le16(chan->dcid);
2216 lh->len = cpu_to_le16(len);
2218 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2219 if (unlikely(err < 0)) {
2221 return ERR_PTR(err);
/* Build one ERTM/streaming I-frame: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU-length field for
 * the first segment, then the payload.  FCS space is reserved in hlen
 * but appended later.  Returns the skb or an ERR_PTR.
 */
2226 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2227 struct msghdr *msg, size_t len,
2230 struct l2cap_conn *conn = chan->conn;
2231 struct sk_buff *skb;
2232 int err, count, hlen;
2233 struct l2cap_hdr *lh;
2235 BT_DBG("chan %p len %zu", chan, len);
2238 return ERR_PTR(-ENOTCONN);
2240 hlen = __ertm_hdr_size(chan);
/* Only the first segment of an SDU carries the SDU-length field. */
2243 hlen += L2CAP_SDULEN_SIZE;
2245 if (chan->fcs == L2CAP_FCS_CRC16)
2246 hlen += L2CAP_FCS_SIZE;
2248 count = min_t(unsigned int, (conn->mtu - hlen), len);
2250 skb = chan->ops->alloc_skb(chan, count + hlen,
2251 msg->msg_flags & MSG_DONTWAIT);
2255 /* Create L2CAP header */
2256 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2257 lh->cid = cpu_to_le16(chan->dcid);
2258 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2260 /* Control header is populated later */
2261 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2262 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2264 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2267 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2269 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2270 if (unlikely(err < 0)) {
2272 return ERR_PTR(err);
2275 bt_cb(skb)->control.fcs = chan->fcs;
2276 bt_cb(skb)->control.retries = 0;
/* Split an SDU into ERTM/streaming PDUs sized to fit one HCI fragment
 * (and the remote MPS), tagging each with the correct SAR value
 * (UNSEGMENTED, or START/CONTINUE/END) and queueing them on seg_queue.
 * On allocation failure the partial queue is purged and the error
 * returned.
 */
2280 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2281 struct sk_buff_head *seg_queue,
2282 struct msghdr *msg, size_t len)
2284 struct sk_buff *skb;
2289 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2291 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2292 * so fragmented skbs are not used. The HCI layer's handling
2293 * of fragmented skbs is not compatible with ERTM's queueing.
2296 /* PDU size is derived from the HCI MTU */
2297 pdu_len = chan->conn->mtu;
2299 /* Constrain PDU size for BR/EDR connections */
2301 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2303 /* Adjust for largest possible L2CAP overhead. */
2305 pdu_len -= L2CAP_FCS_SIZE;
2307 pdu_len -= __ertm_hdr_size(chan);
2309 /* Remote device may have requested smaller PDUs */
2310 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2312 if (len <= pdu_len) {
2313 sar = L2CAP_SAR_UNSEGMENTED;
2317 sar = L2CAP_SAR_START;
/* First segment carries the SDU length, reducing its payload room. */
2319 pdu_len -= L2CAP_SDULEN_SIZE;
2323 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2326 __skb_queue_purge(seg_queue);
2327 return PTR_ERR(skb);
2330 bt_cb(skb)->control.sar = sar;
2331 __skb_queue_tail(seg_queue, skb);
/* After the START segment, reclaim the SDU-length bytes. */
2336 pdu_len += L2CAP_SDULEN_SIZE;
2339 if (len <= pdu_len) {
2340 sar = L2CAP_SAR_END;
2343 sar = L2CAP_SAR_CONTINUE;
/* Build one LE credit-based flow-control PDU: L2CAP header, an optional
 * SDU-length field for the first segment, then the payload from the
 * iovec.  Returns the skb or an ERR_PTR.
 */
2350 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2352 size_t len, u16 sdulen)
2354 struct l2cap_conn *conn = chan->conn;
2355 struct sk_buff *skb;
2356 int err, count, hlen;
2357 struct l2cap_hdr *lh;
2359 BT_DBG("chan %p len %zu", chan, len);
2362 return ERR_PTR(-ENOTCONN);
2364 hlen = L2CAP_HDR_SIZE;
/* First segment of an SDU also carries the total SDU length. */
2367 hlen += L2CAP_SDULEN_SIZE;
2369 count = min_t(unsigned int, (conn->mtu - hlen), len);
2371 skb = chan->ops->alloc_skb(chan, count + hlen,
2372 msg->msg_flags & MSG_DONTWAIT);
2376 /* Create L2CAP header */
2377 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2378 lh->cid = cpu_to_le16(chan->dcid);
2379 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2382 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2384 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2385 if (unlikely(err < 0)) {
2387 return ERR_PTR(err);
/* Split an SDU into LE flow-control PDUs bounded by the connection MTU
 * and the remote MPS, queueing them on seg_queue.  The first PDU loses
 * SDULEN bytes of payload to the SDU-length field; later PDUs regain
 * them.  Purges the queue and returns the error on allocation failure.
 */
2393 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2394 struct sk_buff_head *seg_queue,
2395 struct msghdr *msg, size_t len)
2397 struct sk_buff *skb;
2401 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2403 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2405 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2408 pdu_len -= L2CAP_SDULEN_SIZE;
2414 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2416 __skb_queue_purge(seg_queue);
2417 return PTR_ERR(skb);
2420 __skb_queue_tail(seg_queue, skb);
2426 pdu_len += L2CAP_SDULEN_SIZE;
/* Entry point for sending user data on a channel.  Dispatches on
 * channel type and mode: connectionless PDU, LE credit-based
 * flow-control, basic mode, or ERTM/streaming (segment first, then hand
 * to the TX state machine or streaming sender).  Because the channel
 * lock may be dropped while allocating skbs, the BT_CONNECTED state is
 * re-checked after each allocation.
 */
2433 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2436 struct sk_buff *skb;
2438 struct sk_buff_head seg_queue;
2443 /* Connectionless channel */
2444 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2445 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2447 return PTR_ERR(skb);
2449 /* Channel lock is released before requesting new skb and then
2450 * reacquired thus we need to recheck channel state.
2452 if (chan->state != BT_CONNECTED) {
2457 l2cap_do_send(chan, skb);
2461 switch (chan->mode) {
2462 case L2CAP_MODE_LE_FLOWCTL:
2463 /* Check outgoing MTU */
2464 if (len > chan->omtu)
2467 if (!chan->tx_credits)
2470 __skb_queue_head_init(&seg_queue);
2472 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2474 if (chan->state != BT_CONNECTED) {
2475 __skb_queue_purge(&seg_queue);
2482 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
/* Each transmitted PDU consumes one LE flow-control credit. */
2484 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2485 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2489 if (!chan->tx_credits)
2490 chan->ops->suspend(chan);
2496 case L2CAP_MODE_BASIC:
2497 /* Check outgoing MTU */
2498 if (len > chan->omtu)
2501 /* Create a basic PDU */
2502 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2504 return PTR_ERR(skb);
2506 /* Channel lock is released before requesting new skb and then
2507 * reacquired thus we need to recheck channel state.
2509 if (chan->state != BT_CONNECTED) {
2514 l2cap_do_send(chan, skb);
2518 case L2CAP_MODE_ERTM:
2519 case L2CAP_MODE_STREAMING:
2520 /* Check outgoing MTU */
2521 if (len > chan->omtu) {
2526 __skb_queue_head_init(&seg_queue);
2528 /* Do segmentation before calling in to the state machine,
2529 * since it's possible to block while waiting for memory
2532 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2534 /* The channel could have been closed while segmenting,
2535 * check that it is still connected.
2537 if (chan->state != BT_CONNECTED) {
2538 __skb_queue_purge(&seg_queue);
2545 if (chan->mode == L2CAP_MODE_ERTM)
2546 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2548 l2cap_streaming_send(chan, &seg_queue);
2552 /* If the skbs were not queued for sending, they'll still be in
2553 * seg_queue and need to be purged.
2555 __skb_queue_purge(&seg_queue);
2559 BT_DBG("bad state %1.1x", chan->mode);
2566 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2568 struct l2cap_ctrl control;
2571 BT_DBG("chan %p, txseq %u", chan, txseq);
2573 memset(&control, 0, sizeof(control));
2575 control.super = L2CAP_SUPER_SREJ;
2577 for (seq = chan->expected_tx_seq; seq != txseq;
2578 seq = __next_seq(chan, seq)) {
2579 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2580 control.reqseq = seq;
2581 l2cap_send_sframe(chan, &control);
2582 l2cap_seq_list_append(&chan->srej_list, seq);
2586 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number on srej_list, if the list is non-empty.
 */
2589 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2591 struct l2cap_ctrl control;
2593 BT_DBG("chan %p", chan);
2595 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2598 memset(&control, 0, sizeof(control));
2600 control.super = L2CAP_SUPER_SREJ;
2601 control.reqseq = chan->srej_list.tail;
2602 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding sequence number except txseq,
 * rotating each popped entry back onto srej_list.  The initial head is
 * captured so the rotation stops after exactly one full pass.
 */
2605 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2607 struct l2cap_ctrl control;
2611 BT_DBG("chan %p, txseq %u", chan, txseq);
2613 memset(&control, 0, sizeof(control));
2615 control.super = L2CAP_SUPER_SREJ;
2617 /* Capture initial list head to allow only one pass through the list. */
2618 initial_head = chan->srej_list.head;
2621 seq = l2cap_seq_list_pop(&chan->srej_list);
2622 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2625 control.reqseq = seq;
2626 l2cap_send_sframe(chan, &control);
2627 l2cap_seq_list_append(&chan->srej_list, seq);
2628 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every TX-queue frame from
 * expected_ack_seq up to (but not including) reqseq, update the unacked
 * count and expected_ack_seq, and stop the retransmission timer once
 * nothing is outstanding.
 */
2631 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2633 struct sk_buff *acked_skb;
2636 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing to do if no frames are outstanding or the ack is a duplicate. */
2638 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2641 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2642 chan->expected_ack_seq, chan->unacked_frames);
2644 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2645 ackseq = __next_seq(chan, ackseq)) {
2647 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2649 skb_unlink(acked_skb, &chan->tx_q);
2650 kfree_skb(acked_skb);
2651 chan->unacked_frames--;
2655 chan->expected_ack_seq = reqseq;
2657 if (chan->unacked_frames == 0)
2658 __clear_retrans_timer(chan);
2660 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to
 * buffer_seq, discard the SREJ bookkeeping and any out-of-order frames,
 * and return to the plain RECV state.
 */
2663 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2665 BT_DBG("chan %p", chan);
2667 chan->expected_tx_seq = chan->buffer_seq;
2668 l2cap_seq_list_clear(&chan->srej_list);
2669 skb_queue_purge(&chan->srej_q);
2670 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit state machine, XMIT state: send new data immediately,
 * enter/leave LOCAL_BUSY, process acks, and transition to WAIT_F when a
 * poll is sent (explicit poll or retransmission timeout).
 */
2673 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2674 struct l2cap_ctrl *control,
2675 struct sk_buff_head *skbs, u8 event)
2677 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2681 case L2CAP_EV_DATA_REQUEST:
2682 if (chan->tx_send_head == NULL)
2683 chan->tx_send_head = skb_peek(skbs);
2685 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2686 l2cap_ertm_send(chan);
2688 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2689 BT_DBG("Enter LOCAL_BUSY");
2690 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2692 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2693 /* The SREJ_SENT state must be aborted if we are to
2694 * enter the LOCAL_BUSY state.
2696 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() emits the RNR while we're locally busy. */
2699 l2cap_send_ack(chan);
2702 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2703 BT_DBG("Exit LOCAL_BUSY");
2704 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2706 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2707 struct l2cap_ctrl local_control;
2709 memset(&local_control, 0, sizeof(local_control));
2710 local_control.sframe = 1;
2711 local_control.super = L2CAP_SUPER_RR;
2712 local_control.poll = 1;
2713 local_control.reqseq = chan->buffer_seq;
2714 l2cap_send_sframe(chan, &local_control);
2716 chan->retry_count = 1;
2717 __set_monitor_timer(chan);
2718 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2721 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2722 l2cap_process_reqseq(chan, control->reqseq);
2724 case L2CAP_EV_EXPLICIT_POLL:
2725 l2cap_send_rr_or_rnr(chan, 1);
2726 chan->retry_count = 1;
2727 __set_monitor_timer(chan);
2728 __clear_ack_timer(chan);
2729 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2731 case L2CAP_EV_RETRANS_TO:
2732 l2cap_send_rr_or_rnr(chan, 1);
2733 chan->retry_count = 1;
2734 __set_monitor_timer(chan);
2735 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2737 case L2CAP_EV_RECV_FBIT:
2738 /* Nothing to process */
/* ERTM transmit state machine, WAIT_F state: a poll has been sent and
 * we are waiting for a frame carrying the Final bit.  New data is
 * queued but not transmitted; receipt of the F-bit returns the channel
 * to XMIT, and the monitor timeout re-polls up to max_tx times before
 * disconnecting.
 */
2745 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2746 struct l2cap_ctrl *control,
2747 struct sk_buff_head *skbs, u8 event)
2749 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2753 case L2CAP_EV_DATA_REQUEST:
2754 if (chan->tx_send_head == NULL)
2755 chan->tx_send_head = skb_peek(skbs);
2756 /* Queue data, but don't send. */
2757 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2759 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2760 BT_DBG("Enter LOCAL_BUSY");
2761 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2763 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2764 /* The SREJ_SENT state must be aborted if we are to
2765 * enter the LOCAL_BUSY state.
2767 l2cap_abort_rx_srej_sent(chan);
2770 l2cap_send_ack(chan);
2773 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2774 BT_DBG("Exit LOCAL_BUSY");
2775 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2777 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2778 struct l2cap_ctrl local_control;
2779 memset(&local_control, 0, sizeof(local_control));
2780 local_control.sframe = 1;
2781 local_control.super = L2CAP_SUPER_RR;
2782 local_control.poll = 1;
2783 local_control.reqseq = chan->buffer_seq;
2784 l2cap_send_sframe(chan, &local_control);
2786 chan->retry_count = 1;
2787 __set_monitor_timer(chan);
2788 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2791 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2792 l2cap_process_reqseq(chan, control->reqseq);
2796 case L2CAP_EV_RECV_FBIT:
2797 if (control && control->final) {
2798 __clear_monitor_timer(chan);
2799 if (chan->unacked_frames > 0)
2800 __set_retrans_timer(chan);
2801 chan->retry_count = 0;
2802 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* Fixed malformed format specifier: was "0x2.2%x" (prints literal
 * "0x2.2" then bare hex); "0x%2.2x" matches the file's idiom.
 */
2803 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2806 case L2CAP_EV_EXPLICIT_POLL:
/* Already polling; ignore. */
2809 case L2CAP_EV_MONITOR_TO:
2810 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2811 l2cap_send_rr_or_rnr(chan, 1);
2812 __set_monitor_timer(chan);
2813 chan->retry_count++;
2815 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a TX state-machine event to the handler for the channel's
 * current transmit state (XMIT or WAIT_F).
 */
2823 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2824 struct sk_buff_head *skbs, u8 event)
2826 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2827 chan, control, skbs, event, chan->tx_state);
2829 switch (chan->tx_state) {
2830 case L2CAP_TX_STATE_XMIT:
2831 l2cap_tx_state_xmit(chan, control, skbs, event);
2833 case L2CAP_TX_STATE_WAIT_F:
2834 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received reqseq/F-bit to the TX state machine. */
2842 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2843 struct l2cap_ctrl *control)
2845 BT_DBG("chan %p, control %p", chan, control);
2846 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only the F-bit of a received frame to the TX state machine. */
2849 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2850 struct l2cap_ctrl *control)
2852 BT_DBG("chan %p, control %p", chan, control);
2853 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2856 /* Copy frame to all raw sockets on that connection */
2857 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2859 struct sk_buff *nskb;
2860 struct l2cap_chan *chan;
2862 BT_DBG("conn %p", conn);
2864 mutex_lock(&conn->chan_lock);
2866 list_for_each_entry(chan, &conn->chan_l, list) {
2867 if (chan->chan_type != L2CAP_CHAN_RAW)
2870 /* Don't send frame to the channel it came from */
2871 if (bt_cb(skb)->chan == chan)
/* Each raw channel gets its own clone of the frame. */
2874 nskb = skb_clone(skb, GFP_KERNEL);
2877 if (chan->ops->recv(chan, nskb))
2881 mutex_unlock(&conn->chan_lock);
2884 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-command skb: L2CAP header on the
 * signalling CID (LE or BR/EDR), command header, then the payload —
 * spilled into frag_list continuation skbs if it exceeds the MTU.
 * Returns NULL on allocation failure or an undersized MTU.
 */
2885 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2886 u8 ident, u16 dlen, void *data)
2888 struct sk_buff *skb, **frag;
2889 struct l2cap_cmd_hdr *cmd;
2890 struct l2cap_hdr *lh;
2893 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2894 conn, code, ident, dlen);
2896 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2899 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2900 count = min_t(unsigned int, conn->mtu, len);
2902 skb = bt_skb_alloc(count, GFP_KERNEL);
2906 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2907 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2909 if (conn->hcon->type == LE_LINK)
2910 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2912 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2914 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2917 cmd->len = cpu_to_le16(dlen);
/* First chunk of the payload fits in the head skb. */
2920 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2921 memcpy(skb_put(skb, count), data, count);
2927 /* Continuation fragments (no L2CAP header) */
2928 frag = &skb_shinfo(skb)->frag_list;
2930 count = min_t(unsigned int, conn->mtu, len);
2932 *frag = bt_skb_alloc(count, GFP_KERNEL);
2936 memcpy(skb_put(*frag, count), data, count);
2941 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: reports its type and payload
 * length, and fetches the value as u8/u16/u32 by size, or as a pointer
 * (cast to unsigned long) for larger option bodies. Returns the total
 * option length so the caller can advance through the option list.
 * NOTE(review): no bounds check on opt->len vs remaining buffer is
 * visible in this view — callers appear to rely on the returned len.
 */
2951 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2954 struct l2cap_conf_opt *opt = *ptr;
2957 len = L2CAP_CONF_OPT_SIZE + opt->len;
2965 *val = *((u8 *) opt->val);
2969 *val = get_unaligned_le16(opt->val);
2973 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes */
2977 *val = (unsigned long) opt->val;
2981 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * the pointer past it. val is stored little-endian for 1/2/4-byte
 * options; for larger options val is interpreted as a pointer to the
 * bytes to copy.
 */
2985 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2987 struct l2cap_conf_opt *opt = *ptr;
2989 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2996 *((u8 *) opt->val) = val;
3000 put_unaligned_le16(val, opt->val);
3004 put_unaligned_le32(val, opt->val);
3008 memcpy(opt->val, (void *) val, len);
3012 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification option from the channel's local
 * QoS parameters and append it via l2cap_add_conf_opt. ERTM uses the
 * channel's own service type; streaming mode forces best-effort.
 */
3015 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3017 struct l2cap_conf_efs efs;
3019 switch (chan->mode) {
3020 case L2CAP_MODE_ERTM:
3021 efs.id = chan->local_id;
3022 efs.stype = chan->local_stype;
3023 efs.msdu = cpu_to_le16(chan->local_msdu);
3024 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3025 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3026 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3029 case L2CAP_MODE_STREAMING:
3031 efs.stype = L2CAP_SERV_BESTEFFORT;
3032 efs.msdu = cpu_to_le16(chan->local_msdu);
3033 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3042 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3043 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if frames were received
 * since the last acknowledgement (buffer_seq has moved past
 * last_acked_seq), send an RR/RNR to ack them. Drops the channel
 * reference taken when the work was scheduled.
 */
3046 static void l2cap_ack_timeout(struct work_struct *work)
3048 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3052 BT_DBG("chan %p", chan);
3054 l2cap_chan_lock(chan);
3056 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3057 chan->last_acked_seq);
3060 l2cap_send_rr_or_rnr(chan, 0);
3062 l2cap_chan_unlock(chan);
3063 l2cap_chan_put(chan);
/* Initialise per-channel ERTM/streaming state: zero all sequence
 * counters, reset AMP move state to stable on BR/EDR, and — for ERTM
 * only — set the RX/TX state machines, arm the retransmit/monitor/ack
 * timers, and allocate the SREJ and retransmit sequence lists.
 * Returns 0 on success or the seq-list allocation error; on failure of
 * the second list the first is freed to avoid a leak.
 */
3066 int l2cap_ertm_init(struct l2cap_chan *chan)
3070 chan->next_tx_seq = 0;
3071 chan->expected_tx_seq = 0;
3072 chan->expected_ack_seq = 0;
3073 chan->unacked_frames = 0;
3074 chan->buffer_seq = 0;
3075 chan->frames_sent = 0;
3076 chan->last_acked_seq = 0;
3078 chan->sdu_last_frag = NULL;
3081 skb_queue_head_init(&chan->tx_q);
3083 chan->local_amp_id = AMP_ID_BREDR;
3084 chan->move_id = AMP_ID_BREDR;
3085 chan->move_state = L2CAP_MOVE_STABLE;
3086 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Everything below is ERTM-specific */
3088 if (chan->mode != L2CAP_MODE_ERTM)
3091 chan->rx_state = L2CAP_RX_STATE_RECV;
3092 chan->tx_state = L2CAP_TX_STATE_XMIT;
3094 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3095 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3096 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3098 skb_queue_head_init(&chan->srej_q);
3100 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3104 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3106 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode only if the remote's feature
 * mask supports it; otherwise fall back to basic mode.
 */
3111 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3114 case L2CAP_MODE_STREAMING:
3115 case L2CAP_MODE_ERTM:
3116 if (l2cap_mode_supported(mode, remote_feat_mask))
3120 return L2CAP_MODE_BASIC;
/* Extended window size is usable only with high-speed (AMP) enabled and
 * the remote advertising L2CAP_FEAT_EXT_WINDOW.
 */
3124 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3126 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow spec is usable only with high-speed (AMP) enabled and
 * the remote advertising L2CAP_FEAT_EXT_FLOW.
 */
3129 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3131 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts. On an
 * AMP link the timeouts are derived from the controller's best-effort
 * flush timeout (converted to ms, scaled, clamped to 16 bits); on
 * BR/EDR the spec defaults are used.
 */
3134 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3135 struct l2cap_conf_rfc *rfc)
3137 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3138 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3140 /* Class 1 devices have must have ERTM timeouts
3141 * exceeding the Link Supervision Timeout. The
3142 * default Link Supervision Timeout for AMP
3143 * controllers is 10 seconds.
3145 * Class 1 devices use 0xffffffff for their
3146 * best-effort flush timeout, so the clamping logic
3147 * will result in a timeout that meets the above
3148 * requirement. ERTM timeouts are 16-bit values, so
3149 * the maximum timeout is 65.535 seconds.
3152 /* Convert timeout to milliseconds and round */
3153 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3155 /* This is the recommended formula for class 2 devices
3156 * that start ERTM timers when packets are sent to the
3159 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (clamped assignment elided from view) */
3161 if (ertm_to > 0xffff)
3164 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3165 rfc->monitor_timeout = rfc->retrans_timeout;
3167 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3168 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Pick the TX window: if the requested window exceeds the default and
 * extended window size is supported, enable the extended control field;
 * otherwise clamp tx_win to the default. ack_win starts equal to
 * tx_win.
 */
3172 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3174 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3175 __l2cap_ews_supported(chan->conn)) {
3176 /* use extended control field */
3177 set_bit(FLAG_EXT_CTRL, &chan->flags);
3178 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3180 chan->tx_win = min_t(u16, chan->tx_win,
3181 L2CAP_DEFAULT_TX_WINDOW);
3182 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3184 chan->ack_win = chan->tx_win;
/* Build the local Configuration Request payload into 'data'. On the
 * first request, downgrades the desired mode if the remote lacks
 * support (unless the channel is a state-2 device) and latches EFS
 * support. Then emits MTU (if non-default), an RFC option matching the
 * chosen mode, and mode-dependent EFS/EWS/FCS options. Returns the
 * built request length (return expression elided from this view).
 */
3187 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3189 struct l2cap_conf_req *req = data;
3190 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3191 void *ptr = req->data;
3194 BT_DBG("chan %p", chan);
/* Mode (re)negotiation only happens on the very first config exchange */
3196 if (chan->num_conf_req || chan->num_conf_rsp)
3199 switch (chan->mode) {
3200 case L2CAP_MODE_STREAMING:
3201 case L2CAP_MODE_ERTM:
3202 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3205 if (__l2cap_efs_supported(chan->conn))
3206 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3210 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3215 if (chan->imtu != L2CAP_DEFAULT_MTU)
3216 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3218 switch (chan->mode) {
3219 case L2CAP_MODE_BASIC:
/* Only bother sending an explicit basic-mode RFC if the remote
 * knows about ERTM or streaming at all.
 */
3220 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3221 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3224 rfc.mode = L2CAP_MODE_BASIC;
3226 rfc.max_transmit = 0;
3227 rfc.retrans_timeout = 0;
3228 rfc.monitor_timeout = 0;
3229 rfc.max_pdu_size = 0;
3231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3232 (unsigned long) &rfc);
3235 case L2CAP_MODE_ERTM:
3236 rfc.mode = L2CAP_MODE_ERTM;
3237 rfc.max_transmit = chan->max_tx;
3239 __l2cap_set_ertm_timeouts(chan, &rfc);
/* MPS bounded by link MTU minus L2CAP overheads */
3241 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3242 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3244 rfc.max_pdu_size = cpu_to_le16(size);
3246 l2cap_txwin_setup(chan);
3248 rfc.txwin_size = min_t(u16, chan->tx_win,
3249 L2CAP_DEFAULT_TX_WINDOW);
3251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3252 (unsigned long) &rfc);
3254 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3255 l2cap_add_opt_efs(&ptr, chan);
3257 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3258 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to disable FCS when allowed and desired */
3261 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3262 if (chan->fcs == L2CAP_FCS_NONE ||
3263 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3264 chan->fcs = L2CAP_FCS_NONE;
3265 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3270 case L2CAP_MODE_STREAMING:
3271 l2cap_txwin_setup(chan);
3272 rfc.mode = L2CAP_MODE_STREAMING;
3274 rfc.max_transmit = 0;
3275 rfc.retrans_timeout = 0;
3276 rfc.monitor_timeout = 0;
3278 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3279 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3281 rfc.max_pdu_size = cpu_to_le16(size);
3283 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3284 (unsigned long) &rfc);
3286 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3287 l2cap_add_opt_efs(&ptr, chan);
3289 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3290 if (chan->fcs == L2CAP_FCS_NONE ||
3291 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3292 chan->fcs = L2CAP_FCS_NONE;
3293 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3299 req->dcid = cpu_to_le16(chan->dcid);
3300 req->flags = cpu_to_le16(0);
/* Parse the remote's accumulated Configuration Request (chan->conf_req)
 * and build our Configuration Response into 'data'. First pass walks
 * the option list recording MTU/flush/RFC/FCS/EFS/EWS values and
 * echoing unknown non-hint options back with CONF_UNKNOWN. Then the
 * channel mode is reconciled with the requested RFC mode (rejecting
 * with -ECONNREFUSED where renegotiation is impossible), and the output
 * options (MTU, RFC, EFS) are written with an overall result of
 * SUCCESS / UNACCEPT / PENDING. Returns the response length (return
 * expression elided from this view).
 */
3305 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3307 struct l2cap_conf_rsp *rsp = data;
3308 void *ptr = rsp->data;
3309 void *req = chan->conf_req;
3310 int len = chan->conf_len;
3311 int type, hint, olen;
3313 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3314 struct l2cap_conf_efs efs;
3316 u16 mtu = L2CAP_DEFAULT_MTU;
3317 u16 result = L2CAP_CONF_SUCCESS;
3320 BT_DBG("chan %p", chan);
/* First pass: collect every option from the request */
3322 while (len >= L2CAP_CONF_OPT_SIZE) {
3323 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3325 hint = type & L2CAP_CONF_HINT;
3326 type &= L2CAP_CONF_MASK;
3329 case L2CAP_CONF_MTU:
3333 case L2CAP_CONF_FLUSH_TO:
3334 chan->flush_to = val;
3337 case L2CAP_CONF_QOS:
3340 case L2CAP_CONF_RFC:
3341 if (olen == sizeof(rfc))
3342 memcpy(&rfc, (void *) val, olen);
3345 case L2CAP_CONF_FCS:
3346 if (val == L2CAP_FCS_NONE)
3347 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3350 case L2CAP_CONF_EFS:
3352 if (olen == sizeof(efs))
3353 memcpy(&efs, (void *) val, olen);
3356 case L2CAP_CONF_EWS:
/* EWS requires AMP/high-speed support on this connection */
3357 if (!chan->conn->hs_enabled)
3358 return -ECONNREFUSED;
3360 set_bit(FLAG_EXT_CTRL, &chan->flags);
3361 set_bit(CONF_EWS_RECV, &chan->conf_state);
3362 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3363 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as unaccepted */
3370 result = L2CAP_CONF_UNKNOWN;
3371 *((u8 *) ptr++) = type;
/* Mode reconciliation happens only on the first exchange */
3376 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3379 switch (chan->mode) {
3380 case L2CAP_MODE_STREAMING:
3381 case L2CAP_MODE_ERTM:
3382 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3383 chan->mode = l2cap_select_mode(rfc.mode,
3384 chan->conn->feat_mask);
3389 if (__l2cap_efs_supported(chan->conn))
3390 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3392 return -ECONNREFUSED;
3395 if (chan->mode != rfc.mode)
3396 return -ECONNREFUSED;
/* Mode mismatch after reconciliation: unaccept with our mode */
3402 if (chan->mode != rfc.mode) {
3403 result = L2CAP_CONF_UNACCEPT;
3404 rfc.mode = chan->mode;
3406 if (chan->num_conf_rsp == 1)
3407 return -ECONNREFUSED;
3409 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3410 (unsigned long) &rfc);
3413 if (result == L2CAP_CONF_SUCCESS) {
3414 /* Configure output options and let the other side know
3415 * which ones we don't like. */
3417 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3418 result = L2CAP_CONF_UNACCEPT;
3421 set_bit(CONF_MTU_DONE, &chan->conf_state);
3423 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type compatibility check */
3426 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3427 efs.stype != L2CAP_SERV_NOTRAFIC &&
3428 efs.stype != chan->local_stype) {
3430 result = L2CAP_CONF_UNACCEPT;
3432 if (chan->num_conf_req >= 1)
3433 return -ECONNREFUSED;
3435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3437 (unsigned long) &efs);
3439 /* Send PENDING Conf Rsp */
3440 result = L2CAP_CONF_PENDING;
3441 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3446 case L2CAP_MODE_BASIC:
3447 chan->fcs = L2CAP_FCS_NONE;
3448 set_bit(CONF_MODE_DONE, &chan->conf_state);
3451 case L2CAP_MODE_ERTM:
3452 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3453 chan->remote_tx_win = rfc.txwin_size;
3455 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3457 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote MPS to what fits in our link MTU */
3459 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3460 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3461 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3462 rfc.max_pdu_size = cpu_to_le16(size);
3463 chan->remote_mps = size;
3465 __l2cap_set_ertm_timeouts(chan, &rfc);
3467 set_bit(CONF_MODE_DONE, &chan->conf_state);
3469 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3470 sizeof(rfc), (unsigned long) &rfc);
3472 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3473 chan->remote_id = efs.id;
3474 chan->remote_stype = efs.stype;
3475 chan->remote_msdu = le16_to_cpu(efs.msdu);
3476 chan->remote_flush_to =
3477 le32_to_cpu(efs.flush_to);
3478 chan->remote_acc_lat =
3479 le32_to_cpu(efs.acc_lat);
3480 chan->remote_sdu_itime =
3481 le32_to_cpu(efs.sdu_itime);
3482 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3484 (unsigned long) &efs);
3488 case L2CAP_MODE_STREAMING:
3489 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3490 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3491 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3492 rfc.max_pdu_size = cpu_to_le16(size);
3493 chan->remote_mps = size;
3495 set_bit(CONF_MODE_DONE, &chan->conf_state);
3497 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3498 (unsigned long) &rfc);
/* Any other mode at this point is unacceptable */
3503 result = L2CAP_CONF_UNACCEPT;
3505 memset(&rfc, 0, sizeof(rfc));
3506 rfc.mode = chan->mode;
3509 if (result == L2CAP_CONF_SUCCESS)
3510 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3512 rsp->scid = cpu_to_le16(chan->dcid);
3513 rsp->result = cpu_to_le16(result);
3514 rsp->flags = cpu_to_le16(0);
/* Parse the remote's Configuration Response and rebuild our request
 * ('data') with adjusted options for the next round. Each option in
 * the response is inspected and re-emitted (MTU clamped up to the
 * minimum on UNACCEPT, RFC/EWS/EFS validated against local state).
 * On SUCCESS/PENDING the negotiated ERTM or streaming parameters are
 * committed to the channel. Returns the new request length; error
 * paths return -ECONNREFUSED.
 */
3519 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3520 void *data, u16 *result)
3522 struct l2cap_conf_req *req = data;
3523 void *ptr = req->data;
3526 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3527 struct l2cap_conf_efs efs;
3529 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3531 while (len >= L2CAP_CONF_OPT_SIZE) {
3532 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3535 case L2CAP_CONF_MTU:
/* Remote rejected our MTU: raise to the minimum and retry */
3536 if (val < L2CAP_DEFAULT_MIN_MTU) {
3537 *result = L2CAP_CONF_UNACCEPT;
3538 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3544 case L2CAP_CONF_FLUSH_TO:
3545 chan->flush_to = val;
3546 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3550 case L2CAP_CONF_RFC:
3551 if (olen == sizeof(rfc))
3552 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot change mode mid-negotiation */
3554 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3555 rfc.mode != chan->mode)
3556 return -ECONNREFUSED;
3560 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3561 sizeof(rfc), (unsigned long) &rfc);
3564 case L2CAP_CONF_EWS:
3565 chan->ack_win = min_t(u16, val, chan->ack_win);
3566 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3570 case L2CAP_CONF_EFS:
3571 if (olen == sizeof(efs))
3572 memcpy(&efs, (void *)val, olen);
3574 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3575 efs.stype != L2CAP_SERV_NOTRAFIC &&
3576 efs.stype != chan->local_stype)
3577 return -ECONNREFUSED;
3579 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3580 (unsigned long) &efs);
3583 case L2CAP_CONF_FCS:
3584 if (*result == L2CAP_CONF_PENDING)
3585 if (val == L2CAP_FCS_NONE)
3586 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be switched by the response */
3592 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3593 return -ECONNREFUSED;
3595 chan->mode = rfc.mode;
3597 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3599 case L2CAP_MODE_ERTM:
3600 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3601 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3602 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3603 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3604 chan->ack_win = min_t(u16, chan->ack_win,
3607 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3608 chan->local_msdu = le16_to_cpu(efs.msdu);
3609 chan->local_sdu_itime =
3610 le32_to_cpu(efs.sdu_itime);
3611 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3612 chan->local_flush_to =
3613 le32_to_cpu(efs.flush_to);
3617 case L2CAP_MODE_STREAMING:
3618 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3622 req->dcid = cpu_to_le16(chan->dcid);
3623 req->flags = cpu_to_le16(0);
/* Fill a minimal Configuration Response header (scid/result/flags) into
 * 'data'; the response carries no options. Returns the response length
 * (return expression elided from this view).
 */
3628 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3629 u16 result, u16 flags)
3631 struct l2cap_conf_rsp *rsp = data;
3632 void *ptr = rsp->data;
3634 BT_DBG("chan %p", chan);
3636 rsp->scid = cpu_to_le16(chan->dcid);
3637 rsp->result = cpu_to_le16(result);
3638 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was postponed (e.g. pending authorization). Uses the
 * ident saved in chan->ident from the original request.
 */
3643 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3645 struct l2cap_le_conn_rsp rsp;
3646 struct l2cap_conn *conn = chan->conn;
3648 BT_DBG("chan %p", chan);
3650 rsp.dcid = cpu_to_le16(chan->scid);
3651 rsp.mtu = cpu_to_le16(chan->imtu);
3652 rsp.mps = cpu_to_le16(chan->mps);
3653 rsp.credits = cpu_to_le16(chan->rx_credits);
3654 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3656 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR Connection Response (success) for a channel
 * whose acceptance was postponed, then kick off configuration: if a
 * Config Request was not yet sent, build and send one. The response
 * code selection (CREATE_CHAN_RSP vs CONN_RSP) depends on a condition
 * elided from this view (per surrounding code, AMP channel creation).
 */
3660 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3662 struct l2cap_conn_rsp rsp;
3663 struct l2cap_conn *conn = chan->conn;
3667 rsp.scid = cpu_to_le16(chan->dcid);
3668 rsp.dcid = cpu_to_le16(chan->scid);
3669 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3670 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3673 rsp_code = L2CAP_CREATE_CHAN_RSP;
3675 rsp_code = L2CAP_CONN_RSP;
3677 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3679 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller gets to send the Config Request */
3681 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3684 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3685 l2cap_build_conf_req(chan, buf), buf);
3686 chan->num_conf_req++;
/* Extract RFC (and extended window) parameters from a successful
 * Configuration Response and commit them to the channel. Defaults are
 * pre-filled so a missing RFC option from a misbehaving remote still
 * yields sane timeouts/MPS/window. Only meaningful for ERTM/streaming.
 */
3689 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3693 /* Use sane default values in case a misbehaving remote device
3694 * did not send an RFC or extended window size option.
3696 u16 txwin_ext = chan->ack_win;
3697 struct l2cap_conf_rfc rfc = {
3699 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3700 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3701 .max_pdu_size = cpu_to_le16(chan->imtu),
3702 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3705 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3707 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3710 while (len >= L2CAP_CONF_OPT_SIZE) {
3711 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3714 case L2CAP_CONF_RFC:
3715 if (olen == sizeof(rfc))
3716 memcpy(&rfc, (void *)val, olen);
3718 case L2CAP_CONF_EWS:
/* Commit the negotiated values per mode */
3725 case L2CAP_MODE_ERTM:
3726 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3727 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3728 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3729 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3730 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3732 chan->ack_win = min_t(u16, chan->ack_win,
3735 case L2CAP_MODE_STREAMING:
3736 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it rejects our outstanding
 * Information Request (matching ident, NOT_UNDERSTOOD reason), treat
 * the feature-mask exchange as complete and start pending connections.
 */
3740 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3741 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3744 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated reject payloads */
3746 if (cmd_len < sizeof(*rej))
3749 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3752 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3753 cmd->ident == conn->info_ident) {
3754 cancel_delayed_work(&conn->info_timer);
3756 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3757 conn->info_ident = 0;
3759 l2cap_conn_start(conn);
/* Core handler for an incoming Connection Request (BR/EDR or AMP
 * channel create). Looks up a listening channel for the PSM, checks
 * link security (SDP is exempt), verifies the remote SCID is not
 * already in use, creates the new channel, and decides the response
 * result/status based on security state and pending authorization.
 * Always sends a response; if the feature exchange with the remote has
 * not happened yet, also kicks off an Information Request. On success
 * with config not yet started, sends the first Config Request.
 * Returns the new channel (or NULL when refused).
 */
3765 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3766 struct l2cap_cmd_hdr *cmd,
3767 u8 *data, u8 rsp_code, u8 amp_id)
3769 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3770 struct l2cap_conn_rsp rsp;
3771 struct l2cap_chan *chan = NULL, *pchan;
3772 int result, status = L2CAP_CS_NO_INFO;
3774 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3775 __le16 psm = req->psm;
3777 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3779 /* Check if we have socket listening on psm */
3780 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3781 &conn->hcon->dst, ACL_LINK);
3783 result = L2CAP_CR_BAD_PSM;
3787 mutex_lock(&conn->chan_lock);
3788 l2cap_chan_lock(pchan);
3790 /* Check if the ACL is secure enough (if not SDP) */
3791 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3792 !hci_conn_check_link_mode(conn->hcon)) {
3793 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3794 result = L2CAP_CR_SEC_BLOCK;
3798 result = L2CAP_CR_NO_MEM;
3800 /* Check if we already have channel with that dcid */
3801 if (__l2cap_get_chan_by_dcid(conn, scid))
3804 chan = pchan->ops->new_connection(pchan);
3808 /* For certain devices (ex: HID mouse), support for authentication,
3809 * pairing and bonding is optional. For such devices, inorder to avoid
3810 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3811 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3813 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3815 bacpy(&chan->src, &conn->hcon->src);
3816 bacpy(&chan->dst, &conn->hcon->dst);
3817 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3818 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3821 chan->local_amp_id = amp_id;
3823 __l2cap_chan_add(conn, chan);
3827 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Remember the request ident for a possible deferred response */
3829 chan->ident = cmd->ident;
3831 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3832 if (l2cap_chan_check_security(chan)) {
3833 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3834 l2cap_state_change(chan, BT_CONNECT2);
3835 result = L2CAP_CR_PEND;
3836 status = L2CAP_CS_AUTHOR_PEND;
3837 chan->ops->defer(chan);
3839 /* Force pending result for AMP controllers.
3840 * The connection will succeed after the
3841 * physical link is up.
3843 if (amp_id == AMP_ID_BREDR) {
3844 l2cap_state_change(chan, BT_CONFIG);
3845 result = L2CAP_CR_SUCCESS;
3847 l2cap_state_change(chan, BT_CONNECT2);
3848 result = L2CAP_CR_PEND;
3850 status = L2CAP_CS_NO_INFO;
/* Security procedure still in progress */
3853 l2cap_state_change(chan, BT_CONNECT2);
3854 result = L2CAP_CR_PEND;
3855 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not done yet: respond pending */
3858 l2cap_state_change(chan, BT_CONNECT2);
3859 result = L2CAP_CR_PEND;
3860 status = L2CAP_CS_NO_INFO;
3864 l2cap_chan_unlock(pchan);
3865 mutex_unlock(&conn->chan_lock);
3868 rsp.scid = cpu_to_le16(scid);
3869 rsp.dcid = cpu_to_le16(dcid);
3870 rsp.result = cpu_to_le16(result);
3871 rsp.status = cpu_to_le16(status);
3872 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3874 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3875 struct l2cap_info_req info;
3876 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3878 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3879 conn->info_ident = l2cap_get_ident(conn);
3881 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3883 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3884 sizeof(info), &info);
3887 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3888 result == L2CAP_CR_SUCCESS) {
3890 set_bit(CONF_REQ_SENT, &chan->conf_state);
3891 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3892 l2cap_build_conf_req(chan, buf), buf);
3893 chan->num_conf_req++;
/* Signalling entry point for a BR/EDR Connection Request: validate the
 * payload length, notify the management interface of the connection on
 * first contact, then delegate to l2cap_connect with a plain CONN_RSP
 * response code (amp_id 0 = BR/EDR).
 */
3899 static int l2cap_connect_req(struct l2cap_conn *conn,
3900 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3902 struct hci_dev *hdev = conn->hcon->hdev;
3903 struct hci_conn *hcon = conn->hcon;
3905 if (cmd_len < sizeof(struct l2cap_conn_req))
3909 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3910 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3911 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3912 hcon->dst_type, 0, NULL, 0,
3914 hci_dev_unlock(hdev);
3916 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection/Create Channel Response. The channel is found by
 * our SCID (when the remote assigned one) or by the command ident.
 * SUCCESS moves the channel to BT_CONFIG and sends the first Config
 * Request; PEND marks the connection as pending; other results tear
 * the channel down with ECONNREFUSED.
 */
3920 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3921 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3924 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3925 u16 scid, dcid, result, status;
3926 struct l2cap_chan *chan;
3930 if (cmd_len < sizeof(*rsp))
3933 scid = __le16_to_cpu(rsp->scid);
3934 dcid = __le16_to_cpu(rsp->dcid);
3935 result = __le16_to_cpu(rsp->result);
3936 status = __le16_to_cpu(rsp->status);
3938 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3939 dcid, scid, result, status);
3941 mutex_lock(&conn->chan_lock);
3944 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No SCID yet: match the pending request by ident instead */
3950 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3959 l2cap_chan_lock(chan);
3962 case L2CAP_CR_SUCCESS:
3963 l2cap_state_change(chan, BT_CONFIG);
3966 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3968 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3971 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3972 l2cap_build_conf_req(chan, req), req);
3973 chan->num_conf_req++;
/* Pending: remember so a later response completes the connect */
3977 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result refuses the connection */
3981 l2cap_chan_del(chan, ECONNREFUSED);
3985 l2cap_chan_unlock(chan);
3988 mutex_unlock(&conn->chan_lock);
/* Choose the frame check sequence mode after configuration: FCS only
 * applies to ERTM/streaming, and defaults to CRC16 unless both sides
 * agreed to disable it (CONF_RECV_NO_FCS).
 */
3993 static inline void set_default_fcs(struct l2cap_chan *chan)
3995 /* FCS is enabled only in ERTM or streaming mode, if one or both
3998 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3999 chan->fcs = L2CAP_FCS_NONE;
4000 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4001 chan->fcs = L2CAP_FCS_CRC16;
/* Complete an EFS-deferred configuration: clear the local-pending flag,
 * mark output configuration done, and send a SUCCESS Config Response
 * built into the caller-supplied buffer.
 */
4004 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4005 u8 ident, u16 flags)
4007 struct l2cap_conn *conn = chan->conn;
4009 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4012 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4013 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4015 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4016 l2cap_build_conf_rsp(chan, data,
4017 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * offending source/destination CIDs back to the remote.
 */
4020 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4023 struct l2cap_cmd_rej_cid rej;
4025 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4026 rej.scid = __cpu_to_le16(scid);
4027 rej.dcid = __cpu_to_le16(dcid);
4029 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request. Validates channel state,
 * accumulates (possibly fragmented) option data into chan->conf_req —
 * rejecting if it would overflow — and on the final fragment parses it,
 * sends the built response, and advances the configuration state
 * machine: when both directions are done the channel becomes ready
 * (initialising ERTM first if needed); otherwise our own Config Request
 * is sent. An EFS PENDING/PENDING crossover triggers the deferred
 * EFS response path.
 */
4032 static inline int l2cap_config_req(struct l2cap_conn *conn,
4033 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4036 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4039 struct l2cap_chan *chan;
4042 if (cmd_len < sizeof(*req))
4045 dcid = __le16_to_cpu(req->dcid);
4046 flags = __le16_to_cpu(req->flags);
4048 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4050 chan = l2cap_get_chan_by_scid(conn, dcid);
4052 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4056 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4057 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4062 /* Reject if config buffer is too small. */
4063 len = cmd_len - sizeof(*req);
4064 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4065 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4066 l2cap_build_conf_rsp(chan, rsp,
4067 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment's options */
4072 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4073 chan->conf_len += len;
4075 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4076 /* Incomplete config. Send empty response. */
4077 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4078 l2cap_build_conf_rsp(chan, rsp,
4079 L2CAP_CONF_SUCCESS, flags), rsp);
4083 /* Complete config. */
4084 len = l2cap_parse_conf_req(chan, rsp);
4086 l2cap_send_disconn_req(chan, ECONNRESET);
4090 chan->ident = cmd->ident;
4091 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4092 chan->num_conf_rsp++;
4094 /* Reset config buffer. */
4097 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4100 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4101 set_default_fcs(chan);
4103 if (chan->mode == L2CAP_MODE_ERTM ||
4104 chan->mode == L2CAP_MODE_STREAMING)
4105 err = l2cap_ertm_init(chan);
4108 l2cap_send_disconn_req(chan, -err);
4110 l2cap_chan_ready(chan);
/* We haven't sent our Config Request yet: do it now */
4115 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4117 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4118 l2cap_build_conf_req(chan, buf), buf);
4119 chan->num_conf_req++;
4122 /* Got Conf Rsp PENDING from remote side and asume we sent
4123 Conf Rsp PENDING in the code above */
4124 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4125 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4127 /* check compatibility */
4129 /* Send rsp for BR/EDR channel */
4131 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4133 chan->ident = cmd->ident;
4137 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response. SUCCESS commits the
 * negotiated RFC parameters; PENDING (with our side also pending)
 * re-parses the response and either completes the EFS handshake
 * directly (no high-speed link) or creates the AMP logical link first;
 * UNACCEPT triggers a renegotiated Config Request up to the retry
 * limit; anything else errors out and disconnects. Once input config
 * is done and output config already finished, the channel is made
 * ready (ERTM initialised if applicable).
 */
4141 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4142 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4145 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4146 u16 scid, flags, result;
4147 struct l2cap_chan *chan;
4148 int len = cmd_len - sizeof(*rsp);
4151 if (cmd_len < sizeof(*rsp))
4154 scid = __le16_to_cpu(rsp->scid);
4155 flags = __le16_to_cpu(rsp->flags);
4156 result = __le16_to_cpu(rsp->result);
4158 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4161 chan = l2cap_get_chan_by_scid(conn, scid);
4166 case L2CAP_CONF_SUCCESS:
4167 l2cap_conf_rfc_get(chan, rsp->data, len);
4168 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4171 case L2CAP_CONF_PENDING:
4172 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4174 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4177 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4180 l2cap_send_disconn_req(chan, ECONNRESET);
4184 if (!chan->hs_hcon) {
4185 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4188 if (l2cap_check_efs(chan)) {
4189 amp_create_logical_link(chan);
4190 chan->ident = cmd->ident;
4196 case L2CAP_CONF_UNACCEPT:
4197 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Response options must fit in our request buffer */
4200 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4201 l2cap_send_disconn_req(chan, ECONNRESET);
4205 /* throw out any old stored conf requests */
4206 result = L2CAP_CONF_SUCCESS;
4207 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4210 l2cap_send_disconn_req(chan, ECONNRESET);
4214 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4215 L2CAP_CONF_REQ, len, req);
4216 chan->num_conf_req++;
4217 if (result != L2CAP_CONF_SUCCESS)
/* Rejected / unknown result: give up on the channel */
4223 l2cap_chan_set_err(chan, ECONNRESET);
4225 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4226 l2cap_send_disconn_req(chan, ECONNRESET);
4230 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4233 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4235 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4236 set_default_fcs(chan);
4238 if (chan->mode == L2CAP_MODE_ERTM ||
4239 chan->mode == L2CAP_MODE_STREAMING)
4240 err = l2cap_ertm_init(chan);
4243 l2cap_send_disconn_req(chan, -err);
4245 l2cap_chan_ready(chan);
4249 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: find the channel by our CID
 * (the request's dcid), acknowledge with a Disconnection Response, then
 * shut down and delete the channel. The extra hold/put pair keeps the
 * channel alive across ops->close after it is removed from the
 * connection list.
 */
4253 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4254 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4257 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4258 struct l2cap_disconn_rsp rsp;
4260 struct l2cap_chan *chan;
4262 if (cmd_len != sizeof(*req))
4265 scid = __le16_to_cpu(req->scid);
4266 dcid = __le16_to_cpu(req->dcid);
4268 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4270 mutex_lock(&conn->chan_lock);
4272 chan = __l2cap_get_chan_by_scid(conn, dcid);
4274 mutex_unlock(&conn->chan_lock);
4275 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4279 l2cap_chan_lock(chan);
4281 rsp.dcid = cpu_to_le16(chan->scid);
4282 rsp.scid = cpu_to_le16(chan->dcid);
4283 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4285 chan->ops->set_shutdown(chan);
4287 l2cap_chan_hold(chan);
4288 l2cap_chan_del(chan, ECONNRESET);
4290 l2cap_chan_unlock(chan);
4292 chan->ops->close(chan);
4293 l2cap_chan_put(chan);
4295 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response: our earlier request succeeded, so
 * delete the channel (error 0 — clean close). Same hold/put pattern as
 * the request handler to keep the channel valid through ops->close.
 */
4300 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4301 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4304 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4306 struct l2cap_chan *chan;
4308 if (cmd_len != sizeof(*rsp))
4311 scid = __le16_to_cpu(rsp->scid);
4312 dcid = __le16_to_cpu(rsp->dcid);
4314 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4316 mutex_lock(&conn->chan_lock);
4318 chan = __l2cap_get_chan_by_scid(conn, scid);
4320 mutex_unlock(&conn->chan_lock);
4324 l2cap_chan_lock(chan);
4326 l2cap_chan_hold(chan);
4327 l2cap_chan_del(chan, 0);
4329 l2cap_chan_unlock(chan);
4331 chan->ops->close(chan);
4332 l2cap_chan_put(chan);
4334 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request. FEAT_MASK returns the local feature
 * mask (extended-flow/window bits added when high-speed is enabled);
 * FIXED_CHAN returns the fixed-channel bitmap (A2MP bit toggled by
 * hs_enabled); any other type is answered NOTSUPP.
 */
4339 static inline int l2cap_information_req(struct l2cap_conn *conn,
4340 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4343 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4346 if (cmd_len != sizeof(*req))
4349 type = __le16_to_cpu(req->type);
4351 BT_DBG("type 0x%4.4x", type);
4353 if (type == L2CAP_IT_FEAT_MASK) {
4355 u32 feat_mask = l2cap_feat_mask;
4356 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4357 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4358 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4360 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4362 if (conn->hs_enabled)
4363 feat_mask |= L2CAP_FEAT_EXT_FLOW
4364 | L2CAP_FEAT_EXT_WINDOW;
4366 put_unaligned_le32(feat_mask, rsp->data);
4367 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4369 } else if (type == L2CAP_IT_FIXED_CHAN) {
4371 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when high speed is on */
4373 if (conn->hs_enabled)
4374 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4376 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4378 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4379 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4380 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4381 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4384 struct l2cap_info_rsp rsp;
4385 rsp.type = cpu_to_le16(type);
4386 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4387 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an L2CAP Information Response.  On a feature-mask answer,
 * chain a follow-up request for the fixed-channel map if supported;
 * once all info is gathered, mark the connection done and kick
 * pending channel setup via l2cap_conn_start().
 * NOTE(review): some original lines (switch statement header, returns)
 * are elided in this excerpt.
 */
4394 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4395 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4398 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4401 if (cmd_len < sizeof(*rsp))
4404 type = __le16_to_cpu(rsp->type);
4405 result = __le16_to_cpu(rsp->result);
4407 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4409 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4410 if (cmd->ident != conn->info_ident ||
4411 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4414 cancel_delayed_work(&conn->info_timer);
4416 if (result != L2CAP_IR_SUCCESS) {
/* Peer rejected the request: give up on info gathering and proceed. */
4417 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4418 conn->info_ident = 0;
4420 l2cap_conn_start(conn);
4426 case L2CAP_IT_FEAT_MASK:
4427 conn->feat_mask = get_unaligned_le32(rsp->data);
4429 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels: ask which ones next. */
4430 struct l2cap_info_req req;
4431 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4433 conn->info_ident = l2cap_get_ident(conn);
4435 l2cap_send_cmd(conn, conn->info_ident,
4436 L2CAP_INFO_REQ, sizeof(req), &req);
4438 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4439 conn->info_ident = 0;
4441 l2cap_conn_start(conn);
4445 case L2CAP_IT_FIXED_CHAN:
4446 conn->fixed_chan_mask = rsp->data[0];
4447 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4448 conn->info_ident = 0;
4450 l2cap_conn_start(conn);
/* Handle an L2CAP Create Channel Request (AMP).  amp_id 0 (AMP_ID_BREDR)
 * means a plain BR/EDR connect; otherwise validate the AMP controller
 * and, on success, record the bredr_chan/hs_hcon pairing for the move.
 * Failure paths answer with result L2CAP_CR_BAD_AMP.
 * NOTE(review): several original lines (error labels, hdev puts,
 * rsp.dcid setup) are elided in this excerpt.
 */
4457 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4458 struct l2cap_cmd_hdr *cmd,
4459 u16 cmd_len, void *data)
4461 struct l2cap_create_chan_req *req = data;
4462 struct l2cap_create_chan_rsp rsp;
4463 struct l2cap_chan *chan;
4464 struct hci_dev *hdev;
4467 if (cmd_len != sizeof(*req))
4470 if (!conn->hs_enabled)
4473 psm = le16_to_cpu(req->psm);
4474 scid = le16_to_cpu(req->scid);
4476 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4478 /* For controller id 0 make BR/EDR connection */
4479 if (req->amp_id == AMP_ID_BREDR) {
4480 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4485 /* Validate AMP controller id */
4486 hdev = hci_dev_get(req->amp_id);
4490 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4495 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4498 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4499 struct hci_conn *hs_hcon;
4501 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No high-speed link with the peer: reject the new channel's CID. */
4505 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4510 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4512 mgr->bredr_chan = chan;
4513 chan->hs_hcon = hs_hcon;
/* FCS is not used on AMP links. */
4514 chan->fcs = L2CAP_FCS_NONE;
4515 conn->mtu = hdev->block_mtu;
4524 rsp.scid = cpu_to_le16(scid);
4525 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4526 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4528 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for chan towards dest_amp_id and arm the
 * move-response guard timer.  Stores the ident so the response can be
 * matched back to this channel.
 */
4534 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4536 struct l2cap_move_chan_req req;
4539 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4541 ident = l2cap_get_ident(chan->conn);
4542 chan->ident = ident;
4544 req.icid = cpu_to_le16(chan->scid);
4545 req.dest_amp_id = dest_amp_id;
4547 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4550 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with the given result code, reusing the
 * ident saved from the peer's Move Channel Request.
 */
4553 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4555 struct l2cap_move_chan_rsp rsp;
4557 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* The responder reports the initiator's CID, i.e. our dcid. */
4559 rsp.icid = cpu_to_le16(chan->dcid);
4560 rsp.result = cpu_to_le16(result);
4562 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm for chan with the given result and arm the
 * confirm-response guard timer.  A fresh ident is allocated and saved.
 */
4566 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4568 struct l2cap_move_chan_cfm cfm;
4570 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4572 chan->ident = l2cap_get_ident(chan->conn);
4574 cfm.icid = cpu_to_le16(chan->scid);
4575 cfm.result = cpu_to_le16(result);
4577 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4580 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare icid when no
 * channel object could be found (icid is the best guess we have).
 */
4583 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4585 struct l2cap_move_chan_cfm cfm;
4587 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4589 cfm.icid = cpu_to_le16(icid);
4590 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4592 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Acknowledge a Move Channel Confirm by echoing its icid back with the
 * caller-supplied ident.
 */
4596 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4599 struct l2cap_move_chan_cfm_rsp rsp;
4601 BT_DBG("icid 0x%4.4x", icid);
4603 rsp.icid = cpu_to_le16(icid);
4604 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link.  The
 * actual link release is still a placeholder (see comment below).
 */
4607 static void __release_logical_link(struct l2cap_chan *chan)
4609 chan->hs_hchan = NULL;
4610 chan->hs_hcon = NULL;
4612 /* Placeholder - release the logical link */
/* React to a failed logical-link setup.  If the channel never reached
 * BT_CONNECTED the whole channel creation failed and is disconnected;
 * otherwise the failure aborts an in-progress channel move, with the
 * cleanup depending on our move role.
 * NOTE(review): break statements and a default case appear elided in
 * this excerpt.
 */
4615 static void l2cap_logical_fail(struct l2cap_chan *chan)
4617 /* Logical link setup failed */
4618 if (chan->state != BT_CONNECTED) {
4619 /* Create channel failure, disconnect */
4620 l2cap_send_disconn_req(chan, ECONNRESET);
4624 switch (chan->move_role) {
4625 case L2CAP_MOVE_ROLE_RESPONDER:
4626 l2cap_move_done(chan);
/* Tell the initiator we can't host the move. */
4627 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4629 case L2CAP_MOVE_ROLE_INITIATOR:
4630 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4631 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4632 /* Remote has only sent pending or
4633 * success responses, clean up
4635 l2cap_move_done(chan);
4638 /* Other amp move states imply that the move
4639 * has already aborted
4641 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the logical link is up: attach the
 * hci_chan, send the (deferred) EFS config response, and if both config
 * directions are done, finish ERTM init and mark the channel ready.
 * NOTE(review): some original lines are elided in this excerpt.
 */
4646 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4647 struct hci_chan *hchan)
4649 struct l2cap_conf_rsp rsp;
4651 chan->hs_hchan = hchan;
4652 chan->hs_hcon->l2cap_data = chan->conn;
4654 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4656 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4659 set_default_fcs(chan);
4661 err = l2cap_ertm_init(chan);
/* ERTM init failure is fatal for the channel: disconnect with -err. */
4663 l2cap_send_disconn_req(chan, -err);
4665 l2cap_chan_ready(chan);
/* Advance the channel-move state machine once the logical link for a
 * move is established, according to move_state and move_role.  Any
 * unexpected state releases the logical link and returns the move
 * machinery to STABLE.
 * NOTE(review): break statements and the default label appear elided
 * in this excerpt.
 */
4669 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4670 struct hci_chan *hchan)
4672 chan->hs_hcon = hchan->conn;
4673 chan->hs_hcon->l2cap_data = chan->conn;
4675 BT_DBG("move_state %d", chan->move_state);
4677 switch (chan->move_state) {
4678 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4679 /* Move confirm will be sent after a success
4680 * response is received
4682 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4684 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local busy defers the confirm until rx is unblocked. */
4685 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4686 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4687 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4688 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4689 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4690 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4691 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4692 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4696 /* Move was not in expected state, free the channel */
4697 __release_logical_link(chan);
4699 chan->move_state = L2CAP_MOVE_STABLE;
4703 /* Call with chan locked */
/* Logical-link confirmation entry point: on failure, run the failure
 * cleanup and release the link; on success, dispatch to the create or
 * move completion path depending on whether the channel is connected.
 * NOTE(review): the status check and early return are elided in this
 * excerpt.
 */
4704 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4707 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4710 l2cap_logical_fail(chan);
4711 __release_logical_link(chan);
4715 if (chan->state != BT_CONNECTED) {
4716 /* Ignore logical link if channel is on BR/EDR */
4717 if (chan->local_amp_id != AMP_ID_BREDR)
4718 l2cap_logical_finish_create(chan, hchan);
4720 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a connected channel between BR/EDR and AMP.  From BR/EDR
 * the move starts with physical-link preparation (still a placeholder);
 * from an AMP the Move Channel Request is sent immediately.
 * NOTE(review): an early return for the non-AMP-preferred policy case
 * appears elided in this excerpt.
 */
4724 void l2cap_move_start(struct l2cap_chan *chan)
4726 BT_DBG("chan %p", chan);
4728 if (chan->local_amp_id == AMP_ID_BREDR) {
4729 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4731 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4732 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4733 /* Placeholder - start physical link setup */
4735 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4736 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4738 l2cap_move_setup(chan);
/* Moving off an AMP always targets BR/EDR (dest_amp_id 0). */
4739 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical-link setup.  Outgoing
 * channels either proceed with a Create Channel Request on the AMP or
 * fall back to a plain BR/EDR Connect Request; incoming channels get a
 * Create Channel Response and, on success, start configuration.
 * NOTE(review): several original lines (returns, buf declaration,
 * L2CAP_CONF_REQ argument) are elided in this excerpt.
 */
4743 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4744 u8 local_amp_id, u8 remote_amp_id)
4746 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4747 local_amp_id, remote_amp_id);
/* No FCS on AMP-created channels. */
4749 chan->fcs = L2CAP_FCS_NONE;
4751 /* Outgoing channel on AMP */
4752 if (chan->state == BT_CONNECT) {
4753 if (result == L2CAP_CR_SUCCESS) {
4754 chan->local_amp_id = local_amp_id;
4755 l2cap_send_create_chan_req(chan, remote_amp_id);
4757 /* Revert to BR/EDR connect */
4758 l2cap_send_conn_req(chan);
4764 /* Incoming channel on AMP */
4765 if (__l2cap_no_conn_pending(chan)) {
4766 struct l2cap_conn_rsp rsp;
4768 rsp.scid = cpu_to_le16(chan->dcid);
4769 rsp.dcid = cpu_to_le16(chan->scid);
4771 if (result == L2CAP_CR_SUCCESS) {
4772 /* Send successful response */
4773 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4774 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4776 /* Send negative response */
4777 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4778 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4781 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4784 if (result == L2CAP_CR_SUCCESS) {
/* Accepted: move to CONFIG and send the first config request. */
4785 l2cap_state_change(chan, BT_CONFIG);
4786 set_bit(CONF_REQ_SENT, &chan->conf_state);
4787 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4789 l2cap_build_conf_req(chan, buf), buf);
4790 chan->num_conf_req++;
/* Initiator side: prepare the channel for a move and send the Move
 * Channel Request towards remote_amp_id, remembering the local target
 * controller in move_id.
 */
4795 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4798 l2cap_move_setup(chan);
4799 chan->move_id = local_amp_id;
4800 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4802 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move based on logical-link availability.
 * If the link is already connected, reply SUCCESS and confirm the
 * logical link; if it is still coming up, wait for its confirmation;
 * if unavailable, reply NOT_ALLOWED.
 * NOTE(review): the hchan lookup is a placeholder and some branch
 * structure is elided in this excerpt; `result` is currently unused in
 * the visible lines.
 */
4805 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4807 struct hci_chan *hchan = NULL;
4809 /* Placeholder - get hci_chan for logical link */
4812 if (hchan->state == BT_CONNECTED) {
4813 /* Logical link is ready to go */
4814 chan->hs_hcon = hchan->conn;
4815 chan->hs_hcon->l2cap_data = chan->conn;
4816 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4817 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4819 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4821 /* Wait for logical link to be ready */
4822 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4825 /* Logical link not available */
4826 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a channel move.  A responder still owes the peer a response
 * (BAD_ID for -EINVAL, otherwise NOT_ALLOWED); in all cases the move
 * role/state is reset to STABLE and ERTM transmission is restarted.
 */
4830 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4832 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4834 if (result == -EINVAL)
4835 rsp_result = L2CAP_MR_BAD_ID;
4837 rsp_result = L2CAP_MR_NOT_ALLOWED;
4839 l2cap_send_move_chan_rsp(chan, rsp_result);
4842 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4843 chan->move_state = L2CAP_MOVE_STABLE;
4845 /* Restart data transmission */
4846 l2cap_ertm_send(chan);
4849 /* Invoke with locked chan */
/* Physical-link confirmation entry point.  Channels that are closing
 * are ignored; unconnected channels continue creation; connected
 * channels either continue or cancel the move depending on result and
 * move_role.
 * NOTE(review): break statements and the default label appear elided
 * in this excerpt.
 */
4850 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4852 u8 local_amp_id = chan->local_amp_id;
4853 u8 remote_amp_id = chan->remote_amp_id;
4855 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4856 chan, result, local_amp_id, remote_amp_id);
4858 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4859 l2cap_chan_unlock(chan);
4863 if (chan->state != BT_CONNECTED) {
4864 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4865 } else if (result != L2CAP_MR_SUCCESS) {
4866 l2cap_do_move_cancel(chan, result);
4868 switch (chan->move_role) {
4869 case L2CAP_MOVE_ROLE_INITIATOR:
4870 l2cap_do_move_initiate(chan, local_amp_id,
4873 case L2CAP_MOVE_ROLE_RESPONDER:
4874 l2cap_do_move_respond(chan, result);
4877 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.  Validates the channel
 * (dynamic CID, policy, ERTM/streaming mode), the destination AMP id,
 * and resolves move collisions by bd_addr comparison before adopting
 * the responder role and answering with the appropriate result.
 * NOTE(review): several original lines (returns, hdev_put, labels)
 * are elided in this excerpt.
 */
4883 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4884 struct l2cap_cmd_hdr *cmd,
4885 u16 cmd_len, void *data)
4887 struct l2cap_move_chan_req *req = data;
4888 struct l2cap_move_chan_rsp rsp;
4889 struct l2cap_chan *chan;
4891 u16 result = L2CAP_MR_NOT_ALLOWED;
4893 if (cmd_len != sizeof(*req))
4896 icid = le16_to_cpu(req->icid);
4898 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4900 if (!conn->hs_enabled)
4903 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: answer NOT_ALLOWED without a channel object. */
4905 rsp.icid = cpu_to_le16(icid);
4906 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4907 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4912 chan->ident = cmd->ident;
4914 if (chan->scid < L2CAP_CID_DYN_START ||
4915 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4916 (chan->mode != L2CAP_MODE_ERTM &&
4917 chan->mode != L2CAP_MODE_STREAMING)) {
4918 result = L2CAP_MR_NOT_ALLOWED;
4919 goto send_move_response;
4922 if (chan->local_amp_id == req->dest_amp_id) {
4923 result = L2CAP_MR_SAME_ID;
4924 goto send_move_response;
4927 if (req->dest_amp_id != AMP_ID_BREDR) {
4928 struct hci_dev *hdev;
4929 hdev = hci_dev_get(req->dest_amp_id);
4930 if (!hdev || hdev->dev_type != HCI_AMP ||
4931 !test_bit(HCI_UP, &hdev->flags)) {
4935 result = L2CAP_MR_BAD_ID;
4936 goto send_move_response;
4941 /* Detect a move collision. Only send a collision response
4942 * if this side has "lost", otherwise proceed with the move.
4943 * The winner has the larger bd_addr.
4945 if ((__chan_is_moving(chan) ||
4946 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4947 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4948 result = L2CAP_MR_COLLISION;
4949 goto send_move_response;
4952 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4953 l2cap_move_setup(chan);
4954 chan->move_id = req->dest_amp_id;
4957 if (req->dest_amp_id == AMP_ID_BREDR) {
4958 /* Moving to BR/EDR */
4959 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4960 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4961 result = L2CAP_MR_PEND;
4963 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4964 result = L2CAP_MR_SUCCESS;
4967 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4968 /* Placeholder - uncomment when amp functions are available */
4969 /*amp_accept_physical(chan, req->dest_amp_id);*/
4970 result = L2CAP_MR_PEND;
4974 l2cap_send_move_chan_rsp(chan, result);
4976 l2cap_chan_unlock(chan);
/* Continue an in-progress move after a SUCCESS or PEND Move Channel
 * Response.  Advances the move state machine, possibly sending the
 * Move Channel Confirm, depending on logical-link readiness and local
 * busy state.  A channel lookup failure is answered with an
 * UNCONFIRMED confirm for the bare icid.
 * NOTE(review): break statements, the default label and some
 * intermediate lines are elided in this excerpt.
 */
4981 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4983 struct l2cap_chan *chan;
4984 struct hci_chan *hchan = NULL;
4986 chan = l2cap_get_chan_by_scid(conn, icid);
4988 l2cap_send_move_chan_cfm_icid(conn, icid);
/* Re-arm the guard timer only while the peer reports PEND. */
4992 __clear_chan_timer(chan);
4993 if (result == L2CAP_MR_PEND)
4994 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4996 switch (chan->move_state) {
4997 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4998 /* Move confirm will be sent when logical link
5001 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5003 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5004 if (result == L2CAP_MR_PEND) {
5006 } else if (test_bit(CONN_LOCAL_BUSY,
5007 &chan->conn_state)) {
5008 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5010 /* Logical link is up or moving to BR/EDR,
5013 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5014 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5017 case L2CAP_MOVE_WAIT_RSP:
5019 if (result == L2CAP_MR_SUCCESS) {
5020 /* Remote is ready, send confirm immediately
5021 * after logical link is ready
5023 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5025 /* Both logical link and move success
5026 * are required to confirm
5028 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5031 /* Placeholder - get hci_chan for logical link */
5033 /* Logical link not available */
5034 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5038 /* If the logical link is not yet connected, do not
5039 * send confirmation.
5041 if (hchan->state != BT_CONNECTED)
5044 /* Logical link is already ready to go */
5046 chan->hs_hcon = hchan->conn;
5047 chan->hs_hcon->l2cap_data = chan->conn;
5049 if (result == L2CAP_MR_SUCCESS) {
5050 /* Can confirm now */
5051 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5053 /* Now only need move success
5056 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5059 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5062 /* Any other amp move state means the move failed. */
5063 chan->move_id = chan->local_amp_id;
5064 l2cap_move_done(chan);
5065 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5068 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response.  Looks the channel up by the
 * request ident; on COLLISION the initiator flips to responder (the
 * peer won the tie-break), otherwise the move is cancelled and an
 * UNCONFIRMED confirm is sent.
 * NOTE(review): some original lines are elided in this excerpt.
 */
5071 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5074 struct l2cap_chan *chan;
5076 chan = l2cap_get_chan_by_ident(conn, ident);
5078 /* Could not locate channel, icid is best guess */
5079 l2cap_send_move_chan_cfm_icid(conn, icid);
5083 __clear_chan_timer(chan);
5085 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5086 if (result == L2CAP_MR_COLLISION) {
5087 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5089 /* Cleanup - cancel move */
5090 chan->move_id = chan->local_amp_id;
5091 l2cap_move_done(chan);
5095 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5097 l2cap_chan_unlock(chan);
/* Handle a Move Channel Response: dispatch SUCCESS/PEND results to
 * l2cap_move_continue() and everything else to l2cap_move_fail().
 */
5100 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5101 struct l2cap_cmd_hdr *cmd,
5102 u16 cmd_len, void *data)
5104 struct l2cap_move_chan_rsp *rsp = data;
5107 if (cmd_len != sizeof(*rsp))
5110 icid = le16_to_cpu(rsp->icid);
5111 result = le16_to_cpu(rsp->result);
5113 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5115 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5116 l2cap_move_continue(conn, icid, result);
5118 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirm.  If the channel was waiting for the
 * confirm, commit (CONFIRMED) or roll back the controller switch and
 * finish the move; in any case a Confirm Response is sent, even when
 * no matching channel exists (as the spec requires).
 */
5123 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5124 struct l2cap_cmd_hdr *cmd,
5125 u16 cmd_len, void *data)
5127 struct l2cap_move_chan_cfm *cfm = data;
5128 struct l2cap_chan *chan;
5131 if (cmd_len != sizeof(*cfm))
5134 icid = le16_to_cpu(cfm->icid);
5135 result = le16_to_cpu(cfm->result);
5137 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5139 chan = l2cap_get_chan_by_dcid(conn, icid);
5141 /* Spec requires a response even if the icid was not found */
5142 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5146 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5147 if (result == L2CAP_MC_CONFIRMED) {
/* Commit: the channel now lives on the controller in move_id. */
5148 chan->local_amp_id = chan->move_id;
5149 if (chan->local_amp_id == AMP_ID_BREDR)
5150 __release_logical_link(chan);
5152 chan->move_id = chan->local_amp_id;
5155 l2cap_move_done(chan);
5158 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5160 l2cap_chan_unlock(chan);
/* Handle a Move Channel Confirm Response: the peer has acknowledged our
 * confirm, so commit the controller switch, drop the logical link if we
 * ended up back on BR/EDR, and finish the move.
 */
5165 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5166 struct l2cap_cmd_hdr *cmd,
5167 u16 cmd_len, void *data)
5169 struct l2cap_move_chan_cfm_rsp *rsp = data;
5170 struct l2cap_chan *chan;
5173 if (cmd_len != sizeof(*rsp))
5176 icid = le16_to_cpu(rsp->icid);
5178 BT_DBG("icid 0x%4.4x", icid);
5180 chan = l2cap_get_chan_by_scid(conn, icid);
5184 __clear_chan_timer(chan);
5186 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5187 chan->local_amp_id = chan->move_id;
5189 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5190 __release_logical_link(chan);
5192 l2cap_move_done(chan);
5195 l2cap_chan_unlock(chan);
/* Validate LE connection parameters against the ranges from the
 * Bluetooth spec: interval 6..3200 units with min <= max, supervision
 * timeout 10..3200 units and strictly greater than 8*max interval,
 * latency at most 499 and within what the timeout allows.
 * NOTE(review): the return statements are elided in this excerpt; the
 * visible conditions are the rejection cases.
 */
5200 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5205 if (min > max || min < 6 || max > 3200)
5208 if (to_multiplier < 10 || to_multiplier > 3200)
5211 if (max >= to_multiplier * 8)
5214 max_latency = (to_multiplier * 8 / max) - 1;
5215 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only).
 * Validates the proposed parameters, answers accepted/rejected, and on
 * acceptance asks the controller to update the link parameters.
 * NOTE(review): the accepted-path condition before hci_le_conn_update
 * is elided in this excerpt.
 */
5221 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5222 struct l2cap_cmd_hdr *cmd,
5223 u16 cmd_len, u8 *data)
5225 struct hci_conn *hcon = conn->hcon;
5226 struct l2cap_conn_param_update_req *req;
5227 struct l2cap_conn_param_update_rsp rsp;
5228 u16 min, max, latency, to_multiplier;
/* Only the master may apply a slave's parameter-update request. */
5231 if (!(hcon->link_mode & HCI_LM_MASTER))
5234 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5237 req = (struct l2cap_conn_param_update_req *) data;
5238 min = __le16_to_cpu(req->min);
5239 max = __le16_to_cpu(req->max);
5240 latency = __le16_to_cpu(req->latency);
5241 to_multiplier = __le16_to_cpu(req->to_multiplier);
5243 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5244 min, max, latency, to_multiplier);
5246 memset(&rsp, 0, sizeof(rsp));
5248 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5250 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5252 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5254 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5258 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an LE Credit Based Connection Response.  On success, store the
 * peer's MTU/MPS/initial credits and mark the channel ready; any other
 * result deletes the channel with ECONNREFUSED.  MTU/MPS below the LE
 * minimum of 23 are treated as invalid.
 * NOTE(review): some original lines (dcid assignment, unlock label,
 * default case) are elided in this excerpt.
 */
5263 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5264 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5267 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5268 u16 dcid, mtu, mps, credits, result;
5269 struct l2cap_chan *chan;
5272 if (cmd_len < sizeof(*rsp))
5275 dcid = __le16_to_cpu(rsp->dcid);
5276 mtu = __le16_to_cpu(rsp->mtu);
5277 mps = __le16_to_cpu(rsp->mps);
5278 credits = __le16_to_cpu(rsp->credits);
5279 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum LE data channel MTU/MPS. */
5281 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5284 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5285 dcid, mtu, mps, credits, result);
5287 mutex_lock(&conn->chan_lock);
/* The response is matched to our request by ident, not CID. */
5289 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5297 l2cap_chan_lock(chan);
5300 case L2CAP_CR_SUCCESS:
5304 chan->remote_mps = mps;
5305 chan->tx_credits = credits;
5306 l2cap_chan_ready(chan);
5310 l2cap_chan_del(chan, ECONNREFUSED);
5314 l2cap_chan_unlock(chan);
5317 mutex_unlock(&conn->chan_lock);
/* Dispatch a single BR/EDR signaling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged.
 * NOTE(review): break statements and the err = -EINVAL path for the
 * default case are elided in this excerpt.
 */
5322 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5323 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5328 switch (cmd->code) {
5329 case L2CAP_COMMAND_REJ:
5330 l2cap_command_rej(conn, cmd, cmd_len, data);
5333 case L2CAP_CONN_REQ:
5334 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5337 case L2CAP_CONN_RSP:
5338 case L2CAP_CREATE_CHAN_RSP:
5339 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5342 case L2CAP_CONF_REQ:
5343 err = l2cap_config_req(conn, cmd, cmd_len, data);
5346 case L2CAP_CONF_RSP:
5347 l2cap_config_rsp(conn, cmd, cmd_len, data);
5350 case L2CAP_DISCONN_REQ:
5351 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5354 case L2CAP_DISCONN_RSP:
5355 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5358 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back with the same ident. */
5359 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5362 case L2CAP_ECHO_RSP:
5365 case L2CAP_INFO_REQ:
5366 err = l2cap_information_req(conn, cmd, cmd_len, data);
5369 case L2CAP_INFO_RSP:
5370 l2cap_information_rsp(conn, cmd, cmd_len, data);
5373 case L2CAP_CREATE_CHAN_REQ:
5374 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5377 case L2CAP_MOVE_CHAN_REQ:
5378 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5381 case L2CAP_MOVE_CHAN_RSP:
5382 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5385 case L2CAP_MOVE_CHAN_CFM:
5386 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5389 case L2CAP_MOVE_CHAN_CFM_RSP:
5390 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5394 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request.  Finds a listening
 * channel for the PSM, checks security and duplicate CIDs, creates the
 * new channel with LE flow control, and replies with success, pending
 * (deferred setup), or an error result.
 * NOTE(review): several original lines (psm extraction, error labels,
 * the memset of rsp, dcid assignment) are elided in this excerpt.
 */
5402 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5403 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5406 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5407 struct l2cap_le_conn_rsp rsp;
5408 struct l2cap_chan *chan, *pchan;
5409 u16 dcid, scid, credits, mtu, mps;
5413 if (cmd_len != sizeof(*req))
5416 scid = __le16_to_cpu(req->scid);
5417 mtu = __le16_to_cpu(req->mtu);
5418 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum LE data channel MTU/MPS. */
5423 if (mtu < 23 || mps < 23)
5426 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5429 /* Check if we have socket listening on psm */
5430 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5431 &conn->hcon->dst, LE_LINK);
5433 result = L2CAP_CR_BAD_PSM;
5438 mutex_lock(&conn->chan_lock);
5439 l2cap_chan_lock(pchan);
5441 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5442 result = L2CAP_CR_AUTHENTICATION;
5444 goto response_unlock;
5447 /* Check if we already have channel with that dcid */
5448 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5449 result = L2CAP_CR_NO_MEM;
5451 goto response_unlock;
5454 chan = pchan->ops->new_connection(pchan);
5456 result = L2CAP_CR_NO_MEM;
5457 goto response_unlock;
5460 l2cap_le_flowctl_init(chan);
5462 bacpy(&chan->src, &conn->hcon->src);
5463 bacpy(&chan->dst, &conn->hcon->dst);
5464 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5465 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5469 chan->remote_mps = mps;
5470 chan->tx_credits = __le16_to_cpu(req->credits);
5472 __l2cap_chan_add(conn, chan);
5474 credits = chan->rx_credits;
5476 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5478 chan->ident = cmd->ident;
5480 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Deferred setup: answer PEND now, final response comes later. */
5481 l2cap_state_change(chan, BT_CONNECT2);
5482 result = L2CAP_CR_PEND;
5483 chan->ops->defer(chan);
5485 l2cap_chan_ready(chan);
5486 result = L2CAP_CR_SUCCESS;
5490 l2cap_chan_unlock(pchan);
5491 mutex_unlock(&conn->chan_lock);
5493 if (result == L2CAP_CR_PEND)
5498 rsp.mtu = cpu_to_le16(chan->imtu);
5499 rsp.mps = cpu_to_le16(chan->mps);
5505 rsp.dcid = cpu_to_le16(dcid);
5506 rsp.credits = cpu_to_le16(credits);
5507 rsp.result = cpu_to_le16(result);
5509 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * tx_credits, guarding against overflowing LE_FLOWCTL_MAX_CREDITS, then
 * flush as much of the tx queue as the new credit count allows.
 */
5514 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5515 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5518 struct l2cap_le_credits *pkt;
5519 struct l2cap_chan *chan;
5520 u16 cid, credits, max_credits;
5522 if (cmd_len != sizeof(*pkt))
5525 pkt = (struct l2cap_le_credits *) data;
5526 cid = __le16_to_cpu(pkt->cid);
5527 credits = __le16_to_cpu(pkt->credits);
5529 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5531 chan = l2cap_get_chan_by_dcid(conn, cid);
/* A peer granting more than the 65535 cap is a protocol violation. */
5535 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5536 if (credits > max_credits) {
5537 BT_ERR("LE credits overflow");
5538 l2cap_send_disconn_req(chan, ECONNRESET);
5540 /* Return 0 so that we don't trigger an unnecessary
5541 * command reject packet.
5546 chan->tx_credits += credits;
5548 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5549 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Credits left over after draining the queue: unblock the sender. */
5553 if (chan->tx_credits)
5554 chan->ops->resume(chan);
5556 l2cap_chan_unlock(chan);
/* Handle an LE Command Reject: the peer refused the request matching
 * this ident, so delete the pending channel with ECONNREFUSED.
 */
5561 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5562 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5565 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5566 struct l2cap_chan *chan;
5568 if (cmd_len < sizeof(*rej))
5571 mutex_lock(&conn->chan_lock);
5573 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5577 l2cap_chan_lock(chan);
5578 l2cap_chan_del(chan, ECONNREFUSED);
5579 l2cap_chan_unlock(chan);
5582 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler by opcode.
 * Unknown opcodes are logged.
 * NOTE(review): break statements and the default-case error value are
 * elided in this excerpt.
 */
5586 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5587 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5592 switch (cmd->code) {
5593 case L2CAP_COMMAND_REJ:
5594 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5597 case L2CAP_CONN_PARAM_UPDATE_REQ:
5598 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5601 case L2CAP_CONN_PARAM_UPDATE_RSP:
5604 case L2CAP_LE_CONN_RSP:
5605 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5608 case L2CAP_LE_CONN_REQ:
5609 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5612 case L2CAP_LE_CREDITS:
5613 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5616 case L2CAP_DISCONN_REQ:
5617 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5620 case L2CAP_DISCONN_RSP:
5621 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5625 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the LE signaling channel payload of one skb: exactly one
 * command per PDU.  Malformed commands (length mismatch or zero ident)
 * are dropped; handler errors trigger a Command Reject.
 * NOTE(review): some original lines (drop label, kfree_skb) are elided
 * in this excerpt.
 */
5633 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5634 struct sk_buff *skb)
5636 struct hci_conn *hcon = conn->hcon;
5637 struct l2cap_cmd_hdr *cmd;
5641 if (hcon->type != LE_LINK)
5644 if (skb->len < L2CAP_CMD_HDR_SIZE)
5647 cmd = (void *) skb->data;
5648 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5650 len = le16_to_cpu(cmd->len);
5652 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* The declared length must match the remaining payload exactly. */
5654 if (len != skb->len || !cmd->ident) {
5655 BT_DBG("corrupted command");
5659 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5661 struct l2cap_cmd_rej_unk rej;
5663 BT_ERR("Wrong link type (%d)", err);
5665 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5666 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process the BR/EDR signaling channel payload of one skb, which may
 * contain several commands back to back.  Each command is copied into a
 * local header, validated, and dispatched; handler errors trigger a
 * Command Reject.
 * NOTE(review): some original lines (len initialization, data advance
 * by cmd_len, kfree_skb) are elided in this excerpt.
 */
5674 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5675 struct sk_buff *skb)
5677 struct hci_conn *hcon = conn->hcon;
5678 u8 *data = skb->data;
5680 struct l2cap_cmd_hdr cmd;
/* Raw sockets get a copy of all signaling traffic. */
5683 l2cap_raw_recv(conn, skb);
5685 if (hcon->type != ACL_LINK)
5688 while (len >= L2CAP_CMD_HDR_SIZE) {
5690 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5691 data += L2CAP_CMD_HDR_SIZE;
5692 len -= L2CAP_CMD_HDR_SIZE;
5694 cmd_len = le16_to_cpu(cmd.len);
5696 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* A command may not claim more payload than remains in the PDU. */
5699 if (cmd_len > len || !cmd.ident) {
5700 BT_DBG("corrupted command");
5704 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5706 struct l2cap_cmd_rej_unk rej;
5708 BT_ERR("Wrong link type (%d)", err);
5710 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5711 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS trailing an ERTM/streaming frame.  The FCS is
 * trimmed off the skb first; the received value is then read from just
 * past the new tail and compared to a CRC computed over header+payload.
 * NOTE(review): return statements are elided in this excerpt.
 */
5723 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5725 u16 our_fcs, rcv_fcs;
/* Extended control fields use the larger header size. */
5728 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5729 hdr_size = L2CAP_EXT_HDR_SIZE;
5731 hdr_size = L2CAP_ENH_HDR_SIZE;
5733 if (chan->fcs == L2CAP_FCS_CRC16) {
5734 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
/* After the trim, skb->data + skb->len points at the FCS bytes. */
5735 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5736 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5738 if (our_fcs != rcv_fcs)
/* Acknowledge received frames with the F-bit set: send RNR if we are
 * locally busy, otherwise flush pending I-frames, and fall back to an
 * RR S-frame if no frame carried the F-bit.
 */
5744 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5746 struct l2cap_ctrl control;
5748 BT_DBG("chan %p", chan);
5750 memset(&control, 0, sizeof(control));
5753 control.reqseq = chan->buffer_seq;
5754 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5756 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5757 control.super = L2CAP_SUPER_RNR;
5758 l2cap_send_sframe(chan, &control);
/* Remote was busy and we have unacked frames: restart retransmit. */
5761 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5762 chan->unacked_frames > 0)
5763 __set_retrans_timer(chan);
5765 /* Send pending iframes */
5766 l2cap_ertm_send(chan);
5768 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5769 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5770 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5773 control.super = L2CAP_SUPER_RR;
5774 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the list tail via
 * *last_frag, and keep skb's len/data_len/truesize accounting in sync.
 */
5778 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5779 struct sk_buff **last_frag)
5781 /* skb->len reflects data in skb as well as all fragments
5782 * skb->data_len reflects only data in fragments
5784 if (!skb_has_frag_list(skb))
5785 skb_shinfo(skb)->frag_list = new_frag;
5787 new_frag->next = NULL;
5789 (*last_frag)->next = new_frag;
5790 *last_frag = new_frag;
5792 skb->len += new_frag->len;
5793 skb->data_len += new_frag->len;
5794 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged I-frames and deliver it to the
 * channel's ->recv() op.  Unsegmented frames are delivered directly;
 * START frames pull the SDU length and begin accumulation in chan->sdu;
 * CONTINUE/END frames are appended via append_skb_frag().  On error the
 * partial SDU is freed.
 * NOTE(review): interior lines (error labels, breaks) are elided here.
 */
5797 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5798 struct l2cap_ctrl *control)
5802 switch (control->sar) {
5803 case L2CAP_SAR_UNSEGMENTED:
5807 err = chan->ops->recv(chan, skb);
5810 case L2CAP_SAR_START:
/* First two bytes of a START frame carry the total SDU length. */
5814 chan->sdu_len = get_unaligned_le16(skb->data);
5815 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Reject SDUs larger than the incoming MTU. */
5817 if (chan->sdu_len > chan->imtu) {
5822 if (skb->len >= chan->sdu_len)
5826 chan->sdu_last_frag = skb;
5832 case L2CAP_SAR_CONTINUE:
5836 append_skb_frag(chan->sdu, skb,
5837 &chan->sdu_last_frag);
5840 if (chan->sdu->len >= chan->sdu_len)
5850 append_skb_frag(chan->sdu, skb,
5851 &chan->sdu_last_frag);
/* END frame: total accumulated length must match the announced one. */
5854 if (chan->sdu->len != chan->sdu_len)
5857 err = chan->ops->recv(chan, chan->sdu);
5860 /* Reassembly complete */
5862 chan->sdu_last_frag = NULL;
/* Error path: discard the partially reassembled SDU. */
5870 kfree_skb(chan->sdu)
5872 chan->sdu_last_frag = NULL;
/* Re-segment queued data after an MTU change (body elided in this
 * excerpt — NOTE(review): confirm against the full source).
 */
5879 static int l2cap_resegment(struct l2cap_chan *chan)
/* Signal local-busy state changes into the ERTM TX state machine.
 * Only meaningful in ERTM mode; @busy selects between the
 * LOCAL_BUSY_DETECTED and LOCAL_BUSY_CLEAR events.
 */
5885 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5889 if (chan->mode != L2CAP_MODE_ERTM)
5892 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5893 l2cap_tx(chan, NULL, NULL, event);
/* After SREJ recovery, drain in-sequence frames from the srej_q into the
 * reassembler, stopping at the first sequence gap or on local-busy.
 * When the queue empties, return to the RECV state and send an ack.
 */
5896 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5899 /* Pass sequential frames to l2cap_reassemble_sdu()
5900 * until a gap is encountered.
5903 BT_DBG("chan %p", chan);
5905 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5906 struct sk_buff *skb;
5907 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5908 chan->buffer_seq, skb_queue_len(&chan->srej_q));
/* Look up the next expected txseq in the out-of-order queue. */
5910 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5915 skb_unlink(skb, &chan->srej_q);
5916 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5917 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All gaps filled: recovery complete. */
5922 if (skb_queue_empty(&chan->srej_q)) {
5923 chan->rx_state = L2CAP_RX_STATE_RECV;
5924 l2cap_send_ack(chan);
/* Handle an incoming SREJ S-frame: validate reqseq, enforce the max_tx
 * retry limit, then retransmit the requested frame.  The P/F bits decide
 * whether the F-bit must be echoed and whether SREJ_ACT bookkeeping is
 * needed while in the WAIT_F TX state.
 * NOTE(review): interior lines (returns/braces) are elided here.
 */
5930 static void l2cap_handle_srej(struct l2cap_chan *chan,
5931 struct l2cap_ctrl *control)
5933 struct sk_buff *skb;
5935 BT_DBG("chan %p, control %p", chan, control);
/* An SREJ for the next (never sent) seq is a protocol violation. */
5937 if (control->reqseq == chan->next_tx_seq) {
5938 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5939 l2cap_send_disconn_req(chan, ECONNRESET);
5943 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5946 BT_DBG("Seq %d not available for retransmission",
/* Give up the link once a frame has hit its retry limit. */
5951 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5952 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5953 l2cap_send_disconn_req(chan, ECONNRESET);
5957 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5959 if (control->poll) {
5960 l2cap_pass_to_tx(chan, control);
/* Poll set: retransmit with F-bit and push pending I-frames. */
5962 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5963 l2cap_retransmit(chan, control);
5964 l2cap_ertm_send(chan);
5966 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5967 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5968 chan->srej_save_reqseq = control->reqseq;
5971 l2cap_pass_to_tx_fbit(chan, control);
5973 if (control->final) {
/* Only retransmit if this F-bit doesn't answer an SREJ we already
 * acted upon for the same reqseq.
 */
5974 if (chan->srej_save_reqseq != control->reqseq ||
5975 !test_and_clear_bit(CONN_SREJ_ACT,
5977 l2cap_retransmit(chan, control);
5979 l2cap_retransmit(chan, control);
5980 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5981 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5982 chan->srej_save_reqseq = control->reqseq;
/* Handle an incoming REJ S-frame: validate reqseq, enforce max_tx, then
 * retransmit all unacked frames starting at reqseq.  REJ_ACT suppresses
 * a duplicate retransmission when the F-bit later confirms one already
 * performed in WAIT_F.
 * NOTE(review): interior lines are elided in this excerpt.
 */
5988 static void l2cap_handle_rej(struct l2cap_chan *chan,
5989 struct l2cap_ctrl *control)
5991 struct sk_buff *skb;
5993 BT_DBG("chan %p, control %p", chan, control);
/* REJ of a never-sent sequence number is invalid. */
5995 if (control->reqseq == chan->next_tx_seq) {
5996 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5997 l2cap_send_disconn_req(chan, ECONNRESET);
6001 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6003 if (chan->max_tx && skb &&
6004 bt_cb(skb)->control.retries >= chan->max_tx) {
6005 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6006 l2cap_send_disconn_req(chan, ECONNRESET);
6010 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6012 l2cap_pass_to_tx(chan, control);
6014 if (control->final) {
/* F=1: retransmit only if not already done for this REJ. */
6015 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6016 l2cap_retransmit_all(chan, control);
6018 l2cap_retransmit_all(chan, control);
6019 l2cap_ertm_send(chan);
6020 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6021 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's txseq relative to the RX window and
 * current recovery state: expected, duplicate, unexpected (gap), one of
 * the SREJ variants, or invalid (optionally ignorable — see the
 * "double poll" discussion inline).
 * NOTE(review): some interior lines are elided in this excerpt.
 */
6025 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6027 BT_DBG("chan %p, txseq %d", chan, txseq);
6029 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6030 chan->expected_tx_seq);
/* Extra classifications apply while SREJ recovery is in progress. */
6032 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6033 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6035 /* See notes below regarding "double poll" and
6038 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6039 BT_DBG("Invalid/Ignore - after SREJ");
6040 return L2CAP_TXSEQ_INVALID_IGNORE;
6042 BT_DBG("Invalid - in window after SREJ sent");
6043 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we want next. */
6047 if (chan->srej_list.head == txseq) {
6048 BT_DBG("Expected SREJ");
6049 return L2CAP_TXSEQ_EXPECTED_SREJ;
6052 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6053 BT_DBG("Duplicate SREJ - txseq already stored");
6054 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6057 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6058 BT_DBG("Unexpected SREJ - not requested");
6059 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6063 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it lies outside tx_win. */
6064 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6066 BT_DBG("Invalid - txseq outside tx window");
6067 return L2CAP_TXSEQ_INVALID;
6070 return L2CAP_TXSEQ_EXPECTED;
6074 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6075 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6076 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6077 return L2CAP_TXSEQ_DUPLICATE;
6080 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6081 /* A source of invalid packets is a "double poll" condition,
6082 * where delays cause us to send multiple poll packets. If
6083 * the remote stack receives and processes both polls,
6084 * sequence numbers can wrap around in such a way that a
6085 * resent frame has a sequence number that looks like new data
6086 * with a sequence gap. This would trigger an erroneous SREJ
6089 * Fortunately, this is impossible with a tx window that's
6090 * less than half of the maximum sequence number, which allows
6091 * invalid frames to be safely ignored.
6093 * With tx window sizes greater than half of the tx window
6094 * maximum, the frame is invalid and cannot be ignored. This
6095 * causes a disconnect.
6098 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6099 BT_DBG("Invalid/Ignore - txseq outside tx window");
6100 return L2CAP_TXSEQ_INVALID_IGNORE;
6102 BT_DBG("Invalid - txseq outside tx window");
6103 return L2CAP_TXSEQ_INVALID;
/* In-window but ahead of expected_tx_seq: frames were missed. */
6106 BT_DBG("Unexpected - txseq indicates missing frames");
6107 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, RECV state: dispatch on the received event
 * (I-frame or RR/REJ/RNR/SREJ S-frame).  Expected I-frames are
 * reassembled; a sequence gap triggers SREJ recovery and a transition
 * to SREJ_SENT.  Frames not consumed (skb_in_use stays false) are freed
 * at the end.
 * NOTE(review): interior lines (breaks, skb_in_use updates, frees) are
 * elided in this excerpt.
 */
6111 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6112 struct l2cap_ctrl *control,
6113 struct sk_buff *skb, u8 event)
6116 bool skb_in_use = false;
6118 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6122 case L2CAP_EV_RECV_IFRAME:
6123 switch (l2cap_classify_txseq(chan, control->txseq)) {
6124 case L2CAP_TXSEQ_EXPECTED:
6125 l2cap_pass_to_tx(chan, control);
/* While locally busy, drop the frame; SREJ recovery will pick
 * it up once busy clears.
 */
6127 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6128 BT_DBG("Busy, discarding expected seq %d",
6133 chan->expected_tx_seq = __next_seq(chan,
6136 chan->buffer_seq = chan->expected_tx_seq;
6139 err = l2cap_reassemble_sdu(chan, skb, control);
6143 if (control->final) {
6144 if (!test_and_clear_bit(CONN_REJ_ACT,
6145 &chan->conn_state)) {
6147 l2cap_retransmit_all(chan, control);
6148 l2cap_ertm_send(chan);
6152 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6153 l2cap_send_ack(chan);
6155 case L2CAP_TXSEQ_UNEXPECTED:
6156 l2cap_pass_to_tx(chan, control);
6158 /* Can't issue SREJ frames in the local busy state.
6159 * Drop this frame, it will be seen as missing
6160 * when local busy is exited.
6162 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6163 BT_DBG("Busy, discarding unexpected seq %d",
6168 /* There was a gap in the sequence, so an SREJ
6169 * must be sent for each missing frame. The
6170 * current frame is stored for later use.
6172 skb_queue_tail(&chan->srej_q, skb);
6174 BT_DBG("Queued %p (queue len %d)", skb,
6175 skb_queue_len(&chan->srej_q));
6177 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6178 l2cap_seq_list_clear(&chan->srej_list);
6179 l2cap_send_srej(chan, control->txseq);
/* Enter SREJ recovery until the gap is filled. */
6181 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6183 case L2CAP_TXSEQ_DUPLICATE:
6184 l2cap_pass_to_tx(chan, control);
6186 case L2CAP_TXSEQ_INVALID_IGNORE:
6188 case L2CAP_TXSEQ_INVALID:
6190 l2cap_send_disconn_req(chan, ECONNRESET);
6194 case L2CAP_EV_RECV_RR:
6195 l2cap_pass_to_tx(chan, control);
6196 if (control->final) {
6197 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6199 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6200 !__chan_is_moving(chan)) {
6202 l2cap_retransmit_all(chan, control);
6205 l2cap_ertm_send(chan);
6206 } else if (control->poll) {
/* RR with P=1: answer the poll. */
6207 l2cap_send_i_or_rr_or_rnr(chan);
6209 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6210 &chan->conn_state) &&
6211 chan->unacked_frames)
6212 __set_retrans_timer(chan);
6214 l2cap_ertm_send(chan);
6217 case L2CAP_EV_RECV_RNR:
6218 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6219 l2cap_pass_to_tx(chan, control);
6220 if (control && control->poll) {
6221 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6222 l2cap_send_rr_or_rnr(chan, 0);
/* Remote is busy: stop retransmitting until it recovers. */
6224 __clear_retrans_timer(chan);
6225 l2cap_seq_list_clear(&chan->retrans_list);
6227 case L2CAP_EV_RECV_REJ:
6228 l2cap_handle_rej(chan, control);
6230 case L2CAP_EV_RECV_SREJ:
6231 l2cap_handle_srej(chan, control);
/* Frames not queued or consumed above are released here. */
6237 if (skb && !skb_in_use) {
6238 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state: an SREJ recovery is pending.
 * Incoming I-frames are queued in srej_q; a frame matching the head of
 * the SREJ list lets queued in-sequence frames drain via
 * l2cap_rx_queued_iframes().  Further gaps extend the SREJ list.
 * S-frames are handled largely as in RECV, with SREJ-tail polling.
 * NOTE(review): interior lines (breaks, skb_in_use updates, frees) are
 * elided in this excerpt.
 */
6245 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6246 struct l2cap_ctrl *control,
6247 struct sk_buff *skb, u8 event)
6250 u16 txseq = control->txseq;
6251 bool skb_in_use = false;
6253 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6257 case L2CAP_EV_RECV_IFRAME:
6258 switch (l2cap_classify_txseq(chan, txseq)) {
6259 case L2CAP_TXSEQ_EXPECTED:
6260 /* Keep frame for reassembly later */
6261 l2cap_pass_to_tx(chan, control);
6262 skb_queue_tail(&chan->srej_q, skb);
6264 BT_DBG("Queued %p (queue len %d)", skb,
6265 skb_queue_len(&chan->srej_q));
6267 chan->expected_tx_seq = __next_seq(chan, txseq);
6269 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The retransmission we asked for arrived: drop it from the
 * SREJ list and try to drain the queue.
 */
6270 l2cap_seq_list_pop(&chan->srej_list);
6272 l2cap_pass_to_tx(chan, control);
6273 skb_queue_tail(&chan->srej_q, skb);
6275 BT_DBG("Queued %p (queue len %d)", skb,
6276 skb_queue_len(&chan->srej_q));
6278 err = l2cap_rx_queued_iframes(chan);
6283 case L2CAP_TXSEQ_UNEXPECTED:
6284 /* Got a frame that can't be reassembled yet.
6285 * Save it for later, and send SREJs to cover
6286 * the missing frames.
6288 skb_queue_tail(&chan->srej_q, skb);
6290 BT_DBG("Queued %p (queue len %d)", skb,
6291 skb_queue_len(&chan->srej_q));
6293 l2cap_pass_to_tx(chan, control);
6294 l2cap_send_srej(chan, control->txseq);
6296 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6297 /* This frame was requested with an SREJ, but
6298 * some expected retransmitted frames are
6299 * missing. Request retransmission of missing
6302 skb_queue_tail(&chan->srej_q, skb);
6304 BT_DBG("Queued %p (queue len %d)", skb,
6305 skb_queue_len(&chan->srej_q));
6307 l2cap_pass_to_tx(chan, control);
6308 l2cap_send_srej_list(chan, control->txseq);
6310 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6311 /* We've already queued this frame. Drop this copy. */
6312 l2cap_pass_to_tx(chan, control);
6314 case L2CAP_TXSEQ_DUPLICATE:
6315 /* Expecting a later sequence number, so this frame
6316 * was already received. Ignore it completely.
6319 case L2CAP_TXSEQ_INVALID_IGNORE:
6321 case L2CAP_TXSEQ_INVALID:
6323 l2cap_send_disconn_req(chan, ECONNRESET);
6327 case L2CAP_EV_RECV_RR:
6328 l2cap_pass_to_tx(chan, control);
6329 if (control->final) {
6330 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6332 if (!test_and_clear_bit(CONN_REJ_ACT,
6333 &chan->conn_state)) {
6335 l2cap_retransmit_all(chan, control);
6338 l2cap_ertm_send(chan);
6339 } else if (control->poll) {
6340 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6341 &chan->conn_state) &&
6342 chan->unacked_frames) {
6343 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the SREJ tail with F set. */
6346 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6347 l2cap_send_srej_tail(chan);
6349 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6350 &chan->conn_state) &&
6351 chan->unacked_frames)
6352 __set_retrans_timer(chan);
6354 l2cap_send_ack(chan);
6357 case L2CAP_EV_RECV_RNR:
6358 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6359 l2cap_pass_to_tx(chan, control);
6360 if (control->poll) {
6361 l2cap_send_srej_tail(chan);
6363 struct l2cap_ctrl rr_control;
/* No poll: just acknowledge with a plain RR. */
6364 memset(&rr_control, 0, sizeof(rr_control));
6365 rr_control.sframe = 1;
6366 rr_control.super = L2CAP_SUPER_RR;
6367 rr_control.reqseq = chan->buffer_seq;
6368 l2cap_send_sframe(chan, &rr_control);
6372 case L2CAP_EV_RECV_REJ:
6373 l2cap_handle_rej(chan, control);
6375 case L2CAP_EV_RECV_SREJ:
6376 l2cap_handle_srej(chan, control);
6380 if (skb && !skb_in_use) {
6381 BT_DBG("Freeing %p", skb);
/* Finalize an AMP channel move: return to RECV state, adopt the MTU of
 * the new link (block MTU for an AMP/high-speed HCI connection,
 * otherwise the ACL MTU), and re-segment queued data accordingly.
 */
6388 static int l2cap_finish_move(struct l2cap_chan *chan)
6390 BT_DBG("chan %p", chan);
6392 chan->rx_state = L2CAP_RX_STATE_RECV;
6395 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6397 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6399 return l2cap_resegment(chan);
/* RX state while waiting for a poll (P=1) after a channel move.  On the
 * poll, rewind the TX queue to the peer's reqseq, finish the move, then
 * answer the poll; the frame is afterwards re-dispatched through the
 * RECV-state handler.
 * NOTE(review): interior lines are elided in this excerpt.
 */
6402 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6403 struct l2cap_ctrl *control,
6404 struct sk_buff *skb, u8 event)
6408 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6414 l2cap_process_reqseq(chan, control->reqseq);
6416 if (!skb_queue_empty(&chan->tx_q))
6417 chan->tx_send_head = skb_peek(&chan->tx_q);
6419 chan->tx_send_head = NULL;
6421 /* Rewind next_tx_seq to the point expected
6424 chan->next_tx_seq = control->reqseq;
6425 chan->unacked_frames = 0;
6427 err = l2cap_finish_move(chan);
6431 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6432 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frames carry data that still needs normal RECV processing. */
6434 if (event == L2CAP_EV_RECV_IFRAME)
6437 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state while waiting for the final (F=1) response after a move.
 * On F=1: return to RECV, rewind the TX queue to reqseq, adopt the new
 * link's MTU, re-segment, then process the frame via the RECV handler.
 * NOTE(review): interior lines are elided in this excerpt.
 */
6440 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6441 struct l2cap_ctrl *control,
6442 struct sk_buff *skb, u8 event)
/* Ignore everything until the F-bit arrives. */
6446 if (!control->final)
6449 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6451 chan->rx_state = L2CAP_RX_STATE_RECV;
6452 l2cap_process_reqseq(chan, control->reqseq);
6454 if (!skb_queue_empty(&chan->tx_q))
6455 chan->tx_send_head = skb_peek(&chan->tx_q);
6457 chan->tx_send_head = NULL;
6459 /* Rewind next_tx_seq to the point expected
6462 chan->next_tx_seq = control->reqseq;
6463 chan->unacked_frames = 0;
6466 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6468 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6470 err = l2cap_resegment(chan);
6473 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true when @reqseq acknowledges a frame that has actually been
 * sent but not yet acked, i.e. its offset from next_tx_seq lies within
 * the span of unacknowledged frames (modular sequence arithmetic).
 */
6478 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6480 /* Make sure reqseq is for a packet that has been sent but not acked */
6483 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6484 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatcher: validate the frame's reqseq, then route
 * it to the handler for the channel's current RX state (RECV, SREJ_SENT,
 * WAIT_P, WAIT_F).  An invalid reqseq disconnects the channel.
 */
6487 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6488 struct sk_buff *skb, u8 event)
6492 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6493 control, skb, event, chan->rx_state);
6495 if (__valid_reqseq(chan, control->reqseq)) {
6496 switch (chan->rx_state) {
6497 case L2CAP_RX_STATE_RECV:
6498 err = l2cap_rx_state_recv(chan, control, skb, event);
6500 case L2CAP_RX_STATE_SREJ_SENT:
6501 err = l2cap_rx_state_srej_sent(chan, control, skb,
6504 case L2CAP_RX_STATE_WAIT_P:
6505 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6507 case L2CAP_RX_STATE_WAIT_F:
6508 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* reqseq outside the unacked span: protocol error. */
6515 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6516 control->reqseq, chan->next_tx_seq,
6517 chan->expected_ack_seq);
6518 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode RX: only the exactly-expected txseq is reassembled;
 * anything else discards any partial SDU and the frame (streaming mode
 * has no retransmission).  Sequence tracking advances from the received
 * txseq regardless.
 * NOTE(review): interior lines are elided in this excerpt.
 */
6524 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6525 struct sk_buff *skb)
6529 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6532 if (l2cap_classify_txseq(chan, control->txseq) ==
6533 L2CAP_TXSEQ_EXPECTED) {
6534 l2cap_pass_to_tx(chan, control);
6536 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6537 __next_seq(chan, chan->buffer_seq));
6539 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6541 l2cap_reassemble_sdu(chan, skb, control);
/* Unexpected seq: a frame was lost — abandon the partial SDU. */
6544 kfree_skb(chan->sdu);
6547 chan->sdu_last_frag = NULL;
6551 BT_DBG("Freeing %p", skb);
6556 chan->last_acked_seq = control->txseq;
6557 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames: unpack the control field,
 * verify the FCS, validate the payload length against MPS and the F/P
 * bit rules, then hand I-frames to l2cap_rx()/l2cap_stream_rx() and map
 * S-frames to the corresponding RX events.
 * NOTE(review): interior lines (drops, returns) are elided here.
 */
6562 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6564 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6568 __unpack_control(chan, skb);
6573 * We can just drop the corrupted I-frame here.
6574 * Receiver will miss it and start proper recovery
6575 * procedures and ask for retransmission.
6577 if (l2cap_check_fcs(chan, skb))
/* Exclude the SDU-length prefix and FCS from the payload length. */
6580 if (!control->sframe && control->sar == L2CAP_SAR_START)
6581 len -= L2CAP_SDULEN_SIZE;
6583 if (chan->fcs == L2CAP_FCS_CRC16)
6584 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS is a protocol error. */
6586 if (len > chan->mps) {
6587 l2cap_send_disconn_req(chan, ECONNRESET);
6591 if (!control->sframe) {
6594 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6595 control->sar, control->reqseq, control->final,
6598 /* Validate F-bit - F=0 always valid, F=1 only
6599 * valid in TX WAIT_F
6601 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6604 if (chan->mode != L2CAP_MODE_STREAMING) {
6605 event = L2CAP_EV_RECV_IFRAME;
6606 err = l2cap_rx(chan, control, skb, event);
6608 err = l2cap_stream_rx(chan, control, skb);
6612 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit super field to the RX state-machine event. */
6614 const u8 rx_func_to_event[4] = {
6615 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6616 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6619 /* Only I-frames are expected in streaming mode */
6620 if (chan->mode == L2CAP_MODE_STREAMING)
6623 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6624 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation. */
6628 BT_ERR("Trailing bytes: %d in sframe", len);
6629 l2cap_send_disconn_req(chan, ECONNRESET);
6633 /* Validate F and P bits */
6634 if (control->final && (control->poll ||
6635 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6638 event = rx_func_to_event[control->super];
6639 if (l2cap_rx(chan, control, skb, event))
6640 l2cap_send_disconn_req(chan, ECONNRESET);
/* Top up the peer's LE flow-control credits: once our rx_credits drop
 * below half the initial allowance, send an LE Credits packet returning
 * the difference up to le_max_credits.
 */
6650 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6652 struct l2cap_conn *conn = chan->conn;
6653 struct l2cap_le_credits pkt;
6656 /* We return more credits to the sender only after the amount of
6657 * credits falls below half of the initial amount.
6659 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6662 return_credits = le_max_credits - chan->rx_credits;
6664 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6666 chan->rx_credits += return_credits;
6668 pkt.cid = cpu_to_le16(chan->scid);
6669 pkt.credits = cpu_to_le16(return_credits);
6671 chan->ident = l2cap_get_ident(conn);
6673 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive an LE flow-control (credit-based) PDU: account a credit,
 * possibly return credits to the sender, then either start a new SDU
 * (first fragment carries the SDU length) or append to the SDU under
 * reassembly, delivering via ->recv() when complete.  Oversized or
 * overflowing data frees the partial SDU.
 * NOTE(review): interior lines (returns, labels) are elided here.
 */
6676 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Peer sent data without credits: protocol violation. */
6680 if (!chan->rx_credits) {
6681 BT_ERR("No credits to receive LE L2CAP data");
6682 l2cap_send_disconn_req(chan, ECONNRESET);
6686 if (chan->imtu < skb->len) {
6687 BT_ERR("Too big LE L2CAP PDU");
6692 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6694 l2cap_chan_le_send_credits(chan);
/* First fragment: read the 2-byte SDU length prefix. */
6701 sdu_len = get_unaligned_le16(skb->data);
6702 skb_pull(skb, L2CAP_SDULEN_SIZE);
6704 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6705 sdu_len, skb->len, chan->imtu);
6707 if (sdu_len > chan->imtu) {
6708 BT_ERR("Too big LE L2CAP SDU length received");
6713 if (skb->len > sdu_len) {
6714 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU in one PDU: deliver immediately. */
6719 if (skb->len == sdu_len)
6720 return chan->ops->recv(chan, skb);
6723 chan->sdu_len = sdu_len;
6724 chan->sdu_last_frag = skb;
6729 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6730 chan->sdu->len, skb->len, chan->sdu_len);
6732 if (chan->sdu->len + skb->len > chan->sdu_len) {
6733 BT_ERR("Too much LE L2CAP data received");
6738 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6741 if (chan->sdu->len == chan->sdu_len) {
6742 err = chan->ops->recv(chan, chan->sdu);
6745 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU. */
6753 kfree_skb(chan->sdu);
6755 chan->sdu_last_frag = NULL;
6759 /* We can't return an error here since we took care of the skb
6760 * freeing internally. An error return would cause the caller to
6761 * do a double-free of the skb.
/* Route a data packet to the channel identified by @cid and dispatch it
 * by channel mode: LE flow-control, basic (drop on receive-buffer
 * overflow), or ERTM/streaming via l2cap_data_rcv().  A2MP CIDs may
 * create the channel on demand.
 * NOTE(review): interior lines (drops, unlocks) are elided here.
 */
6766 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6767 struct sk_buff *skb)
6769 struct l2cap_chan *chan;
6771 chan = l2cap_get_chan_by_scid(conn, cid);
/* No channel yet: the A2MP CID is allowed to create one lazily. */
6773 if (cid == L2CAP_CID_A2MP) {
6774 chan = a2mp_channel_create(conn, skb);
6780 l2cap_chan_lock(chan);
6782 BT_DBG("unknown cid 0x%4.4x", cid);
6783 /* Drop packet and return */
6789 BT_DBG("chan %p, len %d", chan, skb->len);
6791 if (chan->state != BT_CONNECTED)
6794 switch (chan->mode) {
6795 case L2CAP_MODE_LE_FLOWCTL:
6796 if (l2cap_le_data_rcv(chan, skb) < 0)
6801 case L2CAP_MODE_BASIC:
6802 /* If socket recv buffers overflows we drop data here
6803 * which is *bad* because L2CAP has to be reliable.
6804 * But we don't have any other choice. L2CAP doesn't
6805 * provide flow control mechanism. */
6807 if (chan->imtu < skb->len) {
6808 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6812 if (!chan->ops->recv(chan, skb))
6816 case L2CAP_MODE_ERTM:
6817 case L2CAP_MODE_STREAMING:
6818 l2cap_data_rcv(chan, skb);
6822 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6830 l2cap_chan_unlock(chan);
/* Deliver a connectionless (PSM-addressed) packet on an ACL link to a
 * matching global channel, after state and MTU checks.  The sender's
 * address and PSM are stashed in the skb cb for recvmsg's msg_name.
 * NOTE(review): interior lines (drops, frees) are elided here.
 */
6833 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6834 struct sk_buff *skb)
6836 struct hci_conn *hcon = conn->hcon;
6837 struct l2cap_chan *chan;
/* Connectionless data only exists on BR/EDR ACL links. */
6839 if (hcon->type != ACL_LINK)
6842 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6847 BT_DBG("chan %p, len %d", chan, skb->len);
6849 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6852 if (chan->imtu < skb->len)
6855 /* Store remote BD_ADDR and PSM for msg_name */
6856 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6857 bt_cb(skb)->psm = psm;
6859 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel packet on an LE link to the global ATT
 * channel, dropping data from blacklisted peers or oversized frames.
 * NOTE(review): interior lines (drops, frees) are elided here.
 */
6866 static void l2cap_att_channel(struct l2cap_conn *conn,
6867 struct sk_buff *skb)
6869 struct hci_conn *hcon = conn->hcon;
6870 struct l2cap_chan *chan;
6872 if (hcon->type != LE_LINK)
6875 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6876 &hcon->src, &hcon->dst);
6880 BT_DBG("chan %p, len %d", chan, skb->len);
/* Silently discard traffic from blacklisted remote devices. */
6882 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6885 if (chan->imtu < skb->len)
6888 if (!chan->ops->recv(chan, skb))
/* Demultiplex one complete L2CAP frame by CID: signaling, connectionless,
 * ATT, LE signaling, SMP, 6LoWPAN, or a dynamic data channel.  Frames
 * arriving before the HCI connection is fully up are queued on
 * conn->pending_rx for later processing.
 * NOTE(review): interior lines (drops, breaks) are elided here.
 */
6895 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6897 struct l2cap_hdr *lh = (void *) skb->data;
6898 struct hci_conn *hcon = conn->hcon;
/* Not connected yet: defer until process_pending_rx runs. */
6902 if (hcon->state != BT_CONNECTED) {
6903 BT_DBG("queueing pending rx skb");
6904 skb_queue_tail(&conn->pending_rx, skb);
6908 skb_pull(skb, L2CAP_HDR_SIZE);
6909 cid = __le16_to_cpu(lh->cid);
6910 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
6912 if (len != skb->len) {
6917 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6920 case L2CAP_CID_SIGNALING:
6921 l2cap_sig_channel(conn, skb);
6924 case L2CAP_CID_CONN_LESS:
6925 psm = get_unaligned((__le16 *) skb->data);
6926 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6927 l2cap_conless_channel(conn, psm, skb);
6931 l2cap_att_channel(conn, skb);
6934 case L2CAP_CID_LE_SIGNALING:
6935 l2cap_le_sig_channel(conn, skb);
/* SMP failure tears down the whole connection with EACCES. */
6939 if (smp_sig_channel(conn, skb))
6940 l2cap_conn_del(conn->hcon, EACCES);
6943 case L2CAP_FC_6LOWPAN:
6944 bt_6lowpan_recv(conn, skb);
6948 l2cap_data_channel(conn, cid, skb);
/* Work item: drain frames that arrived before the HCI connection was
 * fully established and feed them through l2cap_recv_frame().
 */
6953 static void process_pending_rx(struct work_struct *work)
6955 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6957 struct sk_buff *skb;
6961 while ((skb = skb_dequeue(&conn->pending_rx)))
6962 l2cap_recv_frame(conn, skb);
/* Allocate and initialize the per-HCI-connection L2CAP state: HCI
 * channel, refcount, MTU (LE vs ACL), locks, channel list, the
 * link-type-appropriate delayed work (security vs info timeout), and
 * the pending-rx queue/work.
 * NOTE(review): interior lines (NULL checks, returns) are elided here.
 */
6965 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6967 struct l2cap_conn *conn = hcon->l2cap_data;
6968 struct hci_chan *hchan;
6973 hchan = hci_chan_create(hcon);
6977 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the HCI channel before bailing out. */
6979 hci_chan_del(hchan);
6983 kref_init(&conn->ref);
6984 hcon->l2cap_data = conn;
/* conn holds a reference on the underlying HCI connection. */
6986 hci_conn_get(conn->hcon);
6987 conn->hchan = hchan;
6989 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6991 switch (hcon->type) {
6993 if (hcon->hdev->le_mtu) {
6994 conn->mtu = hcon->hdev->le_mtu;
6999 conn->mtu = hcon->hdev->acl_mtu;
7003 conn->feat_mask = 0;
7005 if (hcon->type == ACL_LINK)
7006 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
7007 &hcon->hdev->dev_flags);
7009 spin_lock_init(&conn->lock);
7010 mutex_init(&conn->chan_lock);
7012 INIT_LIST_HEAD(&conn->chan_l);
7013 INIT_LIST_HEAD(&conn->users);
/* LE links use the SMP security timer; BR/EDR the info-req timer. */
7015 if (hcon->type == LE_LINK)
7016 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
7018 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7020 skb_queue_head_init(&conn->pending_rx);
7021 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7023 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs must fit in
 * one byte; BR/EDR PSMs must be odd with bit 8 clear (per the L2CAP
 * PSM encoding rules).
 */
7028 static bool is_valid_psm(u16 psm, u8 dst_type) {
7032 if (bdaddr_type_is_le(dst_type))
7033 return (psm <= 0x00ff);
7035 /* PSM must be odd and lsb of upper byte must be 0 */
7036 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection: validate PSM/CID/mode and the
 * channel state, establish (or reuse) the underlying HCI link (LE or
 * ACL), attach the channel to the l2cap_conn, and either start the
 * L2CAP-level connect or mark the channel connected for fixed channels.
 * Returns 0 on success or a negative errno.
 * NOTE(review): interior lines (error labels, returns) are elided here.
 */
7039 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7040 bdaddr_t *dst, u8 dst_type)
7042 struct l2cap_conn *conn;
7043 struct hci_conn *hcon;
7044 struct hci_dev *hdev;
7048 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7049 dst_type, __le16_to_cpu(psm));
7051 hdev = hci_get_route(dst, &chan->src);
7053 return -EHOSTUNREACH;
7057 l2cap_chan_lock(chan);
/* RAW channels may omit PSM/CID; others need a valid PSM or a CID. */
7059 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7060 chan->chan_type != L2CAP_CHAN_RAW) {
7065 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7070 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7075 switch (chan->mode) {
7076 case L2CAP_MODE_BASIC:
7078 case L2CAP_MODE_LE_FLOWCTL:
7079 l2cap_le_flowctl_init(chan);
7081 case L2CAP_MODE_ERTM:
7082 case L2CAP_MODE_STREAMING:
7091 switch (chan->state) {
7095 /* Already connecting */
7100 /* Already connected */
7114 /* Set destination address and psm */
7115 bacpy(&chan->dst, dst);
7116 chan->dst_type = dst_type;
7121 auth_type = l2cap_get_auth_type(chan);
7123 if (bdaddr_type_is_le(dst_type)) {
7124 /* Convert from L2CAP channel address type to HCI address type
7126 if (dst_type == BDADDR_LE_PUBLIC)
7127 dst_type = ADDR_LE_DEV_PUBLIC;
7129 dst_type = ADDR_LE_DEV_RANDOM;
7131 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7134 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7138 err = PTR_ERR(hcon);
7142 conn = l2cap_conn_add(hcon);
7144 hci_conn_drop(hcon);
/* Reject a duplicate fixed-channel CID on this connection. */
7149 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7150 hci_conn_drop(hcon);
7155 /* Update source addr of the socket */
7156 bacpy(&chan->src, &hcon->src);
7157 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* chan must be unlocked while l2cap_chan_add takes conn->chan_lock. */
7159 l2cap_chan_unlock(chan);
7160 l2cap_chan_add(conn, chan);
7161 l2cap_chan_lock(chan);
7163 /* l2cap_chan_add takes its own ref so we can drop this one */
7164 hci_conn_drop(hcon);
7166 l2cap_state_change(chan, BT_CONNECT);
7167 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7169 /* Release chan->sport so that it can be reused by other
7170 * sockets (as it's only used for listening sockets).
7172 write_lock(&chan_list_lock);
7174 write_unlock(&chan_list_lock);
7176 if (hcon->state == BT_CONNECTED) {
7177 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7178 __clear_chan_timer(chan);
7179 if (l2cap_chan_check_security(chan))
7180 l2cap_state_change(chan, BT_CONNECTED);
7182 l2cap_do_start(chan);
7188 l2cap_chan_unlock(chan);
7189 hci_dev_unlock(hdev);
7194 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection by
 * scanning listening channels.  A channel bound to this adapter's own
 * address (exact match) takes precedence over BDADDR_ANY listeners;
 * the returned link-mode flags include ACCEPT and optionally MASTER.
 */
7196 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7198 int exact = 0, lm1 = 0, lm2 = 0;
7199 struct l2cap_chan *c;
7201 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7203 /* Find listening sockets and check their link_mode */
7204 read_lock(&chan_list_lock);
7205 list_for_each_entry(c, &chan_list, global_l) {
7206 if (c->state != BT_LISTEN)
7209 if (!bacmp(&c->src, &hdev->bdaddr)) {
7210 lm1 |= HCI_LM_ACCEPT;
7211 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7212 lm1 |= HCI_LM_MASTER;
7214 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7215 lm2 |= HCI_LM_ACCEPT;
7216 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7217 lm2 |= HCI_LM_MASTER;
7220 read_unlock(&chan_list_lock);
/* Exact-address listeners win over wildcard listeners. */
7222 return exact ? lm1 : lm2;
/* HCI callback on connection completion: on success, create the L2CAP
 * conn state and kick it ready; on failure, tear down with the HCI
 * status translated to an errno.
 */
7225 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7227 struct l2cap_conn *conn;
7229 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7232 conn = l2cap_conn_add(hcon);
7234 l2cap_conn_ready(conn);
7236 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the HCI reason code to use for a disconnect.
 * Falls back to REMOTE_USER_TERM when no L2CAP state exists.
 */
7240 int l2cap_disconn_ind(struct hci_conn *hcon)
7242 struct l2cap_conn *conn = hcon->l2cap_data;
7244 BT_DBG("hcon %p", hcon);
7247 return HCI_ERROR_REMOTE_USER_TERM;
7248 return conn->disc_reason;
/* HCI callback on disconnection: notify 6LoWPAN then tear down the
 * L2CAP connection state with the translated errno.
 */
7251 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7253 BT_DBG("hcon %p reason %d", hcon, reason);
7255 bt_6lowpan_del_conn(hcon->l2cap_data);
7257 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts the disconnect timer (MEDIUM security) or
 * closes the channel outright (HIGH/FIPS); regaining it cancels the
 * timer for MEDIUM security.
 */
7260 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7262 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7265 if (encrypt == 0x00) {
7266 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7267 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7268 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7269 chan->sec_level == BT_SECURITY_FIPS)
7270 l2cap_chan_close(chan, ECONNREFUSED);
7272 if (chan->sec_level == BT_SECURITY_MEDIUM)
7273 __clear_chan_timer(chan);
/* HCI callback when authentication/encryption completes.  On LE links,
 * success triggers SMP key distribution.  On BR/EDR, walk every channel
 * on the connection and advance its state machine: resume established
 * channels, (re)start pending connects, or answer deferred CONNECT2
 * channels with the appropriate connection response.
 * NOTE(review): interior lines (returns, continues) are elided here.
 */
7277 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7279 struct l2cap_conn *conn = hcon->l2cap_data;
7280 struct l2cap_chan *chan;
7285 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7287 if (hcon->type == LE_LINK) {
7288 if (!status && encrypt)
7289 smp_distribute_keys(conn);
7290 cancel_delayed_work(&conn->security_timer);
7293 mutex_lock(&conn->chan_lock);
7295 list_for_each_entry(chan, &conn->chan_l, list) {
7296 l2cap_chan_lock(chan);
7298 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7299 state_to_string(chan->state));
/* A2MP has its own security handling; skip it here. */
7301 if (chan->scid == L2CAP_CID_A2MP) {
7302 l2cap_chan_unlock(chan);
7306 if (chan->scid == L2CAP_CID_ATT) {
7307 if (!status && encrypt) {
7308 chan->sec_level = hcon->sec_level;
7309 l2cap_chan_ready(chan);
7312 l2cap_chan_unlock(chan);
7316 if (!__l2cap_no_conn_pending(chan)) {
7317 l2cap_chan_unlock(chan);
/* Established channels just resume after a security change. */
7321 if (!status && (chan->state == BT_CONNECTED ||
7322 chan->state == BT_CONFIG)) {
7323 chan->ops->resume(chan);
7324 l2cap_check_encryption(chan, encrypt);
7325 l2cap_chan_unlock(chan);
7329 if (chan->state == BT_CONNECT) {
7331 l2cap_start_connection(chan);
7333 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7334 } else if (chan->state == BT_CONNECT2) {
7335 struct l2cap_conn_rsp rsp;
/* Pick the connect-response result/status for this outcome. */
7339 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7340 res = L2CAP_CR_PEND;
7341 stat = L2CAP_CS_AUTHOR_PEND;
7342 chan->ops->defer(chan);
7344 l2cap_state_change(chan, BT_CONFIG);
7345 res = L2CAP_CR_SUCCESS;
7346 stat = L2CAP_CS_NO_INFO;
7349 l2cap_state_change(chan, BT_DISCONN);
7350 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7351 res = L2CAP_CR_SEC_BLOCK;
7352 stat = L2CAP_CS_NO_INFO;
7355 rsp.scid = cpu_to_le16(chan->dcid);
7356 rsp.dcid = cpu_to_le16(chan->scid);
7357 rsp.result = cpu_to_le16(res);
7358 rsp.status = cpu_to_le16(stat);
7359 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Successful accept: follow up immediately with a config request. */
7362 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7363 res == L2CAP_CR_SUCCESS) {
7365 set_bit(CONF_REQ_SENT, &chan->conf_state);
7366 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7368 l2cap_build_conf_req(chan, buf),
7370 chan->num_conf_req++;
7374 l2cap_chan_unlock(chan);
7377 mutex_unlock(&conn->chan_lock);
/*
 * l2cap_recv_acldata - HCI callback: inbound ACL data for this link
 * @hcon:  originating HCI connection
 * @skb:   received ACL fragment
 * @flags: ACL packet boundary flags (start vs. continuation frame)
 *
 * Reassembles L2CAP PDUs that arrive split across multiple ACL packets.
 * conn->rx_skb holds the partially reassembled frame and conn->rx_len
 * the number of bytes still expected; complete frames are handed to
 * l2cap_recv_frame(), which takes ownership of the skb.
 *
 * NOTE(review): several original lines are missing from this extract
 * (the switch statement head, drop/return paths, the alloc-failure
 * check after bt_skb_alloc) — confirm error handling against the
 * complete file.
 */
7382 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7384 struct l2cap_conn *conn = hcon->l2cap_data;
7385 struct l2cap_hdr *hdr;
7388 /* For AMP controller do not create l2cap conn */
7389 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7393 conn = l2cap_conn_add(hcon);
7398 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7402 case ACL_START_NO_FLUSH:
/* A new start frame while reassembly is still in progress means the
 * previous PDU was truncated: drop the partial frame and flag the
 * connection unreliable. */
7405 BT_ERR("Unexpected start frame (len %d)", skb->len);
7406 kfree_skb(conn->rx_skb);
7407 conn->rx_skb = NULL;
7409 l2cap_conn_unreliable(conn, ECOMM);
7412 /* Start fragment always begin with Basic L2CAP header */
7413 if (skb->len < L2CAP_HDR_SIZE) {
7414 BT_ERR("Frame is too short (len %d)", skb->len);
7415 l2cap_conn_unreliable(conn, ECOMM);
7419 hdr = (struct l2cap_hdr *) skb->data;
/* Total PDU size = payload length from the wire header + header. */
7420 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7422 if (len == skb->len) {
7423 /* Complete frame received */
7424 l2cap_recv_frame(conn, skb);
7428 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
/* First fragment larger than the advertised PDU: malformed. */
7430 if (skb->len > len) {
7431 BT_ERR("Frame is too long (len %d, expected len %d)",
7433 l2cap_conn_unreliable(conn, ECOMM);
7437 /* Allocate skb for the complete frame (with header) */
7438 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7442 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len = bytes of this PDU still outstanding. */
7444 conn->rx_len = len - skb->len;
7448 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation frame with no reassembly in progress: protocol
 * violation by the peer. */
7450 if (!conn->rx_len) {
7451 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7452 l2cap_conn_unreliable(conn, ECOMM);
/* Fragment would overflow the expected PDU: drop everything. */
7456 if (skb->len > conn->rx_len) {
7457 BT_ERR("Fragment is too long (len %d, expected %d)",
7458 skb->len, conn->rx_len);
7459 kfree_skb(conn->rx_skb);
7460 conn->rx_skb = NULL;
7462 l2cap_conn_unreliable(conn, ECOMM);
7466 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7468 conn->rx_len -= skb->len;
7470 if (!conn->rx_len) {
7471 /* Complete frame received. l2cap_recv_frame
7472 * takes ownership of the skb so set the global
7473 * rx_skb pointer to NULL first.
7475 struct sk_buff *rx_skb = conn->rx_skb;
7476 conn->rx_skb = NULL;
7477 l2cap_recv_frame(conn, rx_skb);
/*
 * l2cap_debugfs_show - seq_file show callback for /sys/kernel/debug/.../l2cap
 *
 * Dumps one line per registered L2CAP channel (addresses, state, PSM,
 * CIDs, MTUs, security level, mode) while holding chan_list_lock for
 * read so the global channel list cannot change underneath us.
 */
7487 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7489 struct l2cap_chan *c;
7491 read_lock(&chan_list_lock);
7493 list_for_each_entry(c, &chan_list, global_l) {
/* PSM is stored little-endian on the wire; convert for display. */
7494 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7496 c->state, __le16_to_cpu(c->psm),
7497 c->scid, c->dcid, c->imtu, c->omtu,
7498 c->sec_level, c->mode);
7501 read_unlock(&chan_list_lock);
/* debugfs open callback: standard single_open() wrapper around the
 * show function above. */
7506 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7508 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry; the usual seq_file
 * single_open/seq_lseek/single_release trio.
 * NOTE(review): the .read line is not visible in this extract —
 * presumably seq_read; confirm against the full file. */
7511 static const struct file_operations l2cap_debugfs_fops = {
7512 .open = l2cap_debugfs_open,
7514 .llseek = seq_lseek,
7515 .release = single_release,
/* Dentry of the "l2cap" debugfs file, kept so l2cap_exit() can remove it. */
7518 static struct dentry *l2cap_debugfs;
/*
 * l2cap_init - module init: register L2CAP sockets and debugfs entries
 *
 * Creates the read-only "l2cap" channel dump plus two writable u16
 * knobs for LE flow-control defaults (credits and MPS). debugfs
 * creation is best-effort and skipped if bt_debugfs is unavailable.
 *
 * NOTE(review): the error-return lines after l2cap_init_sockets() and
 * the final return are missing from this extract.
 */
7520 int __init l2cap_init(void)
7524 err = l2cap_init_sockets();
7528 if (IS_ERR_OR_NULL(bt_debugfs))
7531 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7532 NULL, &l2cap_debugfs_fops);
7534 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7536 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
/*
 * l2cap_exit - module teardown: undo l2cap_init() in reverse order
 * (6LoWPAN cleanup, remove the debugfs file, unregister sockets).
 */
7544 void l2cap_exit(void)
7546 bt_6lowpan_cleanup();
7547 debugfs_remove(l2cap_debugfs);
7548 l2cap_cleanup_sockets();
/* Runtime-writable module parameter to disable ERTM (declared earlier
 * in the file, outside this extract). */
7551 module_param(disable_ertm, bool, 0644);
7552 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");