2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
62 /* ---- L2CAP channels ---- */
/* ---- Channel lookup helpers ----
 * The __ prefixed variants walk conn->chan_l with no locking of their own;
 * the wrappers below take conn->chan_lock around them.
 * NOTE(review): this extract has lines elided — bodies are incomplete.
 */
64 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
69 list_for_each_entry(c, &conn->chan_l, list) {
/* Unlocked walk of conn->chan_l matching on source CID. */
76 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
88 /* Find channel with given SCID.
89 * Returns locked channel. */
90 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
95 mutex_lock(&conn->chan_lock);
96 c = __l2cap_get_chan_by_scid(conn, cid);
99 mutex_unlock(&conn->chan_lock);
104 /* Find channel with given DCID.
105 * Returns locked channel.
 */
107 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
110 struct l2cap_chan *c;
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_dcid(conn, cid);
116 mutex_unlock(&conn->chan_lock);
/* Unlocked walk matching on the outstanding signalling-command ident. */
121 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
124 struct l2cap_chan *c;
126 list_for_each_entry(c, &conn->chan_l, list) {
127 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
133 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 mutex_lock(&conn->chan_lock);
139 c = __l2cap_get_chan_by_ident(conn, ident);
142 mutex_unlock(&conn->chan_lock);
/* Unlocked scan of the global chan_list for a channel bound to @psm on
 * source address @src (compares the bound sport, not the remote psm).
 */
147 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
149 struct l2cap_chan *c;
151 list_for_each_entry(c, &chan_list, global_l) {
152 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind @chan to a PSM under chan_list_lock. A caller-supplied @psm is
 * rejected if already taken on @src; with psm == 0 a free odd value in
 * the dynamic range 0x1001..0x10ff is auto-allocated (PSMs are odd,
 * hence the += 2 stride). NOTE(review): lines elided in this extract.
 */
158 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
162 write_lock(&chan_list_lock);
164 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
177 for (p = 0x1001; p < 0x1100; p += 2)
178 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
179 chan->psm = cpu_to_le16(p);
180 chan->sport = cpu_to_le16(p);
187 write_unlock(&chan_list_lock);
/* Record a fixed SCID on @chan; serialised by chan_list_lock. */
191 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
193 write_lock(&chan_list_lock);
197 write_unlock(&chan_list_lock);
/* Allocate the first unused source CID in the dynamic range by probing
 * conn's channel list for each candidate. Caller is expected to hold
 * conn->chan_lock since __l2cap_get_chan_by_scid() is the unlocked
 * variant — TODO(review) confirm against callers not in this extract.
 */
202 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
204 u16 cid = L2CAP_CID_DYN_START;
206 for (; cid < L2CAP_CID_DYN_END; cid++) {
207 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition @chan to @state via the channel ops callback; logs the
 * old -> new state. The __ variant assumes the caller holds whatever
 * lock the ops callback expects (sock lock in the wrappers below —
 * elided lines presumably take/release it; verify against upstream).
 */
214 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
216 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
217 state_to_string(state));
220 chan->ops->state_change(chan, state);
/* Locked wrapper: grabs chan->sk before delegating to the __ variant. */
223 static void l2cap_state_change(struct l2cap_chan *chan, int state)
225 struct sock *sk = chan->sk;
228 __l2cap_state_change(chan, state);
/* Record @err on the channel's socket (body elided in this extract). */
232 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
234 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(). */
239 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
241 struct sock *sk = chan->sk;
244 __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only when no monitor timer is
 * pending and a retransmission timeout has been negotiated.
 */
248 static void __set_retrans_timer(struct l2cap_chan *chan)
250 if (!delayed_work_pending(&chan->monitor_timer) &&
251 chan->retrans_timeout) {
252 l2cap_set_timer(chan, &chan->retrans_timer,
253 msecs_to_jiffies(chan->retrans_timeout));
/* Switch from retransmission to monitor timing: the retrans timer is
 * cancelled first, then the monitor timer armed if configured.
 */
257 static void __set_monitor_timer(struct l2cap_chan *chan)
259 __clear_retrans_timer(chan);
260 if (chan->monitor_timeout) {
261 l2cap_set_timer(chan, &chan->monitor_timer,
262 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame carrying txseq == @seq. */
266 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
271 skb_queue_walk(head, skb) {
272 if (bt_cb(skb)->control.txseq == seq)
279 /* ---- L2CAP sequence number lists ---- */
281 /* For ERTM, ordered lists of sequence numbers must be tracked for
282 * SREJ requests that are received and for frames that are to be
283 * retransmitted. These seq_list functions implement a singly-linked
284 * list in an array, where membership in the list can also be checked
285 * in constant time. Items can also be added to the tail of the list
286 * and removed from the head in constant time, without further memory
/* Initialise a seq_list: allocate a power-of-two u16 array, derive the
 * index mask from it, and mark every slot plus head/tail as CLEAR.
 * Returns an int status (success/failure paths elided in this extract;
 * presumably -ENOMEM when kmalloc fails — confirm upstream).
 */
290 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
292 size_t alloc_size, i;
294 /* Allocated size is a power of 2 to map sequence numbers
295 * (which may be up to 14 bits) in to a smaller array that is
296 * sized for the negotiated ERTM transmit windows.
 */
298 alloc_size = roundup_pow_of_two(size);
300 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
304 seq_list->mask = alloc_size - 1;
305 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
306 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
307 for (i = 0; i < alloc_size; i++)
308 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array of a seq_list (kfree(NULL) is a no-op). */
313 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
315 kfree(seq_list->list);
/* O(1) membership test: a slot equal to CLEAR means "not on the list";
 * any other value (a successor seq or the TAIL sentinel) means present.
 */
318 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
321 /* Constant-time check for list membership */
322 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the singly-linked array list. Three cases: empty
 * list (return CLEAR), @seq is the head (O(1) pop, resetting head/tail
 * when the TAIL sentinel is reached), otherwise an O(n) walk to find
 * the predecessor and splice @seq out, fixing tail if @seq was last.
 * NOTE(review): the trailing "return seq;" of the upstream function is
 * among the lines elided from this extract.
 */
325 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
327 u16 mask = seq_list->mask;
329 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
330 /* In case someone tries to pop the head of an empty list */
331 return L2CAP_SEQ_LIST_CLEAR;
332 } else if (seq_list->head == seq) {
333 /* Head can be removed in constant time */
334 seq_list->head = seq_list->list[seq & mask];
335 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
337 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
338 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
342 /* Walk the list to find the sequence number */
343 u16 prev = seq_list->head;
344 while (seq_list->list[prev & mask] != seq) {
345 prev = seq_list->list[prev & mask];
346 if (prev == L2CAP_SEQ_LIST_TAIL)
347 return L2CAP_SEQ_LIST_CLEAR;
350 /* Unlink the number from the list and clear it */
351 seq_list->list[prev & mask] = seq_list->list[seq & mask];
352 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
353 if (seq_list->tail == seq)
354 seq_list->tail = prev;
/* Pop the list head; delegates to the O(1) head case of remove(). */
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
361 /* Remove the head in constant time */
362 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: no-op when already empty, otherwise clear every slot
 * (mask + 1 entries) and reset head and tail to CLEAR.
 */
365 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
369 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
372 for (i = 0; i <= seq_list->mask; i++)
373 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
376 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* O(1) append of @seq: duplicates are skipped (slot already non-CLEAR);
 * an empty list makes @seq the head, otherwise the old tail is linked
 * to @seq; @seq becomes the new tail and carries the TAIL sentinel.
 */
379 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
381 u16 mask = seq_list->mask;
383 /* All appends happen in constant time */
385 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
388 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
389 seq_list->head = seq;
391 seq_list->list[seq_list->tail & mask] = seq;
393 seq_list->tail = seq;
394 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan_timer. Under conn->chan_lock and the
 * channel lock it picks a close reason from the channel state
 * (ECONNREFUSED for connected/config or an authenticating connect;
 * the default-reason branch is elided here), closes the channel, then
 * invokes ops->close outside the channel lock and drops the reference
 * taken when the timer was armed.
 */
397 static void l2cap_chan_timeout(struct work_struct *work)
399 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
401 struct l2cap_conn *conn = chan->conn;
404 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
406 mutex_lock(&conn->chan_lock);
407 l2cap_chan_lock(chan);
409 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
410 reason = ECONNREFUSED;
411 else if (chan->state == BT_CONNECT &&
412 chan->sec_level != BT_SECURITY_SDP)
413 reason = ECONNREFUSED;
417 l2cap_chan_close(chan, reason);
419 l2cap_chan_unlock(chan);
421 chan->ops->close(chan);
422 mutex_unlock(&conn->chan_lock);
424 l2cap_chan_put(chan);
/* Allocate and initialise a new channel: zeroed atomically, added to
 * the global chan_list, timer work initialised, state set to BT_OPEN
 * and refcount initialised to 1 via kref_init. CONF_NOT_COMPLETE marks
 * configuration as pending until l2cap_chan_ready(). Returns the new
 * channel (NULL-on-alloc-failure path elided in this extract).
 */
427 struct l2cap_chan *l2cap_chan_create(void)
429 struct l2cap_chan *chan;
431 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
435 mutex_init(&chan->lock);
437 write_lock(&chan_list_lock);
438 list_add(&chan->global_l, &chan_list);
439 write_unlock(&chan_list_lock);
441 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
443 chan->state = BT_OPEN;
445 kref_init(&chan->kref);
447 /* This flag is cleared in l2cap_chan_ready() */
448 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
450 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list (free elided). */
455 static void l2cap_chan_destroy(struct kref *kref)
457 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
459 BT_DBG("chan %p", chan);
461 write_lock(&chan_list_lock);
462 list_del(&chan->global_l);
463 write_unlock(&chan_list_lock);
/* Take a channel reference (kref_get call elided in this extract). */
468 void l2cap_chan_hold(struct l2cap_chan *c)
470 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a channel reference; last put runs l2cap_chan_destroy(). */
475 void l2cap_chan_put(struct l2cap_chan *c)
477 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
479 kref_put(&c->kref, l2cap_chan_destroy);
/* Apply protocol defaults to a freshly created channel: CRC16 FCS,
 * default max transmit count and tx/ack window sizes, lowest security
 * level, and the force-active radio policy flag.
 */
482 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
484 chan->fcs = L2CAP_FCS_CRC16;
485 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
486 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
487 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
488 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
489 chan->sec_level = BT_SECURITY_LOW;
491 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: assign CIDs/MTUs by channel type
 * (connection-oriented gets a dynamic SCID — fixed ATT CID on LE;
 * connectionless and A2MP use their fixed CIDs; the default case is
 * the raw signalling socket), seed best-effort EFS defaults, take a
 * channel ref and an hci_conn ref, and link the channel into
 * conn->chan_l. Caller must hold conn->chan_lock (see the locked
 * wrapper l2cap_chan_add() below).
 */
494 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
496 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
497 __le16_to_cpu(chan->psm), chan->dcid);
/* Default disconnect reason until signalling tells us otherwise. */
499 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
503 switch (chan->chan_type) {
504 case L2CAP_CHAN_CONN_ORIENTED:
505 if (conn->hcon->type == LE_LINK) {
507 chan->omtu = L2CAP_DEFAULT_MTU;
508 if (chan->dcid == L2CAP_CID_ATT)
509 chan->scid = L2CAP_CID_ATT;
511 chan->scid = l2cap_alloc_cid(conn);
513 /* Alloc CID for connection-oriented socket */
514 chan->scid = l2cap_alloc_cid(conn);
515 chan->omtu = L2CAP_DEFAULT_MTU;
519 case L2CAP_CHAN_CONN_LESS:
520 /* Connectionless socket */
521 chan->scid = L2CAP_CID_CONN_LESS;
522 chan->dcid = L2CAP_CID_CONN_LESS;
523 chan->omtu = L2CAP_DEFAULT_MTU;
526 case L2CAP_CHAN_CONN_FIX_A2MP:
527 chan->scid = L2CAP_CID_A2MP;
528 chan->dcid = L2CAP_CID_A2MP;
529 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
530 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
534 /* Raw socket can send/recv signalling messages only */
535 chan->scid = L2CAP_CID_SIGNALING;
536 chan->dcid = L2CAP_CID_SIGNALING;
537 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort extended-flow-spec defaults for this channel. */
540 chan->local_id = L2CAP_BESTEFFORT_ID;
541 chan->local_stype = L2CAP_SERV_BESTEFFORT;
542 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
543 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
544 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
545 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
547 l2cap_chan_hold(chan);
549 hci_conn_hold(conn->hcon);
551 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: takes conn->chan_lock around __l2cap_chan_add(). */
554 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
556 mutex_lock(&conn->chan_lock);
557 __l2cap_chan_add(conn, chan);
558 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: stop the channel timer, unlink it
 * from conn->chan_l and drop the refs taken in __l2cap_chan_add()
 * (hci_conn ref is kept for fixed A2MP channels), clear any AMP
 * manager / high-speed logical-link association, run ops->teardown
 * with @err, and for ERTM/streaming modes cancel the mode timers and
 * purge queued frames and seq lists. Several branch/closing lines are
 * elided from this extract.
 */
561 void l2cap_chan_del(struct l2cap_chan *chan, int err)
563 struct l2cap_conn *conn = chan->conn;
565 __clear_chan_timer(chan);
567 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
570 struct amp_mgr *mgr = conn->hcon->amp_mgr;
571 /* Delete from channel list */
572 list_del(&chan->list);
574 l2cap_chan_put(chan);
578 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
579 hci_conn_drop(conn->hcon);
581 if (mgr && mgr->bredr_chan == chan)
582 mgr->bredr_chan = NULL;
585 if (chan->hs_hchan) {
586 struct hci_chan *hs_hchan = chan->hs_hchan;
588 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
589 amp_disconnect_logical_link(hs_hchan);
592 chan->ops->teardown(chan, err);
594 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
598 case L2CAP_MODE_BASIC:
601 case L2CAP_MODE_ERTM:
602 __clear_retrans_timer(chan);
603 __clear_monitor_timer(chan);
604 __clear_ack_timer(chan);
606 skb_queue_purge(&chan->srej_q);
608 l2cap_seq_list_free(&chan->srej_list);
609 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough to streaming cleanup (tx queue purge) */
613 case L2CAP_MODE_STREAMING:
614 skb_queue_purge(&chan->tx_q);
/* Close @chan according to its current state (switch labels are among
 * the elided lines): an established connection-oriented ACL channel
 * sends a Disconnect Request under the socket send timeout; a channel
 * still in CONNECT2 answers the pending Connection Request with
 * SEC_BLOCK (deferred setup) or BAD_PSM before moving to BT_DISCONN;
 * other states simply tear down / delete the channel with @reason.
 */
621 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
623 struct l2cap_conn *conn = chan->conn;
625 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
627 switch (chan->state) {
629 chan->ops->teardown(chan, 0);
634 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
635 conn->hcon->type == ACL_LINK) {
636 struct sock *sk = chan->sk;
637 __set_chan_timer(chan, sk->sk_sndtimeo);
638 l2cap_send_disconn_req(chan, reason);
640 l2cap_chan_del(chan, reason);
644 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
645 conn->hcon->type == ACL_LINK) {
646 struct sock *sk = chan->sk;
647 struct l2cap_conn_rsp rsp;
650 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
651 result = L2CAP_CR_SEC_BLOCK;
653 result = L2CAP_CR_BAD_PSM;
654 l2cap_state_change(chan, BT_DISCONN);
/* Response carries our dcid/scid swapped into the peer's view. */
656 rsp.scid = cpu_to_le16(chan->dcid);
657 rsp.dcid = cpu_to_le16(chan->scid);
658 rsp.result = cpu_to_le16(result);
659 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
660 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
664 l2cap_chan_del(chan, reason);
669 l2cap_chan_del(chan, reason);
673 chan->ops->teardown(chan, 0);
/* Map the channel type + security level to an HCI authentication
 * requirement. The raw-channel case label is elided; SDP (and the 3DSP
 * connectionless PSM) is special-cased so it never requests bonding
 * and is demoted from LOW to the dedicated SDP security level.
 */
678 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
680 switch (chan->chan_type) {
682 switch (chan->sec_level) {
683 case BT_SECURITY_HIGH:
684 return HCI_AT_DEDICATED_BONDING_MITM;
685 case BT_SECURITY_MEDIUM:
686 return HCI_AT_DEDICATED_BONDING;
688 return HCI_AT_NO_BONDING;
691 case L2CAP_CHAN_CONN_LESS:
692 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
693 if (chan->sec_level == BT_SECURITY_LOW)
694 chan->sec_level = BT_SECURITY_SDP;
696 if (chan->sec_level == BT_SECURITY_HIGH)
697 return HCI_AT_NO_BONDING_MITM;
699 return HCI_AT_NO_BONDING;
701 case L2CAP_CHAN_CONN_ORIENTED:
702 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
703 if (chan->sec_level == BT_SECURITY_LOW)
704 chan->sec_level = BT_SECURITY_SDP;
706 if (chan->sec_level == BT_SECURITY_HIGH)
707 return HCI_AT_NO_BONDING_MITM;
709 return HCI_AT_NO_BONDING;
/* default: general bonding keyed on the requested security level */
713 switch (chan->sec_level) {
714 case BT_SECURITY_HIGH:
715 return HCI_AT_GENERAL_BONDING_MITM;
716 case BT_SECURITY_MEDIUM:
717 return HCI_AT_GENERAL_BONDING;
719 return HCI_AT_NO_BONDING;
725 /* Service level security */
/* Enforce service-level security on the underlying ACL via
 * hci_conn_security() with the auth type derived above.
 */
726 int l2cap_chan_check_security(struct l2cap_chan *chan)
728 struct l2cap_conn *conn = chan->conn;
731 auth_type = l2cap_get_auth_type(chan);
733 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn under
 * conn->lock, wrapping within the kernel-reserved 1..128 range.
 */
736 static u8 l2cap_get_ident(struct l2cap_conn *conn)
740 /* Get next available identificator.
741 * 1 - 128 are used by kernel.
742 * 129 - 199 are reserved.
743 * 200 - 254 are used by utilities like l2ping, etc.
 */
746 spin_lock(&conn->lock);
748 if (++conn->tx_ident > 128)
753 spin_unlock(&conn->lock);
/* Build a signalling command skb and push it on the connection's HCI
 * channel at maximum priority, non-flushable when the controller
 * supports it. Silently returns if the build fails (branch elided).
 */
758 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
761 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
764 BT_DBG("code 0x%2.2x", code);
769 if (lmp_no_flush_capable(conn->hcon->hdev))
770 flags = ACL_START_NO_FLUSH;
774 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
775 skb->priority = HCI_PRIO_MAX;
777 hci_send_acl(conn->hchan, skb, flags);
/* A channel is "moving" (AMP channel move in progress) in every move
 * state except STABLE and WAIT_PREPARE.
 */
780 static bool __chan_is_moving(struct l2cap_chan *chan)
782 return chan->move_state != L2CAP_MOVE_STABLE &&
783 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data frame for @chan: routed over the high-speed AMP
 * hci_chan when one is attached and no move is in progress, otherwise
 * over the BR/EDR ACL with no-flush start flags when the channel is
 * not flushable and the controller supports it.
 */
786 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
788 struct hci_conn *hcon = chan->conn->hcon;
791 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
794 if (chan->hs_hcon && !__chan_is_moving(chan)) {
796 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
803 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
804 lmp_no_flush_capable(hcon->hdev))
805 flags = ACL_START_NO_FLUSH;
809 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
810 hci_send_acl(chan->conn->hchan, skb, flags);
/* ---- ERTM control-field (de)serialisation ----
 * Enhanced control fields are 16 bit, extended ones 32 bit; the
 * FRAME_TYPE bit selects S-frame (poll/supervise) vs I-frame
 * (sar/txseq) layout. The sframe-flag assignments in the unpackers
 * are among the elided lines in this extract.
 */
813 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
815 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
816 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
818 if (enh & L2CAP_CTRL_FRAME_TYPE) {
821 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
822 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
829 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
830 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* 32-bit extended control field variant of the above. */
837 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
839 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
840 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
842 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
845 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
846 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
853 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
854 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Decode the control field at skb->data into bt_cb(skb)->control and
 * pull it off the skb; width chosen by the channel's EXT_CTRL flag.
 */
861 static inline void __unpack_control(struct l2cap_chan *chan,
864 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
865 __unpack_extended_control(get_unaligned_le32(skb->data),
866 &bt_cb(skb)->control);
867 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
869 __unpack_enhanced_control(get_unaligned_le16(skb->data),
870 &bt_cb(skb)->control);
871 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control: build the 32-bit field. */
875 static u32 __pack_extended_control(struct l2cap_ctrl *control)
879 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
880 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
882 if (control->sframe) {
883 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
884 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
885 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
887 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
888 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control: build the 16-bit field. */
894 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
898 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
899 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
901 if (control->sframe) {
902 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
903 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
904 packed |= L2CAP_CTRL_FRAME_TYPE;
906 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
907 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field just after the L2CAP basic header. */
913 static inline void __pack_control(struct l2cap_chan *chan,
914 struct l2cap_ctrl *control,
917 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
918 put_unaligned_le32(__pack_extended_control(control),
919 skb->data + L2CAP_HDR_SIZE);
921 put_unaligned_le16(__pack_enhanced_control(control),
922 skb->data + L2CAP_HDR_SIZE);
/* Header size for ERTM frames: extended vs enhanced control width. */
926 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
928 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
929 return L2CAP_EXT_HDR_SIZE;
931 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-) frame skb: basic L2CAP header addressed to
 * the peer's dcid, the packed control field (16 or 32 bit), and a
 * trailing CRC16 FCS over the frame when negotiated.
 */
934 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
938 struct l2cap_hdr *lh;
939 int hlen = __ertm_hdr_size(chan);
941 if (chan->fcs == L2CAP_FCS_CRC16)
942 hlen += L2CAP_FCS_SIZE;
944 skb = bt_skb_alloc(hlen, GFP_KERNEL);
947 return ERR_PTR(-ENOMEM);
949 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
950 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
951 lh->cid = cpu_to_le16(chan->dcid);
953 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
954 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
956 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
958 if (chan->fcs == L2CAP_FCS_CRC16) {
959 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
960 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
963 skb->priority = HCI_PRIO_MAX;
/* Send an S-frame described by @control: skipped while an AMP move is
 * in progress; maintains RNR_SENT book-keeping for RR/RNR, and for
 * anything but SREJ records the acked reqseq and stops the ack timer.
 * The F-bit handling at the elided lines presumably sets
 * control->final from CONN_SEND_FBIT — confirm upstream.
 */
967 static void l2cap_send_sframe(struct l2cap_chan *chan,
968 struct l2cap_ctrl *control)
973 BT_DBG("chan %p, control %p", chan, control);
975 if (!control->sframe)
978 if (__chan_is_moving(chan))
981 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
985 if (control->super == L2CAP_SUPER_RR)
986 clear_bit(CONN_RNR_SENT, &chan->conn_state);
987 else if (control->super == L2CAP_SUPER_RNR)
988 set_bit(CONN_RNR_SENT, &chan->conn_state);
990 if (control->super != L2CAP_SUPER_SREJ) {
991 chan->last_acked_seq = control->reqseq;
992 __clear_ack_timer(chan);
995 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
996 control->final, control->poll, control->super);
998 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
999 control_field = __pack_extended_control(control);
1001 control_field = __pack_enhanced_control(control);
1003 skb = l2cap_create_sframe_pdu(chan, control_field);
1005 l2cap_do_send(chan, skb);
/* Send an RR (receiver ready) or, when the local side is busy, an RNR
 * (receiver not ready) S-frame acknowledging up to buffer_seq, with
 * the P-bit set as requested by @poll.
 */
1008 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1010 struct l2cap_ctrl control;
1012 BT_DBG("chan %p, poll %d", chan, poll);
1014 memset(&control, 0, sizeof(control));
1016 control.poll = poll;
1018 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1019 control.super = L2CAP_SUPER_RNR;
1021 control.super = L2CAP_SUPER_RR;
1023 control.reqseq = chan->buffer_seq;
1024 l2cap_send_sframe(chan, &control);
/* True while no Connection Request from us is outstanding. */
1027 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1029 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may be created on an AMP controller:
 * requires high-speed enabled on the connection, the peer advertising
 * the A2MP fixed channel, at least one non-BR/EDR AMP controller that
 * is up, and an AMP-preferring channel policy.
 */
1032 static bool __amp_capable(struct l2cap_chan *chan)
1034 struct l2cap_conn *conn = chan->conn;
1035 struct hci_dev *hdev;
1036 bool amp_available = false;
1038 if (!conn->hs_enabled)
1041 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1044 read_lock(&hci_dev_list_lock);
1045 list_for_each_entry(hdev, &hci_dev_list, list) {
1046 if (hdev->amp_type != AMP_TYPE_BREDR &&
1047 test_bit(HCI_UP, &hdev->flags)) {
1048 amp_available = true;
1052 read_unlock(&hci_dev_list_lock);
1054 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1055 return amp_available;
/* EFS validity check (body elided in this extract). */
1060 static bool l2cap_check_efs(struct l2cap_chan *chan)
1062 /* Check EFS parameters */
/* Send an L2CAP Connection Request carrying our scid and psm, marking
 * the connect as pending and remembering the command ident.
 */
1066 void l2cap_send_conn_req(struct l2cap_chan *chan)
1068 struct l2cap_conn *conn = chan->conn;
1069 struct l2cap_conn_req req;
1071 req.scid = cpu_to_le16(chan->scid);
1072 req.psm = chan->psm;
1074 chan->ident = l2cap_get_ident(conn);
1076 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1078 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant of the above: Create Channel Request with target amp_id. */
1081 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1083 struct l2cap_create_chan_req req;
1084 req.scid = cpu_to_le16(chan->scid);
1085 req.psm = chan->psm;
1086 req.amp_id = amp_id;
1088 chan->ident = l2cap_get_ident(chan->conn);
1090 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP channel move: stop all mode
 * timers, reset retry counters on queued tx frames, rewind
 * expected_tx_seq, clear REJ/SREJ state and queues, and park the
 * tx/rx state machines in XMIT/MOVE. Non-ERTM channels are left alone.
 */
1094 static void l2cap_move_setup(struct l2cap_chan *chan)
1096 struct sk_buff *skb;
1098 BT_DBG("chan %p", chan);
1100 if (chan->mode != L2CAP_MODE_ERTM)
1103 __clear_retrans_timer(chan);
1104 __clear_monitor_timer(chan);
1105 __clear_ack_timer(chan);
1107 chan->retry_count = 0;
1108 skb_queue_walk(&chan->tx_q, skb) {
1109 if (bt_cb(skb)->control.retries)
1110 bt_cb(skb)->control.retries = 1;
1115 chan->expected_tx_seq = chan->buffer_seq;
1117 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1118 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1119 l2cap_seq_list_clear(&chan->retrans_list);
1120 l2cap_seq_list_clear(&chan->srej_list);
1121 skb_queue_purge(&chan->srej_q);
1123 chan->tx_state = L2CAP_TX_STATE_XMIT;
1124 chan->rx_state = L2CAP_RX_STATE_MOVE;
1126 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish a channel move: return to STABLE/no-role, then for ERTM kick
 * the poll exchange — the move initiator sends an explicit poll and
 * waits for F=1 (WAIT_F); the responder waits for P=1 (WAIT_P).
 */
1129 static void l2cap_move_done(struct l2cap_chan *chan)
1131 u8 move_role = chan->move_role;
1132 BT_DBG("chan %p", chan);
1134 chan->move_state = L2CAP_MOVE_STABLE;
1135 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1137 if (chan->mode != L2CAP_MODE_ERTM)
1140 switch (move_role) {
1141 case L2CAP_MOVE_ROLE_INITIATOR:
1142 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1143 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1145 case L2CAP_MOVE_ROLE_RESPONDER:
1146 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel fully established: drop all CONF_* flags, stop the
 * channel timer, and notify the owner via ops->ready().
 */
1151 static void l2cap_chan_ready(struct l2cap_chan *chan)
1153 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1154 chan->conf_state = 0;
1155 __clear_chan_timer(chan);
1157 chan->state = BT_CONNECTED;
1159 chan->ops->ready(chan);
/* Start channel establishment: AMP-capable channels first discover
 * remote AMP controllers, others send a plain Connection Request.
 */
1162 static void l2cap_start_connection(struct l2cap_chan *chan)
1164 if (__amp_capable(chan)) {
1165 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1166 a2mp_discover_amp(chan);
1168 l2cap_send_conn_req(chan);
/* Kick off connection setup: LE channels become ready immediately;
 * on BR/EDR we first make sure the feature-mask information exchange
 * has completed (sending an Information Request and arming the info
 * timer if not), then start the connection once security allows and
 * no connect is already pending.
 */
1172 static void l2cap_do_start(struct l2cap_chan *chan)
1174 struct l2cap_conn *conn = chan->conn;
1176 if (conn->hcon->type == LE_LINK) {
1177 l2cap_chan_ready(chan);
1181 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1182 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1185 if (l2cap_chan_check_security(chan) &&
1186 __l2cap_no_conn_pending(chan)) {
1187 l2cap_start_connection(chan);
1190 struct l2cap_info_req req;
1191 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1193 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1194 conn->info_ident = l2cap_get_ident(conn);
1196 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1198 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Is @mode usable given the peer's feature mask and ours? ERTM and
 * streaming are additionally forced into the local mask at the elided
 * lines (presumably gated on an enable flag — confirm upstream).
 */
1203 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1205 u32 local_feat_mask = l2cap_feat_mask;
1207 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1210 case L2CAP_MODE_ERTM:
1211 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1212 case L2CAP_MODE_STREAMING:
1213 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for @chan (fixed A2MP channels skip the
 * request and just change state), first quiescing ERTM timers on an
 * established channel, then moving the channel to BT_DISCONN and
 * recording @err on its socket.
 */
1219 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1221 struct sock *sk = chan->sk;
1222 struct l2cap_conn *conn = chan->conn;
1223 struct l2cap_disconn_req req;
1228 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1229 __clear_retrans_timer(chan);
1230 __clear_monitor_timer(chan);
1231 __clear_ack_timer(chan);
1234 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1235 l2cap_state_change(chan, BT_DISCONN);
1239 req.dcid = cpu_to_le16(chan->dcid);
1240 req.scid = cpu_to_le16(chan->scid);
1241 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1245 __l2cap_state_change(chan, BT_DISCONN);
1246 __l2cap_chan_set_err(chan, err);
1250 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and push its state
 * machine forward: BT_CONNECT channels get security checked and a
 * Connection Request sent (or are closed if their mode is unsupported
 * and state2-device is set); BT_CONNECT2 channels answer the pending
 * incoming Connection Request (PEND for deferred setup or pending
 * authentication, SUCCESS otherwise) and, on success, send the first
 * Configuration Request. Runs under conn->chan_lock with each channel
 * individually locked.
 */
1251 static void l2cap_conn_start(struct l2cap_conn *conn)
1253 struct l2cap_chan *chan, *tmp;
1255 BT_DBG("conn %p", conn);
1257 mutex_lock(&conn->chan_lock);
1259 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1260 l2cap_chan_lock(chan);
1262 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1263 l2cap_chan_unlock(chan);
1267 if (chan->state == BT_CONNECT) {
1268 if (!l2cap_chan_check_security(chan) ||
1269 !__l2cap_no_conn_pending(chan)) {
1270 l2cap_chan_unlock(chan);
1274 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1275 && test_bit(CONF_STATE2_DEVICE,
1276 &chan->conf_state)) {
1277 l2cap_chan_close(chan, ECONNRESET);
1278 l2cap_chan_unlock(chan);
1282 l2cap_start_connection(chan);
1284 } else if (chan->state == BT_CONNECT2) {
1285 struct sock *sk = chan->sk;
1286 struct l2cap_conn_rsp rsp;
1288 rsp.scid = cpu_to_le16(chan->dcid);
1289 rsp.dcid = cpu_to_le16(chan->scid);
1291 if (l2cap_chan_check_security(chan)) {
1293 if (test_bit(BT_SK_DEFER_SETUP,
1294 &bt_sk(sk)->flags)) {
1295 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1296 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1297 chan->ops->defer(chan);
1300 __l2cap_state_change(chan, BT_CONFIG);
1301 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1302 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1306 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1307 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1310 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1313 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1314 rsp.result != L2CAP_CR_SUCCESS) {
1315 l2cap_chan_unlock(chan);
1319 set_bit(CONF_REQ_SENT, &chan->conf_state);
1320 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1321 l2cap_build_conf_req(chan, buf), buf);
1322 chan->num_conf_req++;
1325 l2cap_chan_unlock(chan);
1328 mutex_unlock(&conn->chan_lock);
1331 /* Find socket with cid and source/destination bdaddr.
1332 * Returns closest match, locked.
 */
/* Search the global channel list for a channel in @state bound to
 * @cid: an exact src+dst address match wins immediately; otherwise
 * wildcard (BDADDR_ANY) combinations are remembered as the closest
 * match (c1, returned after the walk — return statement elided).
 */
1334 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1338 struct l2cap_chan *c, *c1 = NULL;
1340 read_lock(&chan_list_lock);
1342 list_for_each_entry(c, &chan_list, global_l) {
1343 if (state && c->state != state)
1346 if (c->scid == cid) {
1347 int src_match, dst_match;
1348 int src_any, dst_any;
1351 src_match = !bacmp(&c->src, src);
1352 dst_match = !bacmp(&c->dst, dst);
1353 if (src_match && dst_match) {
1354 read_unlock(&chan_list_lock);
1359 src_any = !bacmp(&c->src, BDADDR_ANY);
1360 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1361 if ((src_match && dst_any) || (src_any && dst_match) ||
1362 (src_any && dst_any))
1367 read_unlock(&chan_list_lock);
/* LE link became ready: if a listener exists on the ATT fixed channel
 * and no client ATT channel already occupies this connection, spawn a
 * child channel from the listener, wire up its CID and addresses, and
 * attach it to the connection. Locking around the parent socket is
 * among the elided lines.
 */
1372 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1374 struct sock *parent;
1375 struct l2cap_chan *chan, *pchan;
1379 /* Check if we have socket listening on cid */
1380 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1381 &conn->hcon->src, &conn->hcon->dst);
1385 /* Client ATT sockets should override the server one */
1386 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1393 chan = pchan->ops->new_connection(pchan);
1397 chan->dcid = L2CAP_CID_ATT;
1399 bacpy(&chan->src, &conn->hcon->src);
1400 bacpy(&chan->dst, &conn->hcon->dst);
1402 __l2cap_chan_add(conn, chan);
1405 release_sock(parent);
/* The underlying link is up: trigger SMP for outgoing LE pairing,
 * admit any pending LE fixed channel, then per channel — skip A2MP,
 * mark LE channels ready once SMP security is satisfied, mark
 * non-connection-oriented channels connected directly, and start
 * connection setup for BT_CONNECT channels.
 */
1408 static void l2cap_conn_ready(struct l2cap_conn *conn)
1410 struct l2cap_chan *chan;
1411 struct hci_conn *hcon = conn->hcon;
1413 BT_DBG("conn %p", conn);
1415 /* For outgoing pairing which doesn't necessarily have an
1416 * associated socket (e.g. mgmt_pair_device).
1418 if (hcon->out && hcon->type == LE_LINK)
1419 smp_conn_security(hcon, hcon->pending_sec_level);
1421 mutex_lock(&conn->chan_lock);
1423 if (hcon->type == LE_LINK)
1424 l2cap_le_conn_ready(conn);
1426 list_for_each_entry(chan, &conn->chan_l, list) {
1428 l2cap_chan_lock(chan);
1430 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1431 l2cap_chan_unlock(chan);
1435 if (hcon->type == LE_LINK) {
1436 if (smp_conn_security(hcon, chan->sec_level))
1437 l2cap_chan_ready(chan);
1439 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1440 struct sock *sk = chan->sk;
1441 __clear_chan_timer(chan);
1443 __l2cap_state_change(chan, BT_CONNECTED);
1444 sk->sk_state_change(sk);
1447 } else if (chan->state == BT_CONNECT) {
1448 l2cap_do_start(chan);
1451 l2cap_chan_unlock(chan);
1454 mutex_unlock(&conn->chan_lock);
1457 /* Notify sockets that we cannot guarantee reliability anymore */
1458 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1460 struct l2cap_chan *chan;
1462 BT_DBG("conn %p", conn);
1464 mutex_lock(&conn->chan_lock);
1466 list_for_each_entry(chan, &conn->chan_l, list) {
/* Only channels that demanded reliable delivery get the error. */
1467 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1468 l2cap_chan_set_err(chan, err);
1471 mutex_unlock(&conn->chan_lock);
/* Information-request timer expired: give up waiting for the remote's
 * feature-mask response, mark the exchange done and start channels.
 */
1474 static void l2cap_info_timeout(struct work_struct *work)
1476 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1479 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1480 conn->info_ident = 0;
1482 l2cap_conn_start(conn);
1487 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1488 * callback is called during registration. The ->remove callback is called
1489 * during unregistration.
1490 * An l2cap_user object can either be explicitly unregistered or when the
1491 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1492 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1493 * External modules must own a reference to the l2cap_conn object if they intend
1494 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1495 * any time if they don't.
/* Register an external l2cap_user on @conn. Calls user->probe() under
 * the hci_dev lock; fails if the user is already linked or the conn has
 * been torn down (conn->hchan == NULL). Returns 0 on success.
 * NOTE(review): error paths/returns are elided in this listing.
 */
1498 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1500 struct hci_dev *hdev = conn->hcon->hdev;
1503 /* We need to check whether l2cap_conn is registered. If it is not, we
1504 * must not register the l2cap_user. l2cap_conn_del() unregisters
1505 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1506 * relies on the parent hci_conn object to be locked. This itself relies
1507 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean this user is already registered. */
1512 if (user->list.next || user->list.prev) {
1517 /* conn->hchan is NULL after l2cap_conn_del() was called */
1523 ret = user->probe(conn, user);
1527 list_add(&user->list, &conn->users);
1531 hci_dev_unlock(hdev);
1534 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user: unlink it, clear the
 * list pointers (the "not registered" sentinel) and invoke ->remove().
 * All under the hci_dev lock, mirroring l2cap_register_user().
 */
1536 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1538 struct hci_dev *hdev = conn->hcon->hdev;
/* NULL list pointers mean the user was never registered; bail. */
1542 if (!user->list.next || !user->list.prev)
1545 list_del(&user->list);
1546 user->list.next = NULL;
1547 user->list.prev = NULL;
1548 user->remove(conn, user);
1551 hci_dev_unlock(hdev);
1553 EXPORT_SYMBOL(l2cap_unregister_user);
/* Drain conn->users, unlinking each user and calling its ->remove()
 * callback. Used during connection teardown.
 */
1555 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1557 struct l2cap_user *user;
1559 while (!list_empty(&conn->users)) {
1560 user = list_first_entry(&conn->users, struct l2cap_user, list);
1561 list_del(&user->list);
/* Reset pointers so a later l2cap_unregister_user() is a no-op. */
1562 user->list.next = NULL;
1563 user->list.prev = NULL;
1564 user->remove(conn, user);
/* Tear down the L2CAP connection attached to @hcon: close every
 * channel with @err, notify users, drop the HCI channel, cancel any
 * pending timers and release the conn reference.
 */
1568 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1570 struct l2cap_conn *conn = hcon->l2cap_data;
1571 struct l2cap_chan *chan, *l;
1576 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Discard any partially reassembled inbound frame. */
1578 kfree_skb(conn->rx_skb);
1580 l2cap_unregister_all_users(conn);
1582 mutex_lock(&conn->chan_lock);
1585 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold the channel so ops->close() can run after chan_del. */
1586 l2cap_chan_hold(chan);
1587 l2cap_chan_lock(chan);
1589 l2cap_chan_del(chan, err);
1591 l2cap_chan_unlock(chan);
1593 chan->ops->close(chan);
1594 l2cap_chan_put(chan);
1597 mutex_unlock(&conn->chan_lock);
1599 hci_chan_del(conn->hchan);
/* Only cancel timers that were actually armed. */
1601 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1602 cancel_delayed_work_sync(&conn->info_timer);
1604 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1605 cancel_delayed_work_sync(&conn->security_timer);
1606 smp_chan_destroy(conn);
1609 hcon->l2cap_data = NULL;
/* Drop the reference taken when the conn was created. */
1611 l2cap_conn_put(conn);
/* LE security (SMP) timer expired: if pairing was still pending,
 * destroy the SMP context and tear the connection down with ETIMEDOUT.
 */
1614 static void security_timeout(struct work_struct *work)
1616 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1617 security_timer.work);
1619 BT_DBG("conn %p", conn);
1621 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1622 smp_chan_destroy(conn);
1623 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate the
 * conn, take an hci_conn reference, pick the MTU from the link type,
 * and initialise locks, lists and the appropriate delayed work.
 * NOTE(review): early-return/error lines are elided in this listing.
 */
1627 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1629 struct l2cap_conn *conn = hcon->l2cap_data;
1630 struct hci_chan *hchan;
1635 hchan = hci_chan_create(hcon);
1639 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan we just created. */
1641 hci_chan_del(hchan);
1645 kref_init(&conn->ref);
1646 hcon->l2cap_data = conn;
/* Hold the hci_conn for the lifetime of this l2cap_conn. */
1648 hci_conn_get(conn->hcon);
1649 conn->hchan = hchan;
1651 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU: LE links use le_mtu when set, otherwise the ACL MTU. */
1653 switch (hcon->type) {
1655 if (hcon->hdev->le_mtu) {
1656 conn->mtu = hcon->hdev->le_mtu;
1661 conn->mtu = hcon->hdev->acl_mtu;
1665 conn->feat_mask = 0;
1667 if (hcon->type == ACL_LINK)
1668 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1669 &hcon->hdev->dev_flags);
1671 spin_lock_init(&conn->lock);
1672 mutex_init(&conn->chan_lock);
1674 INIT_LIST_HEAD(&conn->chan_l);
1675 INIT_LIST_HEAD(&conn->users);
/* LE links get a security timer; BR/EDR links an info timer. */
1677 if (hcon->type == LE_LINK)
1678 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1680 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1682 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() (the conn itself is freed on an elided line).
 */
1687 static void l2cap_conn_free(struct kref *ref)
1689 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1691 hci_conn_put(conn->hcon);
/* Take a reference on @conn; pair with l2cap_conn_put(). */
1695 void l2cap_conn_get(struct l2cap_conn *conn)
1697 kref_get(&conn->ref);
1699 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
1701 void l2cap_conn_put(struct l2cap_conn *conn)
1703 kref_put(&conn->ref, l2cap_conn_free);
1705 EXPORT_SYMBOL(l2cap_conn_put);
1707 /* ---- Socket interface ---- */
1709 /* Find socket with psm and source / destination bdaddr.
1710 * Returns closest match: an exact src+dst match wins outright,
 * otherwise the best wildcard (BDADDR_ANY) match found is kept in c1.
1712 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1716 struct l2cap_chan *c, *c1 = NULL;
1718 read_lock(&chan_list_lock);
1720 list_for_each_entry(c, &chan_list, global_l) {
/* When a state filter is given, skip channels in other states. */
1721 if (state && c->state != state)
1724 if (c->psm == psm) {
1725 int src_match, dst_match;
1726 int src_any, dst_any;
/* bacmp() == 0 means equal, so !bacmp() is a match. */
1729 src_match = !bacmp(&c->src, src);
1730 dst_match = !bacmp(&c->dst, dst);
1731 if (src_match && dst_match) {
1732 read_unlock(&chan_list_lock);
1737 src_any = !bacmp(&c->src, BDADDR_ANY);
1738 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1739 if ((src_match && dst_any) || (src_any && dst_match) ||
1740 (src_any && dst_any))
1745 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection on @chan to @dst (PSM- or CID-based).
 * Validates the PSM/mode/state, creates the HCI link (LE or ACL),
 * attaches the channel to the resulting l2cap_conn and either starts
 * the L2CAP connect procedure or completes immediately if the link is
 * already up. Returns 0 or a negative errno (error paths elided here).
 */
1750 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1751 bdaddr_t *dst, u8 dst_type)
1753 struct sock *sk = chan->sk;
1754 struct l2cap_conn *conn;
1755 struct hci_conn *hcon;
1756 struct hci_dev *hdev;
1760 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1761 dst_type, __le16_to_cpu(psm));
1763 hdev = hci_get_route(dst, &chan->src);
1765 return -EHOSTUNREACH;
1769 l2cap_chan_lock(chan);
1771 /* PSM must be odd and lsb of upper byte must be 0 */
1772 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1773 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1778 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1783 switch (chan->mode) {
1784 case L2CAP_MODE_BASIC:
1786 case L2CAP_MODE_ERTM:
1787 case L2CAP_MODE_STREAMING:
1796 switch (chan->state) {
1800 /* Already connecting */
1805 /* Already connected */
1819 /* Set destination address and psm */
1820 bacpy(&chan->dst, dst);
1825 auth_type = l2cap_get_auth_type(chan);
/* Pick the link type from the destination address type. */
1827 if (bdaddr_type_is_le(dst_type))
1828 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1829 chan->sec_level, auth_type);
1831 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1832 chan->sec_level, auth_type);
1835 err = PTR_ERR(hcon);
1839 conn = l2cap_conn_add(hcon);
1841 hci_conn_drop(hcon);
/* CID-based connect: refuse if the destination CID is taken. */
1846 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1847 hci_conn_drop(hcon);
1852 /* Update source addr of the socket */
1853 bacpy(&chan->src, &hcon->src);
/* chan_add acquires conn->chan_lock; drop the chan lock around it. */
1855 l2cap_chan_unlock(chan);
1856 l2cap_chan_add(conn, chan);
1857 l2cap_chan_lock(chan);
1859 /* l2cap_chan_add takes its own ref so we can drop this one */
1860 hci_conn_drop(hcon);
1862 l2cap_state_change(chan, BT_CONNECT);
1863 __set_chan_timer(chan, sk->sk_sndtimeo);
1865 if (hcon->state == BT_CONNECTED) {
1866 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1867 __clear_chan_timer(chan);
1868 if (l2cap_chan_check_security(chan))
1869 l2cap_state_change(chan, BT_CONNECTED);
1871 l2cap_do_start(chan);
1877 l2cap_chan_unlock(chan);
1878 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame on the
 * socket's channel has been acked, the connection goes away, a signal
 * arrives, or a socket error is raised. Returns 0 or a negative errno.
 */
1883 int __l2cap_wait_ack(struct sock *sk)
1885 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1886 DECLARE_WAITQUEUE(wait, current);
1890 add_wait_queue(sk_sleep(sk), &wait);
1891 set_current_state(TASK_INTERRUPTIBLE);
1892 while (chan->unacked_frames > 0 && chan->conn) {
1896 if (signal_pending(current)) {
1897 err = sock_intr_errno(timeo);
1902 timeo = schedule_timeout(timeo);
/* Re-arm the state before re-testing the loop condition. */
1904 set_current_state(TASK_INTERRUPTIBLE);
1906 err = sock_error(sk);
1910 set_current_state(TASK_RUNNING);
1911 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer fired: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine. Drops the timer's channel reference on both paths.
 */
1915 static void l2cap_monitor_timeout(struct work_struct *work)
1917 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1918 monitor_timer.work);
1920 BT_DBG("chan %p", chan);
1922 l2cap_chan_lock(chan);
/* Early-out path (condition elided in this listing). */
1925 l2cap_chan_unlock(chan);
1926 l2cap_chan_put(chan);
1930 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1932 l2cap_chan_unlock(chan);
1933 l2cap_chan_put(chan);
/* ERTM retransmission timer fired: feed L2CAP_EV_RETRANS_TO into the
 * TX state machine. Drops the timer's channel reference on both paths.
 */
1936 static void l2cap_retrans_timeout(struct work_struct *work)
1938 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1939 retrans_timer.work);
1941 BT_DBG("chan %p", chan);
1943 l2cap_chan_lock(chan);
/* Early-out path (condition elided in this listing). */
1946 l2cap_chan_unlock(chan);
1947 l2cap_chan_put(chan);
1951 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1952 l2cap_chan_unlock(chan);
1953 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to tx_q and send every queued
 * frame immediately, stamping each with the next TX sequence number
 * (no retransmission, no ack bookkeeping beyond frames_sent).
 */
1956 static void l2cap_streaming_send(struct l2cap_chan *chan,
1957 struct sk_buff_head *skbs)
1959 struct sk_buff *skb;
1960 struct l2cap_ctrl *control;
1962 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
1964 if (__chan_is_moving(chan))
1967 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1969 while (!skb_queue_empty(&chan->tx_q)) {
1971 skb = skb_dequeue(&chan->tx_q);
1973 bt_cb(skb)->control.retries = 1;
1974 control = &bt_cb(skb)->control;
1976 control->reqseq = 0;
1977 control->txseq = chan->next_tx_seq;
1979 __pack_control(chan, control, skb);
/* Append FCS over the frame when CRC16 checking is configured. */
1981 if (chan->fcs == L2CAP_FCS_CRC16) {
1982 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1983 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1986 l2cap_do_send(chan, skb);
1988 BT_DBG("Sent txseq %u", control->txseq);
1990 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1991 chan->frames_sent++;
/* ERTM transmit: send frames from tx_send_head while the remote TX
 * window has room and the TX state machine is in XMIT. Each frame is
 * stamped, FCS'd if configured, then a clone is sent so the original
 * stays queued for possible retransmission. Returns frames sent.
 */
1995 static int l2cap_ertm_send(struct l2cap_chan *chan)
1997 struct sk_buff *skb, *tx_skb;
1998 struct l2cap_ctrl *control;
2001 BT_DBG("chan %p", chan);
2003 if (chan->state != BT_CONNECTED)
/* Remote signalled busy (RNR): hold transmission. */
2006 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2009 if (__chan_is_moving(chan))
2012 while (chan->tx_send_head &&
2013 chan->unacked_frames < chan->remote_tx_win &&
2014 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2016 skb = chan->tx_send_head;
2018 bt_cb(skb)->control.retries = 1;
2019 control = &bt_cb(skb)->control;
/* Piggyback the F-bit if one is owed to the remote. */
2021 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2024 control->reqseq = chan->buffer_seq;
2025 chan->last_acked_seq = chan->buffer_seq;
2026 control->txseq = chan->next_tx_seq;
2028 __pack_control(chan, control, skb);
2030 if (chan->fcs == L2CAP_FCS_CRC16) {
2031 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2032 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2035 /* Clone after data has been modified. Data is assumed to be
2036 read-only (for locking purposes) on cloned sk_buffs.
2038 tx_skb = skb_clone(skb, GFP_KERNEL);
2043 __set_retrans_timer(chan);
2045 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2046 chan->unacked_frames++;
2047 chan->frames_sent++;
/* Advance tx_send_head, or clear it at the end of the queue. */
2050 if (skb_queue_is_last(&chan->tx_q, skb))
2051 chan->tx_send_head = NULL;
2053 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2055 l2cap_do_send(chan, tx_skb);
2056 BT_DBG("Sent txseq %u", control->txseq);
2059 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2060 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * Each frame's control field is rebuilt with the current reqseq and
 * (if pending) the F-bit, the FCS is recomputed, and the retry counter
 * is checked against max_tx — exceeding it disconnects the channel.
 */
2065 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2067 struct l2cap_ctrl control;
2068 struct sk_buff *skb;
2069 struct sk_buff *tx_skb;
2072 BT_DBG("chan %p", chan);
2074 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2077 if (__chan_is_moving(chan))
2080 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2081 seq = l2cap_seq_list_pop(&chan->retrans_list);
2083 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2085 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2090 bt_cb(skb)->control.retries++;
2091 control = bt_cb(skb)->control;
/* Retry budget exhausted: give up and reset the connection. */
2093 if (chan->max_tx != 0 &&
2094 bt_cb(skb)->control.retries > chan->max_tx) {
2095 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2096 l2cap_send_disconn_req(chan, ECONNRESET);
2097 l2cap_seq_list_clear(&chan->retrans_list);
2101 control.reqseq = chan->buffer_seq;
2102 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2107 if (skb_cloned(skb)) {
2108 /* Cloned sk_buffs are read-only, so we need a
2111 tx_skb = skb_copy(skb, GFP_KERNEL);
2113 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon this retransmission round. */
2117 l2cap_seq_list_clear(&chan->retrans_list);
2121 /* Update skb contents */
2122 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2123 put_unaligned_le32(__pack_extended_control(&control),
2124 tx_skb->data + L2CAP_HDR_SIZE);
2126 put_unaligned_le16(__pack_enhanced_control(&control),
2127 tx_skb->data + L2CAP_HDR_SIZE);
2130 if (chan->fcs == L2CAP_FCS_CRC16) {
2131 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2132 put_unaligned_le16(fcs, skb_put(tx_skb,
2136 l2cap_do_send(chan, tx_skb);
2138 BT_DBG("Resent txseq %d", control.txseq);
2140 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq. */
2144 static void l2cap_retransmit(struct l2cap_chan *chan,
2145 struct l2cap_ctrl *control)
2147 BT_DBG("chan %p, control %p", chan, control);
2149 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2150 l2cap_ertm_resend(chan);
/* Rebuild retrans_list with every unacked frame from control->reqseq
 * up to (but excluding) tx_send_head, then resend them all. Skipped
 * entirely while the remote is busy.
 */
2153 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2154 struct l2cap_ctrl *control)
2156 struct sk_buff *skb;
2158 BT_DBG("chan %p, control %p", chan, control);
/* Poll received: the responses must carry the F-bit. */
2161 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2163 l2cap_seq_list_clear(&chan->retrans_list);
2165 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2168 if (chan->unacked_frames) {
/* Find the first frame at or after reqseq (or tx_send_head). */
2169 skb_queue_walk(&chan->tx_q, skb) {
2170 if (bt_cb(skb)->control.txseq == control->reqseq ||
2171 skb == chan->tx_send_head)
2175 skb_queue_walk_from(&chan->tx_q, skb) {
2176 if (skb == chan->tx_send_head)
2179 l2cap_seq_list_append(&chan->retrans_list,
2180 bt_cb(skb)->control.txseq);
2183 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send an RNR when locally busy, try to
 * piggyback the ack on outgoing I-frames, send an explicit RR once the
 * unacked window is 3/4 full, otherwise just (re)arm the ack timer.
 */
2187 static void l2cap_send_ack(struct l2cap_chan *chan)
2189 struct l2cap_ctrl control;
2190 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2191 chan->last_acked_seq);
2194 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2195 chan, chan->last_acked_seq, chan->buffer_seq);
2197 memset(&control, 0, sizeof(control));
/* Locally busy in RECV state: tell the remote via RNR. */
2200 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2201 chan->rx_state == L2CAP_RX_STATE_RECV) {
2202 __clear_ack_timer(chan);
2203 control.super = L2CAP_SUPER_RNR;
2204 control.reqseq = chan->buffer_seq;
2205 l2cap_send_sframe(chan, &control);
2207 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2208 l2cap_ertm_send(chan);
2209 /* If any i-frames were sent, they included an ack */
2210 if (chan->buffer_seq == chan->last_acked_seq)
2214 /* Ack now if the window is 3/4ths full.
2215 * Calculate without mul or div
2217 threshold = chan->ack_win;
/* threshold += threshold << 1 gives 3*ack_win (shifted right later). */
2218 threshold += threshold << 1;
2221 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2224 if (frames_to_ack >= threshold) {
2225 __clear_ack_timer(chan);
2226 control.super = L2CAP_SUPER_RR;
2227 control.reqseq = chan->buffer_seq;
2228 l2cap_send_sframe(chan, &control);
2233 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb, spilling anything past
 * the first @count bytes into MTU-sized continuation fragments chained
 * on skb's frag_list. Returns 0 on success (error lines elided).
 */
2237 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2238 struct msghdr *msg, int len,
2239 int count, struct sk_buff *skb)
2241 struct l2cap_conn *conn = chan->conn;
2242 struct sk_buff **frag;
2245 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2251 /* Continuation fragments (no L2CAP header) */
2252 frag = &skb_shinfo(skb)->frag_list;
2254 struct sk_buff *tmp;
/* Each fragment is capped at the connection MTU. */
2256 count = min_t(unsigned int, conn->mtu, len);
2258 tmp = chan->ops->alloc_skb(chan, count,
2259 msg->msg_flags & MSG_DONTWAIT);
2261 return PTR_ERR(tmp);
2265 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2268 (*frag)->priority = skb->priority;
/* Account fragment bytes on the head skb's totals. */
2273 skb->len += (*frag)->len;
2274 skb->data_len += (*frag)->len;
2276 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the user payload pulled from @msg.
 * Returns the skb or an ERR_PTR.
 */
2282 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2283 struct msghdr *msg, size_t len,
2286 struct l2cap_conn *conn = chan->conn;
2287 struct sk_buff *skb;
2288 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2289 struct l2cap_hdr *lh;
2291 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2292 __le16_to_cpu(chan->psm), len, priority);
/* First fragment carries the header; rest go to frag_list. */
2294 count = min_t(unsigned int, (conn->mtu - hlen), len);
2296 skb = chan->ops->alloc_skb(chan, count + hlen,
2297 msg->msg_flags & MSG_DONTWAIT);
2301 skb->priority = priority;
2303 /* Create L2CAP header */
2304 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2305 lh->cid = cpu_to_le16(chan->dcid);
2306 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2307 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2309 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2310 if (unlikely(err < 0)) {
2312 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload from @msg. Returns the skb or an ERR_PTR.
 */
2317 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2318 struct msghdr *msg, size_t len,
2321 struct l2cap_conn *conn = chan->conn;
2322 struct sk_buff *skb;
2324 struct l2cap_hdr *lh;
2326 BT_DBG("chan %p len %zu", chan, len);
2328 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2330 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2331 msg->msg_flags & MSG_DONTWAIT);
2335 skb->priority = priority;
2337 /* Create L2CAP header */
2338 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2339 lh->cid = cpu_to_le16(chan->dcid);
2340 lh->len = cpu_to_le16(len);
2342 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2343 if (unlikely(err < 0)) {
2345 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at send time), an optional SDU-length field for the
 * first segment, the payload, and room for an FCS when configured.
 * Returns the skb or an ERR_PTR.
 */
2350 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2351 struct msghdr *msg, size_t len,
2354 struct l2cap_conn *conn = chan->conn;
2355 struct sk_buff *skb;
2356 int err, count, hlen;
2357 struct l2cap_hdr *lh;
2359 BT_DBG("chan %p len %zu", chan, len);
2362 return ERR_PTR(-ENOTCONN);
/* Header size depends on enhanced vs. extended control fields. */
2364 hlen = __ertm_hdr_size(chan);
2367 hlen += L2CAP_SDULEN_SIZE;
2369 if (chan->fcs == L2CAP_FCS_CRC16)
2370 hlen += L2CAP_FCS_SIZE;
2372 count = min_t(unsigned int, (conn->mtu - hlen), len);
2374 skb = chan->ops->alloc_skb(chan, count + hlen,
2375 msg->msg_flags & MSG_DONTWAIT);
2379 /* Create L2CAP header */
2380 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2381 lh->cid = cpu_to_le16(chan->dcid);
2382 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2384 /* Control header is populated later */
2385 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2386 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2388 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* SAR start segments carry the total SDU length. */
2391 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2393 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2394 if (unlikely(err < 0)) {
2396 return ERR_PTR(err);
2399 bt_cb(skb)->control.fcs = chan->fcs;
2400 bt_cb(skb)->control.retries = 0;
/* Segment an SDU from @msg into I-frame PDUs queued on @seg_queue.
 * The PDU size is derived from the HCI MTU (capped for BR/EDR and by
 * the remote's MPS), and SAR markers (UNSEGMENTED / START / CONTINUE /
 * END) are assigned per segment. Purges the queue on any PDU error.
 */
2404 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2405 struct sk_buff_head *seg_queue,
2406 struct msghdr *msg, size_t len)
2408 struct sk_buff *skb;
2413 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2415 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2416 * so fragmented skbs are not used. The HCI layer's handling
2417 * of fragmented skbs is not compatible with ERTM's queueing.
2420 /* PDU size is derived from the HCI MTU */
2421 pdu_len = chan->conn->mtu;
2423 /* Constrain PDU size for BR/EDR connections */
2425 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2427 /* Adjust for largest possible L2CAP overhead. */
2429 pdu_len -= L2CAP_FCS_SIZE;
2431 pdu_len -= __ertm_hdr_size(chan);
2433 /* Remote device may have requested smaller PDUs */
2434 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2436 if (len <= pdu_len) {
/* Whole SDU fits in one PDU. */
2437 sar = L2CAP_SAR_UNSEGMENTED;
2441 sar = L2CAP_SAR_START;
/* First segment also carries the SDU length field. */
2443 pdu_len -= L2CAP_SDULEN_SIZE;
2447 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2450 __skb_queue_purge(seg_queue);
2451 return PTR_ERR(skb);
2454 bt_cb(skb)->control.sar = sar;
2455 __skb_queue_tail(seg_queue, skb);
2460 pdu_len += L2CAP_SDULEN_SIZE;
2463 if (len <= pdu_len) {
2464 sar = L2CAP_SAR_END;
2467 sar = L2CAP_SAR_CONTINUE;
/* Top-level send entry point: dispatch @msg on @chan according to the
 * channel type and mode — connectionless PDU, basic-mode PDU, or
 * segmented ERTM/streaming transmission. Returns bytes sent or a
 * negative errno (some error/return lines are elided in this listing).
 */
2474 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2477 struct sk_buff *skb;
2479 struct sk_buff_head seg_queue;
2481 /* Connectionless channel */
2482 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2483 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2485 return PTR_ERR(skb);
2487 l2cap_do_send(chan, skb);
2491 switch (chan->mode) {
2492 case L2CAP_MODE_BASIC:
2493 /* Check outgoing MTU */
2494 if (len > chan->omtu)
2497 /* Create a basic PDU */
2498 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2500 return PTR_ERR(skb);
2502 l2cap_do_send(chan, skb);
2506 case L2CAP_MODE_ERTM:
2507 case L2CAP_MODE_STREAMING:
2508 /* Check outgoing MTU */
2509 if (len > chan->omtu) {
2514 __skb_queue_head_init(&seg_queue);
2516 /* Do segmentation before calling in to the state machine,
2517 * since it's possible to block while waiting for memory
2520 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2522 /* The channel could have been closed while segmenting,
2523 * check that it is still connected.
2525 if (chan->state != BT_CONNECTED) {
2526 __skb_queue_purge(&seg_queue);
/* ERTM goes through the TX state machine; streaming sends directly. */
2533 if (chan->mode == L2CAP_MODE_ERTM)
2534 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2536 l2cap_streaming_send(chan, &seg_queue);
2540 /* If the skbs were not queued for sending, they'll still be in
2541 * seg_queue and need to be purged.
2543 __skb_queue_purge(&seg_queue);
2547 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every expected-but-missing sequence number up
 * to @txseq, recording each on srej_list, then advance expected_tx_seq
 * past @txseq.
 */
2554 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2556 struct l2cap_ctrl control;
2559 BT_DBG("chan %p, txseq %u", chan, txseq);
2561 memset(&control, 0, sizeof(control));
2563 control.super = L2CAP_SUPER_SREJ;
2565 for (seq = chan->expected_tx_seq; seq != txseq;
2566 seq = __next_seq(chan, seq)) {
/* Only SREJ frames not already buffered out of order. */
2567 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2568 control.reqseq = seq;
2569 l2cap_send_sframe(chan, &control);
2570 l2cap_seq_list_append(&chan->srej_list, seq);
2574 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recent (tail) entry on srej_list, if
 * the list is non-empty.
 */
2577 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2579 struct l2cap_ctrl control;
2581 BT_DBG("chan %p", chan);
2583 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2586 memset(&control, 0, sizeof(control));
2588 control.super = L2CAP_SUPER_SREJ;
2589 control.reqseq = chan->srej_list.tail;
2590 l2cap_send_sframe(chan, &control);
/* Re-send SREJ frames for every outstanding entry on srej_list except
 * @txseq, rotating each popped entry back to the tail so the list is
 * preserved; the saved initial head bounds the walk to one pass.
 */
2593 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2595 struct l2cap_ctrl control;
2599 BT_DBG("chan %p, txseq %u", chan, txseq);
2601 memset(&control, 0, sizeof(control));
2603 control.super = L2CAP_SUPER_SREJ;
2605 /* Capture initial list head to allow only one pass through the list. */
2606 initial_head = chan->srej_list.head;
2609 seq = l2cap_seq_list_pop(&chan->srej_list);
/* Stop at the now-received txseq or when the list empties. */
2610 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2613 control.reqseq = seq;
2614 l2cap_send_sframe(chan, &control);
2615 l2cap_seq_list_append(&chan->srej_list, seq);
2616 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every TX-queue frame from
 * expected_ack_seq up to (excluding) @reqseq, update the counters and
 * stop the retransmission timer once nothing is outstanding.
 */
2619 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2621 struct sk_buff *acked_skb;
2624 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or ack repeats what we already know. */
2626 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2629 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2630 chan->expected_ack_seq, chan->unacked_frames);
2632 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2633 ackseq = __next_seq(chan, ackseq)) {
2635 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2637 skb_unlink(acked_skb, &chan->tx_q);
2638 kfree_skb(acked_skb);
2639 chan->unacked_frames--;
2643 chan->expected_ack_seq = reqseq;
2645 if (chan->unacked_frames == 0)
2646 __clear_retrans_timer(chan);
2648 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT receive state: roll expected_tx_seq back to
 * buffer_seq, drop all pending SREJ bookkeeping and any out-of-order
 * buffered frames, and return the RX state machine to RECV.
 */
2651 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2653 BT_DBG("chan %p", chan);
2655 chan->expected_tx_seq = chan->buffer_seq;
2656 l2cap_seq_list_clear(&chan->srej_list);
2657 skb_queue_purge(&chan->srej_q);
2658 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: handle data requests, local-busy
 * transitions, incoming acks, and explicit polls / retransmission
 * timeouts (the latter two move the machine to WAIT_F).
 */
2661 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2662 struct l2cap_ctrl *control,
2663 struct sk_buff_head *skbs, u8 event)
2665 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2669 case L2CAP_EV_DATA_REQUEST:
2670 if (chan->tx_send_head == NULL)
2671 chan->tx_send_head = skb_peek(skbs);
2673 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2674 l2cap_ertm_send(chan);
2676 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2677 BT_DBG("Enter LOCAL_BUSY");
2678 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2680 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2681 /* The SREJ_SENT state must be aborted if we are to
2682 * enter the LOCAL_BUSY state.
2684 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() emits the RNR for the busy condition. */
2687 l2cap_send_ack(chan);
2690 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2691 BT_DBG("Exit LOCAL_BUSY");
2692 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2694 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
/* We told the remote we were busy: poll with RR(P=1) to resume. */
2695 struct l2cap_ctrl local_control;
2697 memset(&local_control, 0, sizeof(local_control));
2698 local_control.sframe = 1;
2699 local_control.super = L2CAP_SUPER_RR;
2700 local_control.poll = 1;
2701 local_control.reqseq = chan->buffer_seq;
2702 l2cap_send_sframe(chan, &local_control);
2704 chan->retry_count = 1;
2705 __set_monitor_timer(chan);
2706 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2709 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2710 l2cap_process_reqseq(chan, control->reqseq);
2712 case L2CAP_EV_EXPLICIT_POLL:
2713 l2cap_send_rr_or_rnr(chan, 1);
2714 chan->retry_count = 1;
2715 __set_monitor_timer(chan);
2716 __clear_ack_timer(chan);
2717 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2719 case L2CAP_EV_RETRANS_TO:
2720 l2cap_send_rr_or_rnr(chan, 1);
2721 chan->retry_count = 1;
2722 __set_monitor_timer(chan);
2723 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2725 case L2CAP_EV_RECV_FBIT:
2726 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state: a poll (P=1) has been sent and
 * we are waiting for a frame with the F-bit. Data is queued but not
 * sent; receiving the F-bit returns the machine to XMIT; monitor
 * timeouts re-poll up to max_tx times, then disconnect.
 *
 * Fix: the BT_DBG format string read "0x2.2%x", which prints a literal
 * "2.2" and then the bare value — the '%' was misplaced. Corrected to
 * "0x%2.2x" to match every other BT_DBG in this file.
 */
2733 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2734 struct l2cap_ctrl *control,
2735 struct sk_buff_head *skbs, u8 event)
2737 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2741 case L2CAP_EV_DATA_REQUEST:
2742 if (chan->tx_send_head == NULL)
2743 chan->tx_send_head = skb_peek(skbs);
2744 /* Queue data, but don't send. */
2745 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2747 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2748 BT_DBG("Enter LOCAL_BUSY");
2749 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2751 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2752 /* The SREJ_SENT state must be aborted if we are to
2753 * enter the LOCAL_BUSY state.
2755 l2cap_abort_rx_srej_sent(chan);
2758 l2cap_send_ack(chan);
2761 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2762 BT_DBG("Exit LOCAL_BUSY");
2763 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2765 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
/* We previously sent RNR: poll again with RR(P=1) to resume. */
2766 struct l2cap_ctrl local_control;
2767 memset(&local_control, 0, sizeof(local_control));
2768 local_control.sframe = 1;
2769 local_control.super = L2CAP_SUPER_RR;
2770 local_control.poll = 1;
2771 local_control.reqseq = chan->buffer_seq;
2772 l2cap_send_sframe(chan, &local_control);
2774 chan->retry_count = 1;
2775 __set_monitor_timer(chan);
2776 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2779 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2780 l2cap_process_reqseq(chan, control->reqseq);
2784 case L2CAP_EV_RECV_FBIT:
2785 if (control && control->final) {
/* F-bit answers our poll: back to normal transmission. */
2786 __clear_monitor_timer(chan);
2787 if (chan->unacked_frames > 0)
2788 __set_retrans_timer(chan);
2789 chan->retry_count = 0;
2790 chan->tx_state = L2CAP_TX_STATE_XMIT;
2791 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2794 case L2CAP_EV_EXPLICIT_POLL:
/* Already polling; ignore. */
2797 case L2CAP_EV_MONITOR_TO:
2798 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2799 l2cap_send_rr_or_rnr(chan, 1);
2800 __set_monitor_timer(chan);
2801 chan->retry_count++;
2803 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch an event into the ERTM TX state machine according to the
 * channel's current tx_state (XMIT or WAIT_F).
 */
2811 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2812 struct sk_buff_head *skbs, u8 event)
2814 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2815 chan, control, skbs, event, chan->tx_state);
2817 switch (chan->tx_state) {
2818 case L2CAP_TX_STATE_XMIT:
2819 l2cap_tx_state_xmit(chan, control, skbs, event);
2821 case L2CAP_TX_STATE_WAIT_F:
2822 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received control field's reqseq/F-bit to the TX machine. */
2830 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2831 struct l2cap_ctrl *control)
2833 BT_DBG("chan %p, control %p", chan, control);
2834 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only the F-bit of a received control field to the TX machine. */
2837 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2838 struct l2cap_ctrl *control)
2840 BT_DBG("chan %p, control %p", chan, control);
2841 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2844 /* Copy frame to all raw sockets on that connection */
2845 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2847 struct sk_buff *nskb;
2848 struct l2cap_chan *chan;
2850 BT_DBG("conn %p", conn);
2852 mutex_lock(&conn->chan_lock);
2854 list_for_each_entry(chan, &conn->chan_l, list) {
2855 struct sock *sk = chan->sk;
2856 if (chan->chan_type != L2CAP_CHAN_RAW)
2859 /* Don't send frame to the socket it came from */
/* Each raw channel gets its own clone of the frame. */
2862 nskb = skb_clone(skb, GFP_KERNEL);
/* recv() takes ownership on success; free the clone otherwise. */
2866 if (chan->ops->recv(chan, nskb))
2870 mutex_unlock(&conn->chan_lock);
2873 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-command skb: L2CAP header (CID chosen by link
 * type), command header, and @dlen bytes of @data — split into
 * MTU-sized continuation fragments on frag_list if needed.
 * Returns the skb or NULL (error lines elided in this listing).
 */
2874 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2875 u8 ident, u16 dlen, void *data)
2877 struct sk_buff *skb, **frag;
2878 struct l2cap_cmd_hdr *cmd;
2879 struct l2cap_hdr *lh;
2882 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2883 conn, code, ident, dlen);
/* The MTU must at least hold the two headers. */
2885 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2888 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2889 count = min_t(unsigned int, conn->mtu, len);
2891 skb = bt_skb_alloc(count, GFP_KERNEL);
2895 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2896 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
2898 if (conn->hcon->type == LE_LINK)
2899 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2901 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2903 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2906 cmd->len = cpu_to_le16(dlen);
2909 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2910 memcpy(skb_put(skb, count), data, count);
2916 /* Continuation fragments (no L2CAP header) */
2917 frag = &skb_shinfo(skb)->frag_list;
2919 count = min_t(unsigned int, conn->mtu, len);
2921 *frag = bt_skb_alloc(count, GFP_KERNEL);
2925 memcpy(skb_put(*frag, count), data, count);
2930 frag = &(*frag)->next;
/* Parse one configuration option at *@ptr: return its type/len and
 * decode the value by size (1/2/4 bytes, or a pointer for larger
 * options). The option length is returned so the caller can advance.
 */
2940 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2943 struct l2cap_conf_opt *opt = *ptr;
2946 len = L2CAP_CONF_OPT_SIZE + opt->len;
2954 *val = *((u8 *) opt->val);
2958 *val = get_unaligned_le16(opt->val);
2962 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes. */
2966 *val = (unsigned long) opt->val;
2970 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *@ptr, encoding @val by @len
 * (1/2/4 bytes written in place; larger values memcpy'd from the
 * pointer carried in @val), then advance *@ptr past the option.
 */
2974 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2976 struct l2cap_conf_opt *opt = *ptr;
2978 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2985 *((u8 *) opt->val) = val;
2989 put_unaligned_le16(val, opt->val);
2993 put_unaligned_le32(val, opt->val);
/* For larger options @val is actually a pointer to the data. */
2997 memcpy(opt->val, (void *) val, len);
3001 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification (EFS) option from the channel's
 * local parameters and append it at *ptr via l2cap_add_conf_opt().
 * ERTM advertises the channel's own id/service type; streaming mode
 * forces best-effort service.  Non-ERTM/streaming handling is elided
 * from this listing.
 */
3004 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3006 struct l2cap_conf_efs efs;
3008 switch (chan->mode) {
3009 case L2CAP_MODE_ERTM:
3010 efs.id = chan->local_id;
3011 efs.stype = chan->local_stype;
3012 efs.msdu = cpu_to_le16(chan->local_msdu);
3013 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
/* Access latency / flush timeout use fixed defaults for ERTM. */
3014 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3015 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3018 case L2CAP_MODE_STREAMING:
3020 efs.stype = L2CAP_SERV_BESTEFFORT;
3021 efs.msdu = cpu_to_le16(chan->local_msdu);
3022 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3031 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3032 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: under the channel lock,
 * compute how many received frames are still unacknowledged
 * (buffer_seq vs. last_acked_seq) and send an RR/RNR to ack them,
 * then drop the reference taken when the timer was armed.
 */
3035 static void l2cap_ack_timeout(struct work_struct *work)
3037 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3041 BT_DBG("chan %p", chan);
3043 l2cap_chan_lock(chan);
3045 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3046 chan->last_acked_seq);
3049 l2cap_send_rr_or_rnr(chan, 0);
3051 l2cap_chan_unlock(chan);
/* Balances the hold taken when the ack timer was scheduled. */
3052 l2cap_chan_put(chan);
/* Initialise per-channel ERTM/streaming state: zero all sequence
 * counters, reset AMP move state to BR/EDR/stable, and (for ERTM only)
 * set the RX/TX state machines, arm the retransmit/monitor/ack work
 * items and allocate the SREJ and retransmit sequence lists.  On
 * failure of the second list the first is freed; returns 0 or a
 * negative errno.
 */
3055 int l2cap_ertm_init(struct l2cap_chan *chan)
3059 chan->next_tx_seq = 0;
3060 chan->expected_tx_seq = 0;
3061 chan->expected_ack_seq = 0;
3062 chan->unacked_frames = 0;
3063 chan->buffer_seq = 0;
3064 chan->frames_sent = 0;
3065 chan->last_acked_seq = 0;
3067 chan->sdu_last_frag = NULL;
3070 skb_queue_head_init(&chan->tx_q);
/* Channel starts on the BR/EDR controller with no AMP move in flight. */
3072 chan->local_amp_id = AMP_ID_BREDR;
3073 chan->move_id = AMP_ID_BREDR;
3074 chan->move_state = L2CAP_MOVE_STABLE;
3075 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Everything below is ERTM-only; streaming mode stops here. */
3077 if (chan->mode != L2CAP_MODE_ERTM)
3080 chan->rx_state = L2CAP_RX_STATE_RECV;
3081 chan->tx_state = L2CAP_TX_STATE_XMIT;
3083 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3084 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3085 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3087 skb_queue_head_init(&chan->srej_q);
3089 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3093 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the srej_list if the retrans_list allocation failed. */
3095 l2cap_seq_list_free(&chan->srej_list);
/* Pick an operating mode: keep ERTM/streaming only if the remote's
 * feature mask supports it, otherwise fall back to basic mode.
 */
3100 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3103 case L2CAP_MODE_STREAMING:
3104 case L2CAP_MODE_ERTM:
3105 if (l2cap_mode_supported(mode, remote_feat_mask))
3109 return L2CAP_MODE_BASIC;
/* Extended Window Size usable: needs high-speed enabled on this
 * connection and the remote advertising the extended-window feature. */
3113 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3115 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec usable: needs high-speed enabled on this
 * connection and the remote advertising the extended-flow feature. */
3118 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3120 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill rfc->retrans_timeout / rfc->monitor_timeout.  On an AMP link the
 * timeouts are derived from the controller's best-effort flush timeout
 * (converted to ms, scaled 3x + 500ms headroom, clamped to 16 bits);
 * otherwise the spec defaults are used.
 */
3123 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3124 struct l2cap_conf_rfc *rfc)
3126 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3127 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3129 /* Class 1 devices have must have ERTM timeouts
3130 * exceeding the Link Supervision Timeout. The
3131 * default Link Supervision Timeout for AMP
3132 * controllers is 10 seconds.
3134 * Class 1 devices use 0xffffffff for their
3135 * best-effort flush timeout, so the clamping logic
3136 * will result in a timeout that meets the above
3137 * requirement. ERTM timeouts are 16-bit values, so
3138 * the maximum timeout is 65.535 seconds.
3141 /* Convert timeout to milliseconds and round */
3142 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3144 /* This is the recommended formula for class 2 devices
3145 * that start ERTM timers when packets are sent to the
3148 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field; the assignment on the elided line
 * presumably saturates ertm_to at 0xffff. */
3150 if (ertm_to > 0xffff)
3153 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3154 rfc->monitor_timeout = rfc->retrans_timeout;
/* Non-AMP (BR/EDR) path: spec default timeouts. */
3156 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3157 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Size the transmit window.  If the requested window exceeds the
 * default and the peer supports extended window sizes, enable the
 * extended control field; otherwise cap tx_win at the default.  The
 * ack window tracks whatever tx_win ends up being.
 */
3161 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3163 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3164 __l2cap_ews_supported(chan->conn)) {
3165 /* use extended control field */
3166 set_bit(FLAG_EXT_CTRL, &chan->flags);
3167 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3169 chan->tx_win = min_t(u16, chan->tx_win,
3170 L2CAP_DEFAULT_TX_WINDOW);
3171 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3173 chan->ack_win = chan->tx_win;
/* Build an outgoing Configure Request for the channel into *data and
 * return its length (return statements elided in this listing).  On the
 * first request the mode may be downgraded via l2cap_select_mode();
 * then MTU, RFC, and optionally EFS/EWS/FCS options are appended
 * depending on the negotiated mode and feature bits.
 */
3176 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3178 struct l2cap_conf_req *req = data;
3179 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3180 void *ptr = req->data;
3183 BT_DBG("chan %p", chan);
/* Mode selection only happens before any req/rsp has been exchanged. */
3185 if (chan->num_conf_req || chan->num_conf_rsp)
3188 switch (chan->mode) {
3189 case L2CAP_MODE_STREAMING:
3190 case L2CAP_MODE_ERTM:
/* State-2 devices insist on their configured mode. */
3191 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3194 if (__l2cap_efs_supported(chan->conn))
3195 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3199 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it differs from the default. */
3204 if (chan->imtu != L2CAP_DEFAULT_MTU)
3205 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3207 switch (chan->mode) {
3208 case L2CAP_MODE_BASIC:
/* No RFC option needed if the peer supports neither ERTM nor
 * streaming — basic is implied. */
3209 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3210 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3213 rfc.mode = L2CAP_MODE_BASIC;
3215 rfc.max_transmit = 0;
3216 rfc.retrans_timeout = 0;
3217 rfc.monitor_timeout = 0;
3218 rfc.max_pdu_size = 0;
3220 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3221 (unsigned long) &rfc);
3224 case L2CAP_MODE_ERTM:
3225 rfc.mode = L2CAP_MODE_ERTM;
3226 rfc.max_transmit = chan->max_tx;
3228 __l2cap_set_ertm_timeouts(chan, &rfc);
/* MPS bounded by link MTU minus extended header/SDU-len/FCS overhead. */
3230 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3231 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3233 rfc.max_pdu_size = cpu_to_le16(size);
3235 l2cap_txwin_setup(chan);
3237 rfc.txwin_size = min_t(u16, chan->tx_win,
3238 L2CAP_DEFAULT_TX_WINDOW);
3240 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3241 (unsigned long) &rfc);
3243 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3244 l2cap_add_opt_efs(&ptr, chan);
/* Extended window size travels in its own EWS option. */
3246 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3247 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3250 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3251 if (chan->fcs == L2CAP_FCS_NONE ||
3252 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3253 chan->fcs = L2CAP_FCS_NONE;
3254 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3259 case L2CAP_MODE_STREAMING:
3260 l2cap_txwin_setup(chan);
3261 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming: no retransmission, so these RFC fields are zero. */
3263 rfc.max_transmit = 0;
3264 rfc.retrans_timeout = 0;
3265 rfc.monitor_timeout = 0;
3267 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3268 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3270 rfc.max_pdu_size = cpu_to_le16(size);
3272 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3273 (unsigned long) &rfc);
3275 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3276 l2cap_add_opt_efs(&ptr, chan);
3278 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3279 if (chan->fcs == L2CAP_FCS_NONE ||
3280 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3281 chan->fcs = L2CAP_FCS_NONE;
3282 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3288 req->dcid = cpu_to_le16(chan->dcid);
3289 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated Configure Request (chan->conf_req/conf_len)
 * and build the Configure Response into *data; returns the response
 * length (return elided in this listing).  Walks every option, records
 * peer parameters on the channel, echoes unknown non-hint options as
 * L2CAP_CONF_UNKNOWN, resolves the mode, then emits the negotiated
 * MTU/RFC/EFS options.  -ECONNREFUSED aborts the negotiation.
 */
3294 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3296 struct l2cap_conf_rsp *rsp = data;
3297 void *ptr = rsp->data;
3298 void *req = chan->conf_req;
3299 int len = chan->conf_len;
3300 int type, hint, olen;
3302 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3303 struct l2cap_conf_efs efs;
3305 u16 mtu = L2CAP_DEFAULT_MTU;
3306 u16 result = L2CAP_CONF_SUCCESS;
3309 BT_DBG("chan %p", chan);
/* First pass: consume every option in the request buffer. */
3311 while (len >= L2CAP_CONF_OPT_SIZE) {
3312 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit means "ignore if not understood". */
3314 hint = type & L2CAP_CONF_HINT;
3315 type &= L2CAP_CONF_MASK;
3318 case L2CAP_CONF_MTU:
3322 case L2CAP_CONF_FLUSH_TO:
3323 chan->flush_to = val;
3326 case L2CAP_CONF_QOS:
3329 case L2CAP_CONF_RFC:
3330 if (olen == sizeof(rfc))
3331 memcpy(&rfc, (void *) val, olen);
3334 case L2CAP_CONF_FCS:
3335 if (val == L2CAP_FCS_NONE)
3336 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3339 case L2CAP_CONF_EFS:
3341 if (olen == sizeof(efs))
3342 memcpy(&efs, (void *) val, olen);
3345 case L2CAP_CONF_EWS:
/* EWS is only legal when high speed is enabled on this conn. */
3346 if (!chan->conn->hs_enabled)
3347 return -ECONNREFUSED;
3349 set_bit(FLAG_EXT_CTRL, &chan->flags);
3350 set_bit(CONF_EWS_RECV, &chan->conf_state);
3351 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3352 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as unaccepted. */
3359 result = L2CAP_CONF_UNKNOWN;
3360 *((u8 *) ptr++) = type;
/* Mode resolution only on the first exchange. */
3365 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3368 switch (chan->mode) {
3369 case L2CAP_MODE_STREAMING:
3370 case L2CAP_MODE_ERTM:
3371 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3372 chan->mode = l2cap_select_mode(rfc.mode,
3373 chan->conn->feat_mask);
3378 if (__l2cap_efs_supported(chan->conn))
3379 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3381 return -ECONNREFUSED;
3384 if (chan->mode != rfc.mode)
3385 return -ECONNREFUSED;
/* Peer proposed a different mode than we settled on. */
3391 if (chan->mode != rfc.mode) {
3392 result = L2CAP_CONF_UNACCEPT;
3393 rfc.mode = chan->mode;
/* Give up after the second disagreeing response. */
3395 if (chan->num_conf_rsp == 1)
3396 return -ECONNREFUSED;
3398 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3399 (unsigned long) &rfc);
3402 if (result == L2CAP_CONF_SUCCESS) {
3403 /* Configure output options and let the other side know
3404 * which ones we don't like. */
3406 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3407 result = L2CAP_CONF_UNACCEPT;
3410 set_bit(CONF_MTU_DONE, &chan->conf_state);
3412 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless either side is no-traffic. */
3415 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3416 efs.stype != L2CAP_SERV_NOTRAFIC &&
3417 efs.stype != chan->local_stype) {
3419 result = L2CAP_CONF_UNACCEPT;
3421 if (chan->num_conf_req >= 1)
3422 return -ECONNREFUSED;
3424 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3426 (unsigned long) &efs);
3428 /* Send PENDING Conf Rsp */
3429 result = L2CAP_CONF_PENDING;
3430 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3435 case L2CAP_MODE_BASIC:
3436 chan->fcs = L2CAP_FCS_NONE;
3437 set_bit(CONF_MODE_DONE, &chan->conf_state);
3440 case L2CAP_MODE_ERTM:
/* Without EWS the remote window comes from the RFC option. */
3441 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3442 chan->remote_tx_win = rfc.txwin_size;
3444 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3446 chan->remote_max_tx = rfc.max_transmit;
/* Clamp peer MPS to what our link MTU can carry. */
3448 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3449 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3450 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3451 rfc.max_pdu_size = cpu_to_le16(size);
3452 chan->remote_mps = size;
3454 __l2cap_set_ertm_timeouts(chan, &rfc);
3456 set_bit(CONF_MODE_DONE, &chan->conf_state);
3458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3459 sizeof(rfc), (unsigned long) &rfc);
3461 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3462 chan->remote_id = efs.id;
3463 chan->remote_stype = efs.stype;
3464 chan->remote_msdu = le16_to_cpu(efs.msdu);
3465 chan->remote_flush_to =
3466 le32_to_cpu(efs.flush_to);
3467 chan->remote_acc_lat =
3468 le32_to_cpu(efs.acc_lat);
3469 chan->remote_sdu_itime =
3470 le32_to_cpu(efs.sdu_itime);
3471 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3473 (unsigned long) &efs);
3477 case L2CAP_MODE_STREAMING:
3478 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3479 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3480 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3481 rfc.max_pdu_size = cpu_to_le16(size);
3482 chan->remote_mps = size;
3484 set_bit(CONF_MODE_DONE, &chan->conf_state);
3486 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3487 (unsigned long) &rfc);
/* Any other mode is rejected outright. */
3492 result = L2CAP_CONF_UNACCEPT;
3494 memset(&rfc, 0, sizeof(rfc));
3495 rfc.mode = chan->mode;
3498 if (result == L2CAP_CONF_SUCCESS)
3499 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3501 rsp->scid = cpu_to_le16(chan->dcid);
3502 rsp->result = cpu_to_le16(result);
3503 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the peer and build the follow-up
 * Configure Request into *data; returns its length (return elided).
 * Options the peer adjusted are re-echoed with our accepted values;
 * *result may be modified (e.g. to UNACCEPT on a too-small MTU).  On
 * success/pending, the negotiated ERTM/streaming parameters are
 * committed to the channel.
 */
3508 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3509 void *data, u16 *result)
3511 struct l2cap_conf_req *req = data;
3512 void *ptr = req->data;
3515 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3516 struct l2cap_conf_efs efs;
3518 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3520 while (len >= L2CAP_CONF_OPT_SIZE) {
3521 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3524 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject but keep
 * negotiating with the minimum value. */
3525 if (val < L2CAP_DEFAULT_MIN_MTU) {
3526 *result = L2CAP_CONF_UNACCEPT;
3527 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3533 case L2CAP_CONF_FLUSH_TO:
3534 chan->flush_to = val;
3535 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3539 case L2CAP_CONF_RFC:
3540 if (olen == sizeof(rfc))
3541 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not be talked out of their mode. */
3543 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3544 rfc.mode != chan->mode)
3545 return -ECONNREFUSED;
3549 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3550 sizeof(rfc), (unsigned long) &rfc);
3553 case L2CAP_CONF_EWS:
3554 chan->ack_win = min_t(u16, val, chan->ack_win);
3555 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3559 case L2CAP_CONF_EFS:
3560 if (olen == sizeof(efs))
3561 memcpy(&efs, (void *)val, olen);
3563 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3564 efs.stype != L2CAP_SERV_NOTRAFIC &&
3565 efs.stype != chan->local_stype)
3566 return -ECONNREFUSED;
3568 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3569 (unsigned long) &efs);
3572 case L2CAP_CONF_FCS:
3573 if (*result == L2CAP_CONF_PENDING)
3574 if (val == L2CAP_FCS_NONE)
3575 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be re-negotiated to anything else. */
3581 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3582 return -ECONNREFUSED;
3584 chan->mode = rfc.mode;
3586 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3588 case L2CAP_MODE_ERTM:
3589 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3590 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3591 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3592 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3593 chan->ack_win = min_t(u16, chan->ack_win,
3596 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3597 chan->local_msdu = le16_to_cpu(efs.msdu);
3598 chan->local_sdu_itime =
3599 le32_to_cpu(efs.sdu_itime);
3600 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3601 chan->local_flush_to =
3602 le32_to_cpu(efs.flush_to);
3606 case L2CAP_MODE_STREAMING:
3607 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3611 req->dcid = cpu_to_le16(chan->dcid);
3612 req->flags = __constant_cpu_to_le16(0);
/* Fill a minimal Configure Response header (scid/result/flags) into
 * *data; the response length is returned on the elided final line.
 */
3617 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3618 u16 result, u16 flags)
3620 struct l2cap_conf_rsp *rsp = data;
3621 void *ptr = rsp->data;
3623 BT_DBG("chan %p", chan);
3625 rsp->scid = cpu_to_le16(chan->dcid);
3626 rsp->result = cpu_to_le16(result);
3627 rsp->flags = cpu_to_le16(flags);
/* Send the deferred success response to a pending Connect/Create-Chan
 * request (using the ident saved in chan->ident) and, if not already
 * done, kick off configuration with our first Configure Request.
 */
3632 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3634 struct l2cap_conn_rsp rsp;
3635 struct l2cap_conn *conn = chan->conn;
3639 rsp.scid = cpu_to_le16(chan->dcid);
3640 rsp.dcid = cpu_to_le16(chan->scid);
3641 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3642 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP;
 * the selecting condition is elided from this listing. */
3645 rsp_code = L2CAP_CREATE_CHAN_RSP;
3647 rsp_code = L2CAP_CONN_RSP;
3649 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3651 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller past this test sends the config request. */
3653 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3656 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3657 l2cap_build_conf_req(chan, buf), buf);
3658 chan->num_conf_req++;
/* Extract the RFC (and extended-window) option from a successful
 * Configure Response and commit the final ERTM/streaming parameters
 * (timeouts, MPS, ack window) to the channel.  Does nothing for basic
 * mode channels.
 */
3661 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3665 /* Use sane default values in case a misbehaving remote device
3666 * did not send an RFC or extended window size option.
3668 u16 txwin_ext = chan->ack_win;
3669 struct l2cap_conf_rfc rfc = {
3671 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3672 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3673 .max_pdu_size = cpu_to_le16(chan->imtu),
3674 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3677 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3679 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3682 while (len >= L2CAP_CONF_OPT_SIZE) {
3683 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3686 case L2CAP_CONF_RFC:
3687 if (olen == sizeof(rfc))
3688 memcpy(&rfc, (void *)val, olen);
3690 case L2CAP_CONF_EWS:
3697 case L2CAP_MODE_ERTM:
3698 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3699 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3700 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* With extended control the ack window comes from the EWS value,
 * otherwise from the RFC's (elided) txwin_size clamp. */
3701 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3702 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3704 chan->ack_win = min_t(u16, chan->ack_win,
3707 case L2CAP_MODE_STREAMING:
3708 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request (matching ident), abandon the feature-mask query,
 * mark it done, and start any channels waiting on the answer.
 */
3712 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3713 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3716 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Malformed PDU: too short to carry the reject reason. */
3718 if (cmd_len < sizeof(*rej))
3721 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3724 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3725 cmd->ident == conn->info_ident) {
3726 cancel_delayed_work(&conn->info_timer);
3728 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3729 conn->info_ident = 0;
3731 l2cap_conn_start(conn);
/* Handle an incoming Connect (or AMP Create Channel) request: find a
 * listening channel for the PSM, run security checks, create the child
 * channel, pick the result/status (success, pending, or an error), and
 * send the response with rsp_code.  May also fire the initial feature
 * Information Request and our first Configure Request.  Returns the
 * new channel (or NULL on the elided failure paths).
 * NOTE(review): several lines (dcid assignment, parent locking, goto
 * labels) are elided from this listing.
 */
3737 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3738 struct l2cap_cmd_hdr *cmd,
3739 u8 *data, u8 rsp_code, u8 amp_id)
3741 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3742 struct l2cap_conn_rsp rsp;
3743 struct l2cap_chan *chan = NULL, *pchan;
3744 struct sock *parent, *sk = NULL;
3745 int result, status = L2CAP_CS_NO_INFO;
3747 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3748 __le16 psm = req->psm;
3750 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3752 /* Check if we have socket listening on psm */
3753 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3756 result = L2CAP_CR_BAD_PSM;
3762 mutex_lock(&conn->chan_lock);
3765 /* Check if the ACL is secure enough (if not SDP) */
3766 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3767 !hci_conn_check_link_mode(conn->hcon)) {
3768 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3769 result = L2CAP_CR_SEC_BLOCK;
/* Default failure code for the checks below. */
3773 result = L2CAP_CR_NO_MEM;
3775 /* Check if we already have channel with that dcid */
3776 if (__l2cap_get_chan_by_dcid(conn, scid))
3779 chan = pchan->ops->new_connection(pchan);
3785 /* For certain devices (ex: HID mouse), support for authentication,
3786 * pairing and bonding is optional. For such devices, inorder to avoid
3787 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3788 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3790 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3792 bacpy(&chan->src, &conn->hcon->src);
3793 bacpy(&chan->dst, &conn->hcon->dst);
3796 chan->local_amp_id = amp_id;
3798 __l2cap_chan_add(conn, chan);
3802 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for the (possibly deferred) response. */
3804 chan->ident = cmd->ident;
3806 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3807 if (l2cap_chan_check_security(chan)) {
/* Userspace asked to defer accept: report authorization pending. */
3808 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3809 __l2cap_state_change(chan, BT_CONNECT2);
3810 result = L2CAP_CR_PEND;
3811 status = L2CAP_CS_AUTHOR_PEND;
3812 chan->ops->defer(chan);
3814 /* Force pending result for AMP controllers.
3815 * The connection will succeed after the
3816 * physical link is up.
3818 if (amp_id == AMP_ID_BREDR) {
3819 __l2cap_state_change(chan, BT_CONFIG);
3820 result = L2CAP_CR_SUCCESS;
3822 __l2cap_state_change(chan, BT_CONNECT2);
3823 result = L2CAP_CR_PEND;
3825 status = L2CAP_CS_NO_INFO;
/* Security procedure still running: authentication pending. */
3828 __l2cap_state_change(chan, BT_CONNECT2);
3829 result = L2CAP_CR_PEND;
3830 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not known yet: answer pending, query below. */
3833 __l2cap_state_change(chan, BT_CONNECT2);
3834 result = L2CAP_CR_PEND;
3835 status = L2CAP_CS_NO_INFO;
3839 release_sock(parent);
3840 mutex_unlock(&conn->chan_lock);
3843 rsp.scid = cpu_to_le16(scid);
3844 rsp.dcid = cpu_to_le16(dcid);
3845 rsp.result = cpu_to_le16(result);
3846 rsp.status = cpu_to_le16(status);
3847 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3849 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3850 struct l2cap_info_req info;
3851 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3853 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3854 conn->info_ident = l2cap_get_ident(conn);
3856 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3858 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3859 sizeof(info), &info);
/* Connection accepted immediately: start configuration now. */
3862 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3863 result == L2CAP_CR_SUCCESS) {
3865 set_bit(CONF_REQ_SENT, &chan->conf_state);
3866 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3867 l2cap_build_conf_req(chan, buf), buf);
3868 chan->num_conf_req++;
/* Signalling handler for a BR/EDR Connect Request: validate the PDU
 * length, notify the management interface of the (first) connection,
 * then delegate the real work to l2cap_connect() with amp_id 0.
 */
3874 static int l2cap_connect_req(struct l2cap_conn *conn,
3875 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3877 struct hci_dev *hdev = conn->hcon->hdev;
3878 struct hci_conn *hcon = conn->hcon;
3880 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Tell mgmt about the device exactly once per ACL connection. */
3884 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3885 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3886 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3887 hcon->dst_type, 0, NULL, 0,
3889 hci_dev_unlock(hdev);
3891 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connect/Create-Channel Response: locate the channel by scid
 * (or by request ident when no scid was assigned yet), then act on the
 * result — success moves to BT_CONFIG and sends our Configure Request,
 * pending just sets CONF_CONNECT_PEND, anything else tears the channel
 * down with ECONNREFUSED.
 */
3895 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3896 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3899 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3900 u16 scid, dcid, result, status;
3901 struct l2cap_chan *chan;
3905 if (cmd_len < sizeof(*rsp))
3908 scid = __le16_to_cpu(rsp->scid);
3909 dcid = __le16_to_cpu(rsp->dcid);
3910 result = __le16_to_cpu(rsp->result);
3911 status = __le16_to_cpu(rsp->status);
3913 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3914 dcid, scid, result, status);
3916 mutex_lock(&conn->chan_lock);
3919 chan = __l2cap_get_chan_by_scid(conn, scid);
/* Fallback lookup: response carried no usable scid. */
3925 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3934 l2cap_chan_lock(chan);
3937 case L2CAP_CR_SUCCESS:
3938 l2cap_state_change(chan, BT_CONFIG);
3941 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Don't send a second Configure Request if one is in flight. */
3943 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3946 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3947 l2cap_build_conf_req(chan, req), req);
3948 chan->num_conf_req++;
3952 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any non-success, non-pending result kills the channel. */
3956 l2cap_chan_del(chan, ECONNREFUSED);
3960 l2cap_chan_unlock(chan);
3963 mutex_unlock(&conn->chan_lock);
/* Apply the FCS default after configuration: off for basic mode, and
 * CRC16 for ERTM/streaming unless the peer asked to disable it. */
3968 static inline void set_default_fcs(struct l2cap_chan *chan)
3970 /* FCS is enabled only in ERTM or streaming mode, if one or both
3973 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3974 chan->fcs = L2CAP_FCS_NONE;
3975 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3976 chan->fcs = L2CAP_FCS_CRC16;
/* Complete a locally-pending EFS negotiation: clear the local-pending
 * flag, mark output config done, and send a SUCCESS Configure Response
 * with the given ident/flags.
 */
3979 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3980 u8 ident, u16 flags)
3982 struct l2cap_conn *conn = chan->conn;
3984 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3987 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3988 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3990 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3991 l2cap_build_conf_rsp(chan, data,
3992 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configure Request.  Accumulates (possibly
 * continued) option data into chan->conf_req, rejects oversized
 * buffers, and once complete parses it, replies, and — when both
 * directions are configured — finishes channel setup (FCS default,
 * ERTM init, ready notification).  Also sends our own Configure
 * Request if we have not yet done so.
 */
3995 static inline int l2cap_config_req(struct l2cap_conn *conn,
3996 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3999 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4002 struct l2cap_chan *chan;
4005 if (cmd_len < sizeof(*req))
4008 dcid = __le16_to_cpu(req->dcid);
4009 flags = __le16_to_cpu(req->flags);
4011 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4013 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with an invalid-CID Command Reject. */
4017 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4018 struct l2cap_cmd_rej_cid rej;
4020 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4021 rej.scid = cpu_to_le16(chan->scid);
4022 rej.dcid = cpu_to_le16(chan->dcid);
4024 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
4029 /* Reject if config buffer is too small. */
4030 len = cmd_len - sizeof(*req);
4031 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4032 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4033 l2cap_build_conf_rsp(chan, rsp,
4034 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the per-channel accumulation buffer. */
4039 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4040 chan->conf_len += len;
4042 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4043 /* Incomplete config. Send empty response. */
4044 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4045 l2cap_build_conf_rsp(chan, rsp,
4046 L2CAP_CONF_SUCCESS, flags), rsp);
4050 /* Complete config. */
4051 len = l2cap_parse_conf_req(chan, rsp);
/* Negative length from the parser means the negotiation failed. */
4053 l2cap_send_disconn_req(chan, ECONNRESET);
4057 chan->ident = cmd->ident;
4058 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4059 chan->num_conf_rsp++;
4061 /* Reset config buffer. */
4064 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4067 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4068 set_default_fcs(chan);
4070 if (chan->mode == L2CAP_MODE_ERTM ||
4071 chan->mode == L2CAP_MODE_STREAMING)
4072 err = l2cap_ertm_init(chan);
4075 l2cap_send_disconn_req(chan, -err);
4077 l2cap_chan_ready(chan);
/* We still owe the peer our own Configure Request. */
4082 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4084 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4085 l2cap_build_conf_req(chan, buf), buf);
4086 chan->num_conf_req++;
4089 /* Got Conf Rsp PENDING from remote side and asume we sent
4090 Conf Rsp PENDING in the code above */
4091 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4092 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4094 /* check compatibility */
4096 /* Send rsp for BR/EDR channel */
4098 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4100 chan->ident = cmd->ident;
4104 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response.  SUCCESS commits the final
 * RFC parameters; PENDING may trigger the EFS/AMP completion path;
 * UNACCEPT retries with a corrected request up to the retry limit;
 * anything else disconnects.  When both directions are done the
 * channel is brought up (FCS default, ERTM init, ready).
 */
4108 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4109 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4112 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4113 u16 scid, flags, result;
4114 struct l2cap_chan *chan;
4115 int len = cmd_len - sizeof(*rsp);
4118 if (cmd_len < sizeof(*rsp))
4121 scid = __le16_to_cpu(rsp->scid);
4122 flags = __le16_to_cpu(rsp->flags);
4123 result = __le16_to_cpu(rsp->result);
4125 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4128 chan = l2cap_get_chan_by_scid(conn, scid);
4133 case L2CAP_CONF_SUCCESS:
4134 l2cap_conf_rfc_get(chan, rsp->data, len);
4135 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4138 case L2CAP_CONF_PENDING:
4139 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4141 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4144 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4147 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR completes now; AMP waits for the logical link. */
4151 if (!chan->hs_hcon) {
4152 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4155 if (l2cap_check_efs(chan)) {
4156 amp_create_logical_link(chan);
4157 chan->ident = cmd->ident;
4163 case L2CAP_CONF_UNACCEPT:
4164 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Corrected request won't fit our buffer: give up. */
4167 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4168 l2cap_send_disconn_req(chan, ECONNRESET);
4172 /* throw out any old stored conf requests */
4173 result = L2CAP_CONF_SUCCESS;
4174 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4177 l2cap_send_disconn_req(chan, ECONNRESET);
4181 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4182 L2CAP_CONF_REQ, len, req);
4183 chan->num_conf_req++;
4184 if (result != L2CAP_CONF_SUCCESS)
/* Fatal result (e.g. REJECT/UNKNOWN past retries): tear down. */
4190 l2cap_chan_set_err(chan, ECONNRESET);
4192 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4193 l2cap_send_disconn_req(chan, ECONNRESET);
4197 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4200 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4202 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4203 set_default_fcs(chan);
4205 if (chan->mode == L2CAP_MODE_ERTM ||
4206 chan->mode == L2CAP_MODE_STREAMING)
4207 err = l2cap_ertm_init(chan);
4210 l2cap_send_disconn_req(chan, -err);
4212 l2cap_chan_ready(chan);
4216 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnect Request: find the channel by our cid
 * (the request's dcid), acknowledge with a Disconnect Response, shut
 * the socket down, and delete the channel.  The extra hold keeps the
 * channel alive across ops->close() after it leaves the conn list.
 */
4220 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4221 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4224 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4225 struct l2cap_disconn_rsp rsp;
4227 struct l2cap_chan *chan;
4230 if (cmd_len != sizeof(*req))
4233 scid = __le16_to_cpu(req->scid);
4234 dcid = __le16_to_cpu(req->dcid);
4236 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4238 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our local cid. */
4240 chan = __l2cap_get_chan_by_scid(conn, dcid);
4242 mutex_unlock(&conn->chan_lock);
4246 l2cap_chan_lock(chan);
4250 rsp.dcid = cpu_to_le16(chan->scid);
4251 rsp.scid = cpu_to_le16(chan->dcid);
4252 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4255 sk->sk_shutdown = SHUTDOWN_MASK;
/* Hold across chan_del so ops->close below sees a live channel. */
4258 l2cap_chan_hold(chan);
4259 l2cap_chan_del(chan, ECONNRESET);
4261 l2cap_chan_unlock(chan);
4263 chan->ops->close(chan);
4264 l2cap_chan_put(chan);
4266 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnect Response: the peer confirmed our
 * disconnect, so delete the channel (error 0 — clean shutdown) and
 * close it, holding a reference across the close callback.
 */
4271 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4272 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4275 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4277 struct l2cap_chan *chan;
4279 if (cmd_len != sizeof(*rsp))
4282 scid = __le16_to_cpu(rsp->scid);
4283 dcid = __le16_to_cpu(rsp->dcid);
4285 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4287 mutex_lock(&conn->chan_lock);
4289 chan = __l2cap_get_chan_by_scid(conn, scid);
4291 mutex_unlock(&conn->chan_lock);
4295 l2cap_chan_lock(chan);
4297 l2cap_chan_hold(chan);
4298 l2cap_chan_del(chan, 0);
4300 l2cap_chan_unlock(chan);
4302 chan->ops->close(chan);
4303 l2cap_chan_put(chan);
4305 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request: answer feature-mask queries
 * with our (possibly HS-extended) capability bits, fixed-channel
 * queries with the fixed-channel map (A2MP bit conditioned on HS),
 * and anything else with NOTSUPP.
 */
4310 static inline int l2cap_information_req(struct l2cap_conn *conn,
4311 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4314 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4317 if (cmd_len != sizeof(*req))
4320 type = __le16_to_cpu(req->type);
4322 BT_DBG("type 0x%4.4x", type);
4324 if (type == L2CAP_IT_FEAT_MASK) {
4326 u32 feat_mask = l2cap_feat_mask;
4327 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4328 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4329 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4331 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* High-speed links additionally advertise flow-spec and
 * extended-window support. */
4333 if (conn->hs_enabled)
4334 feat_mask |= L2CAP_FEAT_EXT_FLOW
4335 | L2CAP_FEAT_EXT_WINDOW;
4337 put_unaligned_le32(feat_mask, rsp->data);
4338 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4340 } else if (type == L2CAP_IT_FIXED_CHAN) {
4342 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Reflect current HS state in the shared fixed-channel map. */
4344 if (conn->hs_enabled)
4345 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4347 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4349 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4350 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4351 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4352 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4355 struct l2cap_info_rsp rsp;
4356 rsp.type = cpu_to_le16(type);
4357 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4358 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response to our query.  A feature-mask
 * answer may chain a follow-up fixed-channel query; once all answers
 * (or a failure) arrive, mark info exchange done and start pending
 * channels via l2cap_conn_start().
 */
4365 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4366 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4369 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4372 if (cmd_len < sizeof(*rsp))
4375 type = __le16_to_cpu(rsp->type);
4376 result = __le16_to_cpu(rsp->result);
4378 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4380 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4381 if (cmd->ident != conn->info_ident ||
4382 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4385 cancel_delayed_work(&conn->info_timer);
4387 if (result != L2CAP_IR_SUCCESS) {
4388 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4389 conn->info_ident = 0;
4391 l2cap_conn_start(conn);
4397 case L2CAP_IT_FEAT_MASK:
4398 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones before starting. */
4400 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4401 struct l2cap_info_req req;
4402 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4404 conn->info_ident = l2cap_get_ident(conn);
4406 l2cap_send_cmd(conn, conn->info_ident,
4407 L2CAP_INFO_REQ, sizeof(req), &req);
4409 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4410 conn->info_ident = 0;
4412 l2cap_conn_start(conn);
4416 case L2CAP_IT_FIXED_CHAN:
4417 conn->fixed_chan_mask = rsp->data[0];
4418 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4419 conn->info_ident = 0;
4421 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP).
 *
 * amp_id == AMP_ID_BREDR falls back to a normal BR/EDR connect; any
 * other amp_id must name a powered-up AMP controller. On validation
 * failure a CREATE_CHAN_RSP with L2CAP_CR_BAD_AMP is returned.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
4428 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4429 struct l2cap_cmd_hdr *cmd,
4430 u16 cmd_len, void *data)
4432 struct l2cap_create_chan_req *req = data;
4433 struct l2cap_create_chan_rsp rsp;
4434 struct l2cap_chan *chan;
4435 struct hci_dev *hdev;
4438 if (cmd_len != sizeof(*req))
/* Creating AMP channels is only legal when high speed is enabled. */
4441 if (!conn->hs_enabled)
4444 psm = le16_to_cpu(req->psm);
4445 scid = le16_to_cpu(req->scid);
4447 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4449 /* For controller id 0 make BR/EDR connection */
4450 if (req->amp_id == AMP_ID_BREDR) {
4451 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4456 /* Validate AMP controller id */
4457 hdev = hci_dev_get(req->amp_id);
4461 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4466 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4469 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4470 struct hci_conn *hs_hcon;
4472 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4479 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
/* Bind the new channel to the AMP manager and the high-speed link;
 * AMP channels carry no FCS and use the controller's block MTU.
 */
4481 mgr->bredr_chan = chan;
4482 chan->hs_hcon = hs_hcon;
4483 chan->fcs = L2CAP_FCS_NONE;
4484 conn->mtu = hdev->block_mtu;
/* Error path: reject the request with "bad AMP controller id". */
4493 rsp.scid = cpu_to_le16(scid);
4494 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4495 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4497 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for @chan towards @dest_amp_id.
 * Allocates a fresh signalling ident (stored in chan->ident so the
 * response can be matched) and arms the channel move timer.
 */
4503 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4505 struct l2cap_move_chan_req req;
4508 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4510 ident = l2cap_get_ident(chan->conn);
4511 chan->ident = ident;
/* The initiator identifies the channel by its own source CID. */
4513 req.icid = cpu_to_le16(chan->scid);
4514 req.dest_amp_id = dest_amp_id;
4516 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4519 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with @result, reusing the ident of the
 * request being answered (chan->ident). The responder names the channel
 * by its destination CID, i.e. the initiator's ICID.
 */
4522 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4524 struct l2cap_move_chan_rsp rsp;
4526 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4528 rsp.icid = cpu_to_le16(chan->dcid);
4529 rsp.result = cpu_to_le16(result);
4531 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm with @result for @chan.
 * A new ident is allocated (the confirm/confirm-rsp pair is matched by
 * it) and the move timer is re-armed while waiting for the cfm rsp.
 */
4535 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4537 struct l2cap_move_chan_cfm cfm;
4539 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4541 chan->ident = l2cap_get_ident(chan->conn);
4543 cfm.icid = cpu_to_le16(chan->scid);
4544 cfm.result = cpu_to_le16(result);
4546 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4549 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare @icid.
 * Used when no matching channel could be located but the spec still
 * requires a confirm to be sent for the given ICID.
 */
4552 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4554 struct l2cap_move_chan_cfm cfm;
4556 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4558 cfm.icid = cpu_to_le16(icid);
4559 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4561 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirm Response echoing @icid, using the @ident
 * of the confirm being acknowledged.
 */
4565 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4568 struct l2cap_move_chan_cfm_rsp rsp;
4570 BT_DBG("icid 0x%4.4x", icid);
4572 rsp.icid = cpu_to_le16(icid);
4573 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link.
 * Actual link teardown is not implemented yet (placeholder below).
 */
4576 static void __release_logical_link(struct l2cap_chan *chan)
4578 chan->hs_hchan = NULL;
4579 chan->hs_hcon = NULL;
4581 /* Placeholder - release the logical link */
/* Handle failure to set up the AMP logical link for @chan.
 * Before the channel is connected this aborts channel creation; once
 * connected it unwinds an in-progress channel move according to the
 * local move role.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
4584 static void l2cap_logical_fail(struct l2cap_chan *chan)
4586 /* Logical link setup failed */
4587 if (chan->state != BT_CONNECTED) {
4588 /* Create channel failure, disconnect */
4589 l2cap_send_disconn_req(chan, ECONNRESET);
4593 switch (chan->move_role) {
4594 case L2CAP_MOVE_ROLE_RESPONDER:
/* As responder: finish the move locally, tell the peer we can't. */
4595 l2cap_move_done(chan);
4596 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4598 case L2CAP_MOVE_ROLE_INITIATOR:
4599 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4600 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4601 /* Remote has only sent pending or
4602 * success responses, clean up
4604 l2cap_move_done(chan);
4607 /* Other amp move states imply that the move
4608 * has already aborted
4610 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link @hchan is up:
 * bind the link, answer the pending EFS config, and if configuration
 * already finished, initialize ERTM and mark the channel ready.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
4615 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4616 struct hci_chan *hchan)
4618 struct l2cap_conf_rsp rsp;
4620 chan->hs_hchan = hchan;
4621 chan->hs_hcon->l2cap_data = chan->conn;
/* The config response was deferred until the logical link came up. */
4623 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4625 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4628 set_default_fcs(chan);
4630 err = l2cap_ertm_init(chan);
/* ERTM init failure is fatal for the channel: disconnect. */
4632 l2cap_send_disconn_req(chan, -err);
4634 l2cap_chan_ready(chan);
/* Advance an in-progress channel move once the AMP logical link is up.
 * The next action (wait, confirm, or respond) depends on the current
 * move state and role; an unexpected state releases the link.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
4638 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4639 struct hci_chan *hchan)
4641 chan->hs_hcon = hchan->conn;
4642 chan->hs_hcon->l2cap_data = chan->conn;
4644 BT_DBG("move_state %d", chan->move_state);
4646 switch (chan->move_state) {
4647 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4648 /* Move confirm will be sent after a success
4649 * response is received
4651 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4653 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local busy defers the confirm; otherwise confirm (initiator)
 * or respond success (responder) immediately.
 */
4654 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4655 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4656 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4657 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4658 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4659 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4660 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4661 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4665 /* Move was not in expected state, free the channel */
4666 __release_logical_link(chan);
4668 chan->move_state = L2CAP_MOVE_STABLE;
/* Call with chan locked */
/* Logical link setup confirmation callback.
 * On failure, unwind via l2cap_logical_fail() and drop the link refs.
 * On success, dispatch to the create-completion path (channel not yet
 * connected) or the move-completion path (connected channel).
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
4673 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4676 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4679 l2cap_logical_fail(chan);
4680 __release_logical_link(chan);
4684 if (chan->state != BT_CONNECTED) {
4685 /* Ignore logical link if channel is on BR/EDR */
4686 if (chan->local_amp_id != AMP_ID_BREDR)
4687 l2cap_logical_finish_create(chan, hchan);
4689 l2cap_logical_finish_move(chan, hchan);
/* Initiate a channel move for @chan.
 * From BR/EDR: only if the channel policy prefers AMP; physical link
 * setup is then started (placeholder). From an AMP controller: send a
 * move request back to BR/EDR (dest_amp_id 0) straight away.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
4693 void l2cap_move_start(struct l2cap_chan *chan)
4695 BT_DBG("chan %p", chan);
4697 if (chan->local_amp_id == AMP_ID_BREDR) {
4698 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4700 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4701 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4702 /* Placeholder - start physical link setup */
4704 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4705 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4707 l2cap_move_setup(chan);
/* dest_amp_id 0 == AMP_ID_BREDR: move back to BR/EDR. */
4708 l2cap_send_move_chan_req(chan, 0);
/* Continue AMP channel creation after the physical link attempt.
 * Outgoing channels (BT_CONNECT) either proceed with a create-channel
 * request on the AMP or fall back to a plain BR/EDR connect. Incoming
 * channels answer the pending CREATE_CHAN_REQ and, on success, move to
 * BT_CONFIG and send the first configuration request.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
4712 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4713 u8 local_amp_id, u8 remote_amp_id)
4715 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4716 local_amp_id, remote_amp_id);
/* AMP channels do not use an FCS. */
4718 chan->fcs = L2CAP_FCS_NONE;
4720 /* Outgoing channel on AMP */
4721 if (chan->state == BT_CONNECT) {
4722 if (result == L2CAP_CR_SUCCESS) {
4723 chan->local_amp_id = local_amp_id;
4724 l2cap_send_create_chan_req(chan, remote_amp_id);
4726 /* Revert to BR/EDR connect */
4727 l2cap_send_conn_req(chan);
4733 /* Incoming channel on AMP */
4734 if (__l2cap_no_conn_pending(chan)) {
4735 struct l2cap_conn_rsp rsp;
4737 rsp.scid = cpu_to_le16(chan->dcid);
4738 rsp.dcid = cpu_to_le16(chan->scid);
4740 if (result == L2CAP_CR_SUCCESS) {
4741 /* Send successful response */
4742 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4743 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4745 /* Send negative response */
4746 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4747 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4750 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4753 if (result == L2CAP_CR_SUCCESS) {
/* Channel accepted: enter configuration and kick it off. */
4754 __l2cap_state_change(chan, BT_CONFIG);
4755 set_bit(CONF_REQ_SENT, &chan->conf_state);
4756 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4758 l2cap_build_conf_req(chan, buf), buf);
4759 chan->num_conf_req++;
/* As move initiator: prepare the channel, record the target controller
 * id, and send the Move Channel Request towards @remote_amp_id.
 */
4764 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4767 l2cap_move_setup(chan);
4768 chan->move_id = local_amp_id;
4769 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4771 l2cap_send_move_chan_req(chan, remote_amp_id);
/* As move responder: answer the peer's move request depending on the
 * logical link state — success if the link is (or becomes) ready,
 * defer while it is connecting, or refuse if unavailable.
 *
 * NOTE(review): elided listing; hchan acquisition is a placeholder, so
 * hchan may be NULL on the visible dereference path — hidden lines
 * presumably guard this. Comments cover visible lines only.
 */
4774 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4776 struct hci_chan *hchan = NULL;
4778 /* Placeholder - get hci_chan for logical link */
4781 if (hchan->state == BT_CONNECTED) {
4782 /* Logical link is ready to go */
4783 chan->hs_hcon = hchan->conn;
4784 chan->hs_hcon->l2cap_data = chan->conn;
4785 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4786 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4788 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4790 /* Wait for logical link to be ready */
4791 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4794 /* Logical link not available */
4795 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress channel move.
 * A responder still owes the peer a response (BAD_ID for -EINVAL,
 * NOT_ALLOWED otherwise); then reset move bookkeeping and resume
 * ERTM transmission that was paused for the move.
 */
4799 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4801 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4803 if (result == -EINVAL)
4804 rsp_result = L2CAP_MR_BAD_ID;
4806 rsp_result = L2CAP_MR_NOT_ALLOWED;
4808 l2cap_send_move_chan_rsp(chan, rsp_result);
4811 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4812 chan->move_state = L2CAP_MOVE_STABLE;
4814 /* Restart data transmission */
4815 l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* Physical (AMP) link setup confirmation.
 * Dispatches to channel creation (not yet connected), move cancel (on
 * failure), or the role-specific move continuation.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
4819 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4821 u8 local_amp_id = chan->local_amp_id;
4822 u8 remote_amp_id = chan->remote_amp_id;
4824 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4825 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away: nothing to do. */
4827 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4828 l2cap_chan_unlock(chan);
4832 if (chan->state != BT_CONNECTED) {
4833 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4834 } else if (result != L2CAP_MR_SUCCESS) {
4835 l2cap_do_move_cancel(chan, result);
4837 switch (chan->move_role) {
4838 case L2CAP_MOVE_ROLE_INITIATOR:
4839 l2cap_do_move_initiate(chan, local_amp_id,
4842 case L2CAP_MOVE_ROLE_RESPONDER:
4843 l2cap_do_move_respond(chan, result);
/* No (or unknown) role: treat as a cancelled move. */
4846 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.
 * Validates the ICID, the channel's eligibility (dynamic CID, policy,
 * ERTM/streaming mode), the destination controller, and move-collision
 * rules, then either responds immediately (moves to/from BR/EDR) or
 * defers with PEND while the AMP physical link is prepared.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
4852 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4853 struct l2cap_cmd_hdr *cmd,
4854 u16 cmd_len, void *data)
4856 struct l2cap_move_chan_req *req = data;
4857 struct l2cap_move_chan_rsp rsp;
4858 struct l2cap_chan *chan;
4860 u16 result = L2CAP_MR_NOT_ALLOWED;
4862 if (cmd_len != sizeof(*req))
4865 icid = le16_to_cpu(req->icid);
4867 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves are only legal when high speed is enabled. */
4869 if (!conn->hs_enabled)
4872 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown ICID: respond NOT_ALLOWED directly, no channel state. */
4874 rsp.icid = cpu_to_le16(icid);
4875 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4876 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Remember the request ident so the response can reuse it. */
4881 chan->ident = cmd->ident;
4883 if (chan->scid < L2CAP_CID_DYN_START ||
4884 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4885 (chan->mode != L2CAP_MODE_ERTM &&
4886 chan->mode != L2CAP_MODE_STREAMING)) {
4887 result = L2CAP_MR_NOT_ALLOWED;
4888 goto send_move_response;
4891 if (chan->local_amp_id == req->dest_amp_id) {
4892 result = L2CAP_MR_SAME_ID;
4893 goto send_move_response;
4896 if (req->dest_amp_id != AMP_ID_BREDR) {
4897 struct hci_dev *hdev;
4898 hdev = hci_dev_get(req->dest_amp_id);
4899 if (!hdev || hdev->dev_type != HCI_AMP ||
4900 !test_bit(HCI_UP, &hdev->flags)) {
4904 result = L2CAP_MR_BAD_ID;
4905 goto send_move_response;
4910 /* Detect a move collision. Only send a collision response
4911 * if this side has "lost", otherwise proceed with the move.
4912 * The winner has the larger bd_addr.
4914 if ((__chan_is_moving(chan) ||
4915 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4916 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4917 result = L2CAP_MR_COLLISION;
4918 goto send_move_response;
4921 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4922 l2cap_move_setup(chan);
4923 chan->move_id = req->dest_amp_id;
4926 if (req->dest_amp_id == AMP_ID_BREDR) {
4927 /* Moving to BR/EDR */
4928 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4929 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4930 result = L2CAP_MR_PEND;
4932 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4933 result = L2CAP_MR_SUCCESS;
/* Moving to an AMP: must prepare the physical link first. */
4936 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4937 /* Placeholder - uncomment when amp functions are available */
4938 /*amp_accept_physical(chan, req->dest_amp_id);*/
4939 result = L2CAP_MR_PEND;
4943 l2cap_send_move_chan_rsp(chan, result);
4945 l2cap_chan_unlock(chan);
/* Continue the initiator side of a move after a SUCCESS or PEND
 * Move Channel Response. The next step depends on the current move
 * state and whether the logical link is already connected; any other
 * state means the move failed and is confirmed UNCONFIRMED.
 *
 * NOTE(review): elided listing; hchan acquisition is a placeholder, so
 * hchan may be NULL on the visible dereference path — hidden lines
 * presumably guard this. Comments cover visible lines only.
 */
4950 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4952 struct l2cap_chan *chan;
4953 struct hci_chan *hchan = NULL;
4955 chan = l2cap_get_chan_by_scid(conn, icid);
/* Unknown channel: confirm UNCONFIRMED for the bare icid. */
4957 l2cap_send_move_chan_cfm_icid(conn, icid);
4961 __clear_chan_timer(chan);
/* PEND extends the wait using the ERTX timeout. */
4962 if (result == L2CAP_MR_PEND)
4963 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4965 switch (chan->move_state) {
4966 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4967 /* Move confirm will be sent when logical link
4970 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4972 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4973 if (result == L2CAP_MR_PEND) {
4975 } else if (test_bit(CONN_LOCAL_BUSY,
4976 &chan->conn_state)) {
4977 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4979 /* Logical link is up or moving to BR/EDR,
4982 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4983 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4986 case L2CAP_MOVE_WAIT_RSP:
4988 if (result == L2CAP_MR_SUCCESS) {
4989 /* Remote is ready, send confirm immediately
4990 * after logical link is ready
4992 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4994 /* Both logical link and move success
4995 * are required to confirm
4997 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5000 /* Placeholder - get hci_chan for logical link */
5002 /* Logical link not available */
5003 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5007 /* If the logical link is not yet connected, do not
5008 * send confirmation.
5010 if (hchan->state != BT_CONNECTED)
5013 /* Logical link is already ready to go */
5015 chan->hs_hcon = hchan->conn;
5016 chan->hs_hcon->l2cap_data = chan->conn;
5018 if (result == L2CAP_MR_SUCCESS) {
5019 /* Can confirm now */
5020 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5022 /* Now only need move success
5025 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5028 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5031 /* Any other amp move state means the move failed. */
5032 chan->move_id = chan->local_amp_id;
5033 l2cap_move_done(chan);
5034 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5037 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response.
 * A COLLISION result flips this side to responder (the peer won the
 * collision); any other failure cancels the move. In both cases the
 * move is confirmed UNCONFIRMED.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5040 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5043 struct l2cap_chan *chan;
5045 chan = l2cap_get_chan_by_ident(conn, ident);
5047 /* Could not locate channel, icid is best guess */
5048 l2cap_send_move_chan_cfm_icid(conn, icid);
5052 __clear_chan_timer(chan);
5054 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5055 if (result == L2CAP_MR_COLLISION) {
5056 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5058 /* Cleanup - cancel move */
5059 chan->move_id = chan->local_amp_id;
5060 l2cap_move_done(chan);
5064 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5066 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Response: SUCCESS/PEND continue the
 * move, anything else takes the failure path.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5069 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5070 struct l2cap_cmd_hdr *cmd,
5071 u16 cmd_len, void *data)
5073 struct l2cap_move_chan_rsp *rsp = data;
5076 if (cmd_len != sizeof(*rsp))
5079 icid = le16_to_cpu(rsp->icid);
5080 result = le16_to_cpu(rsp->result);
5082 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5084 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5085 l2cap_move_continue(conn, icid, result);
5087 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm.
 * On CONFIRMED the channel switches to the new controller (releasing
 * the logical link when landing back on BR/EDR); either way the move
 * ends and a confirm response is sent. A confirm response is also
 * required for an unknown ICID.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5092 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5093 struct l2cap_cmd_hdr *cmd,
5094 u16 cmd_len, void *data)
5096 struct l2cap_move_chan_cfm *cfm = data;
5097 struct l2cap_chan *chan;
5100 if (cmd_len != sizeof(*cfm))
5103 icid = le16_to_cpu(cfm->icid);
5104 result = le16_to_cpu(cfm->result);
5106 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5108 chan = l2cap_get_chan_by_dcid(conn, icid);
5110 /* Spec requires a response even if the icid was not found */
5111 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5115 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5116 if (result == L2CAP_MC_CONFIRMED) {
5117 chan->local_amp_id = chan->move_id;
5118 if (chan->local_amp_id == AMP_ID_BREDR)
5119 __release_logical_link(chan);
/* UNCONFIRMED: stay on the current controller. */
5121 chan->move_id = chan->local_amp_id;
5124 l2cap_move_done(chan);
5127 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5129 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirm Response: the move is fully
 * acknowledged, so commit the new controller id, release the logical
 * link if the channel moved back to BR/EDR, and finish the move.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5134 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5135 struct l2cap_cmd_hdr *cmd,
5136 u16 cmd_len, void *data)
5138 struct l2cap_move_chan_cfm_rsp *rsp = data;
5139 struct l2cap_chan *chan;
5142 if (cmd_len != sizeof(*rsp))
5145 icid = le16_to_cpu(rsp->icid);
5147 BT_DBG("icid 0x%4.4x", icid);
5149 chan = l2cap_get_chan_by_scid(conn, icid);
5153 __clear_chan_timer(chan);
5155 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5156 chan->local_amp_id = chan->move_id;
5158 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5159 __release_logical_link(chan);
5161 l2cap_move_done(chan);
5164 l2cap_chan_unlock(chan);
5169 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5174 if (min > max || min < 6 || max > 3200)
5177 if (to_multiplier < 10 || to_multiplier > 3200)
5180 if (max >= to_multiplier * 8)
5183 max_latency = (to_multiplier * 8 / max) - 1;
5184 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (slave-initiated).
 * Only valid when we are master; validates the requested parameters,
 * answers accept/reject, and on accept asks the controller to apply
 * the new connection parameters.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5190 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5191 struct l2cap_cmd_hdr *cmd,
5194 struct hci_conn *hcon = conn->hcon;
5195 struct l2cap_conn_param_update_req *req;
5196 struct l2cap_conn_param_update_rsp rsp;
5197 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply parameter updates. */
5200 if (!(hcon->link_mode & HCI_LM_MASTER))
5203 cmd_len = __le16_to_cpu(cmd->len);
5204 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5207 req = (struct l2cap_conn_param_update_req *) data;
5208 min = __le16_to_cpu(req->min);
5209 max = __le16_to_cpu(req->max);
5210 latency = __le16_to_cpu(req->latency);
5211 to_multiplier = __le16_to_cpu(req->to_multiplier);
5213 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5214 min, max, latency, to_multiplier);
5216 memset(&rsp, 0, sizeof(rsp));
5218 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5220 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5222 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5224 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the LE controller. */
5228 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR L2CAP signalling command to its handler.
 * Echo requests are answered inline by reflecting the payload; unknown
 * opcodes are logged (and presumably rejected by elided code).
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5233 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5234 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5239 switch (cmd->code) {
5240 case L2CAP_COMMAND_REJ:
5241 l2cap_command_rej(conn, cmd, cmd_len, data);
5244 case L2CAP_CONN_REQ:
5245 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5248 case L2CAP_CONN_RSP:
5249 case L2CAP_CREATE_CHAN_RSP:
5250 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5253 case L2CAP_CONF_REQ:
5254 err = l2cap_config_req(conn, cmd, cmd_len, data);
5257 case L2CAP_CONF_RSP:
5258 l2cap_config_rsp(conn, cmd, cmd_len, data);
5261 case L2CAP_DISCONN_REQ:
5262 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5265 case L2CAP_DISCONN_RSP:
5266 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5269 case L2CAP_ECHO_REQ:
/* Echo: reply with the request payload unchanged. */
5270 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5273 case L2CAP_ECHO_RSP:
5276 case L2CAP_INFO_REQ:
5277 err = l2cap_information_req(conn, cmd, cmd_len, data);
5280 case L2CAP_INFO_RSP:
5281 l2cap_information_rsp(conn, cmd, cmd_len, data);
5284 case L2CAP_CREATE_CHAN_REQ:
5285 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5288 case L2CAP_MOVE_CHAN_REQ:
5289 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5292 case L2CAP_MOVE_CHAN_RSP:
5293 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5296 case L2CAP_MOVE_CHAN_CFM:
5297 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5300 case L2CAP_MOVE_CHAN_CFM_RSP:
5301 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5305 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE L2CAP signalling command. Only the connection
 * parameter update request is actively handled here.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5313 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5314 struct l2cap_cmd_hdr *cmd, u8 *data)
5316 switch (cmd->code) {
5317 case L2CAP_COMMAND_REJ:
5320 case L2CAP_CONN_PARAM_UPDATE_REQ:
5321 return l2cap_conn_param_update_req(conn, cmd, data);
5323 case L2CAP_CONN_PARAM_UPDATE_RSP:
5327 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Map a handler errno to an L2CAP Command Reject reason code
 * (little-endian, ready to be placed in the reject payload).
 * NOTE(review): the selecting case labels are elided from this listing;
 * the fallback is "command not understood".
 */
5332 static __le16 l2cap_err_to_reason(int err)
5336 return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
5338 return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED);
5342 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
/* Process an skb received on the LE signalling channel.
 * LE carries exactly one command per PDU: parse the header, verify the
 * declared length matches the remaining payload, dispatch it, and send
 * a Command Reject if the handler failed.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5346 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5347 struct sk_buff *skb)
5349 struct hci_conn *hcon = conn->hcon;
5350 struct l2cap_cmd_hdr *cmd;
5354 if (hcon->type != LE_LINK)
5357 if (skb->len < L2CAP_CMD_HDR_SIZE)
5360 cmd = (void *) skb->data;
5361 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5363 len = le16_to_cpu(cmd->len);
5365 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* ident 0 is reserved; a length mismatch means a truncated PDU. */
5367 if (len != skb->len || !cmd->ident) {
5368 BT_DBG("corrupted command");
5372 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5374 struct l2cap_cmd_rej_unk rej;
5376 BT_ERR("Wrong link type (%d)", err);
5378 rej.reason = l2cap_err_to_reason(err);
5379 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb received on the BR/EDR signalling channel.
 * Unlike LE, a single PDU may carry several commands back to back;
 * iterate over them, validating each header against the remaining
 * length, dispatching, and rejecting failed commands.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5387 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5388 struct sk_buff *skb)
5390 struct hci_conn *hcon = conn->hcon;
5391 u8 *data = skb->data;
5393 struct l2cap_cmd_hdr cmd;
/* Raw sockets get a copy of all signalling traffic. */
5396 l2cap_raw_recv(conn, skb);
5398 if (hcon->type != ACL_LINK)
5401 while (len >= L2CAP_CMD_HDR_SIZE) {
5403 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5404 data += L2CAP_CMD_HDR_SIZE;
5405 len -= L2CAP_CMD_HDR_SIZE;
5407 cmd_len = le16_to_cpu(cmd.len);
5409 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* ident 0 is reserved; cmd_len past the buffer means corruption. */
5412 if (cmd_len > len || !cmd.ident) {
5413 BT_DBG("corrupted command");
5417 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5419 struct l2cap_cmd_rej_unk rej;
5421 BT_ERR("Wrong link type (%d)", err);
5423 rej.reason = l2cap_err_to_reason(err);
5424 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS of a received ERTM/streaming frame.
 * The CRC covers the L2CAP header (located hdr_size bytes before
 * skb->data) plus the payload; the trailing 2-byte FCS is trimmed
 * first and then read from just past the shortened length.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5436 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5438 u16 our_fcs, rcv_fcs;
/* Extended control fields use the larger header. */
5441 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5442 hdr_size = L2CAP_EXT_HDR_SIZE;
5444 hdr_size = L2CAP_ENH_HDR_SIZE;
5446 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from the (still-present) tail. */
5447 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5448 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5449 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5451 if (our_fcs != rcv_fcs)
/* Send the final (F-bit) response after a poll: RNR when locally busy,
 * otherwise pending I-frames (which may carry the F-bit), and an RR as
 * a fallback if no frame ended up carrying it.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5457 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5459 struct l2cap_ctrl control;
5461 BT_DBG("chan %p", chan);
5463 memset(&control, 0, sizeof(control));
5466 control.reqseq = chan->buffer_seq;
5467 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5469 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5470 control.super = L2CAP_SUPER_RNR;
5471 l2cap_send_sframe(chan, &control);
/* Remote just cleared its busy state: restart retransmissions. */
5474 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5475 chan->unacked_frames > 0)
5476 __set_retrans_timer(chan);
5478 /* Send pending iframes */
5479 l2cap_ertm_send(chan);
5481 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5482 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5483 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5486 control.super = L2CAP_SUPER_RR;
5487 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list, tracking the current tail in
 * *@last_frag to avoid walking the list, and keep the aggregate
 * len/data_len/truesize accounting consistent.
 */
5491 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5492 struct sk_buff **last_frag)
5494 /* skb->len reflects data in skb as well as all fragments
5495 * skb->data_len reflects only data in fragments
5497 if (!skb_has_frag_list(skb))
5498 skb_shinfo(skb)->frag_list = new_frag;
5500 new_frag->next = NULL;
5502 (*last_frag)->next = new_frag;
5503 *last_frag = new_frag;
5505 skb->len += new_frag->len;
5506 skb->data_len += new_frag->len;
5507 skb->truesize += new_frag->truesize;
/* Reassemble a segmented SDU from ERTM I-frames based on control->sar.
 * Unsegmented frames go straight to the channel's recv callback; START
 * frames pull the SDU length and begin accumulation; CONTINUE/END
 * frames are chained as fragments until the full length is reached.
 * On size violations the partial SDU is discarded (error path).
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5510 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5511 struct l2cap_ctrl *control)
5515 switch (control->sar) {
5516 case L2CAP_SAR_UNSEGMENTED:
5520 err = chan->ops->recv(chan, skb);
5523 case L2CAP_SAR_START:
/* First segment carries the total SDU length before the payload. */
5527 chan->sdu_len = get_unaligned_le16(skb->data);
5528 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* An SDU larger than our MTU is a protocol violation. */
5530 if (chan->sdu_len > chan->imtu) {
5535 if (skb->len >= chan->sdu_len)
5539 chan->sdu_last_frag = skb;
5545 case L2CAP_SAR_CONTINUE:
5549 append_skb_frag(chan->sdu, skb,
5550 &chan->sdu_last_frag);
/* A continuation may not reach/exceed the declared length. */
5553 if (chan->sdu->len >= chan->sdu_len)
5563 append_skb_frag(chan->sdu, skb,
5564 &chan->sdu_last_frag);
/* END segment must complete the SDU exactly. */
5567 if (chan->sdu->len != chan->sdu_len)
5570 err = chan->ops->recv(chan, chan->sdu);
5573 /* Reassembly complete */
5575 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU. */
5583 kfree_skb(chan->sdu);
5585 chan->sdu_last_frag = NULL;
/* Re-segment queued data after a channel move changes the MPS.
 * NOTE(review): body is elided from this listing.
 */
5592 static int l2cap_resegment(struct l2cap_chan *chan)
/* Report a local-busy transition to the ERTM state machine.
 * Only meaningful in ERTM mode; maps @busy to the corresponding
 * LOCAL_BUSY_DETECTED/CLEAR event and feeds it to l2cap_tx().
 */
5598 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5602 if (chan->mode != L2CAP_MODE_ERTM)
5605 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5606 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver buffered I-frames to the reassembler
 * in sequence order until a gap (or local busy) is hit. When the queue
 * empties, return to the normal RECV state and ack what was consumed.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5609 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5612 /* Pass sequential frames to l2cap_reassemble_sdu()
5613 * until a gap is encountered.
5616 BT_DBG("chan %p", chan);
5618 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5619 struct sk_buff *skb;
5620 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5621 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5623 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5628 skb_unlink(skb, &chan->srej_q);
5629 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5630 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5635 if (skb_queue_empty(&chan->srej_q)) {
5636 chan->rx_state = L2CAP_RX_STATE_RECV;
5637 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame. Validates reqseq, enforces the retry limit, and handles the
 * P/F-bit bookkeeping (CONN_SREJ_ACT guards against retransmitting the
 * same frame twice for one SREJ exchange).
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5643 static void l2cap_handle_srej(struct l2cap_chan *chan,
5644 struct l2cap_ctrl *control)
5646 struct sk_buff *skb;
5648 BT_DBG("chan %p, control %p", chan, control);
/* An SREJ for the next unsent sequence number is a protocol error. */
5650 if (control->reqseq == chan->next_tx_seq) {
5651 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5652 l2cap_send_disconn_req(chan, ECONNRESET);
5656 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5659 BT_DBG("Seq %d not available for retransmission",
/* Retry budget exhausted for this frame: give up on the channel. */
5664 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5665 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5666 l2cap_send_disconn_req(chan, ECONNRESET);
5670 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5672 if (control->poll) {
5673 l2cap_pass_to_tx(chan, control);
/* Poll set: our retransmission must carry the F-bit. */
5675 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5676 l2cap_retransmit(chan, control);
5677 l2cap_ertm_send(chan);
5679 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5680 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5681 chan->srej_save_reqseq = control->reqseq;
5684 l2cap_pass_to_tx_fbit(chan, control);
5686 if (control->final) {
/* Skip the retransmit only if this SREJ matches the one already
 * acted on while waiting for the F-bit.
 */
5687 if (chan->srej_save_reqseq != control->reqseq ||
5688 !test_and_clear_bit(CONN_SREJ_ACT,
5690 l2cap_retransmit(chan, control);
5692 l2cap_retransmit(chan, control);
5693 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5694 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5695 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit all unacked I-frames from
 * reqseq onward. Validates reqseq, enforces the retry limit, and uses
 * CONN_REJ_ACT so a REJ already acted on isn't serviced twice when the
 * matching F-bit frame arrives.
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5701 static void l2cap_handle_rej(struct l2cap_chan *chan,
5702 struct l2cap_ctrl *control)
5704 struct sk_buff *skb;
5706 BT_DBG("chan %p, control %p", chan, control);
/* A REJ for the next unsent sequence number is a protocol error. */
5708 if (control->reqseq == chan->next_tx_seq) {
5709 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5710 l2cap_send_disconn_req(chan, ECONNRESET);
5714 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5716 if (chan->max_tx && skb &&
5717 bt_cb(skb)->control.retries >= chan->max_tx) {
5718 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5719 l2cap_send_disconn_req(chan, ECONNRESET);
5723 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5725 l2cap_pass_to_tx(chan, control);
5727 if (control->final) {
5728 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5729 l2cap_retransmit_all(chan, control);
5731 l2cap_retransmit_all(chan, control);
5732 l2cap_ertm_send(chan);
5733 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5734 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify the txseq of a received I-frame relative to the current
 * receive window and SREJ state: expected, expected-under-SREJ,
 * duplicate, unexpected (gap), or invalid (ignorable or fatal,
 * depending on the tx window size — see the "double poll" note below).
 *
 * NOTE(review): elided listing; comments cover visible lines only.
 */
5738 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5740 BT_DBG("chan %p, txseq %d", chan, txseq);
5742 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5743 chan->expected_tx_seq);
5745 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5746 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5748 /* See notes below regarding "double poll" and
5751 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5752 BT_DBG("Invalid/Ignore - after SREJ");
5753 return L2CAP_TXSEQ_INVALID_IGNORE;
5755 BT_DBG("Invalid - in window after SREJ sent");
5756 return L2CAP_TXSEQ_INVALID;
5760 if (chan->srej_list.head == txseq) {
5761 BT_DBG("Expected SREJ");
5762 return L2CAP_TXSEQ_EXPECTED_SREJ;
5765 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5766 BT_DBG("Duplicate SREJ - txseq already stored");
5767 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5770 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5771 BT_DBG("Unexpected SREJ - not requested");
5772 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5776 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it falls outside the window. */
5777 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5779 BT_DBG("Invalid - txseq outside tx window");
5780 return L2CAP_TXSEQ_INVALID;
5783 return L2CAP_TXSEQ_EXPECTED;
/* txseq earlier than expected (mod window) is a retransmission. */
5787 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5788 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5789 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5790 return L2CAP_TXSEQ_DUPLICATE;
5793 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5794 /* A source of invalid packets is a "double poll" condition,
5795 * where delays cause us to send multiple poll packets. If
5796 * the remote stack receives and processes both polls,
5797 * sequence numbers can wrap around in such a way that a
5798 * resent frame has a sequence number that looks like new data
5799 * with a sequence gap. This would trigger an erroneous SREJ
5802 * Fortunately, this is impossible with a tx window that's
5803 * less than half of the maximum sequence number, which allows
5804 * invalid frames to be safely ignored.
5806 * With tx window sizes greater than half of the tx window
5807 * maximum, the frame is invalid and cannot be ignored. This
5808 * causes a disconnect.
5811 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5812 BT_DBG("Invalid/Ignore - txseq outside tx window");
5813 return L2CAP_TXSEQ_INVALID_IGNORE;
5815 BT_DBG("Invalid - txseq outside tx window");
5816 return L2CAP_TXSEQ_INVALID;
5819 BT_DBG("Unexpected - txseq indicates missing frames");
5820 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive-side state handler for L2CAP_RX_STATE_RECV (the normal
 * in-sequence receive state).  Dispatches on @event: an incoming I-frame
 * is classified against the expected tx sequence, S-frames (RR/RNR/REJ/
 * SREJ) update the peer-busy / retransmission state.  Returns 0 or a
 * negative error from SDU reassembly.
 *
 * NOTE(review): this extract is elided — closing braces, break
 * statements and some lines are missing from this view; the visible
 * code is kept verbatim.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
struct l2cap_ctrl *control,
struct sk_buff *skb, u8 event)
/* Tracks whether skb was queued somewhere; if not, it is freed below */
bool skb_in_use = false;
BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
case L2CAP_EV_RECV_IFRAME:
switch (l2cap_classify_txseq(chan, control->txseq)) {
case L2CAP_TXSEQ_EXPECTED:
/* Let the TX state machine process the piggy-backed reqseq/final */
l2cap_pass_to_tx(chan, control);
/* While locally busy the expected frame is dropped; it will be
 * recovered once local busy clears.
 */
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
BT_DBG("Busy, discarding expected seq %d",
chan->expected_tx_seq = __next_seq(chan,
chan->buffer_seq = chan->expected_tx_seq;
err = l2cap_reassemble_sdu(chan, skb, control);
if (control->final) {
if (!test_and_clear_bit(CONN_REJ_ACT,
&chan->conn_state)) {
l2cap_retransmit_all(chan, control);
l2cap_ertm_send(chan);
if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
l2cap_send_ack(chan);
case L2CAP_TXSEQ_UNEXPECTED:
l2cap_pass_to_tx(chan, control);
/* Can't issue SREJ frames in the local busy state.
* Drop this frame, it will be seen as missing
* when local busy is exited.
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
BT_DBG("Busy, discarding unexpected seq %d",
/* There was a gap in the sequence, so an SREJ
* must be sent for each missing frame. The
* current frame is stored for later use.
skb_queue_tail(&chan->srej_q, skb);
BT_DBG("Queued %p (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
clear_bit(CONN_SREJ_ACT, &chan->conn_state);
l2cap_seq_list_clear(&chan->srej_list);
l2cap_send_srej(chan, control->txseq);
/* Enter SREJ_SENT state until the missing frames arrive */
chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
case L2CAP_TXSEQ_DUPLICATE:
l2cap_pass_to_tx(chan, control);
case L2CAP_TXSEQ_INVALID_IGNORE:
case L2CAP_TXSEQ_INVALID:
/* Unrecoverable sequence error: tear down the channel */
l2cap_send_disconn_req(chan, ECONNRESET);
case L2CAP_EV_RECV_RR:
l2cap_pass_to_tx(chan, control);
if (control->final) {
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Skip retransmission while an AMP channel move is in progress */
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
!__chan_is_moving(chan)) {
l2cap_retransmit_all(chan, control);
l2cap_ertm_send(chan);
} else if (control->poll) {
l2cap_send_i_or_rr_or_rnr(chan);
if (test_and_clear_bit(CONN_REMOTE_BUSY,
&chan->conn_state) &&
chan->unacked_frames)
__set_retrans_timer(chan);
l2cap_ertm_send(chan);
case L2CAP_EV_RECV_RNR:
/* Peer signalled receiver-not-ready: stop retransmitting */
set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
l2cap_pass_to_tx(chan, control);
if (control && control->poll) {
set_bit(CONN_SEND_FBIT, &chan->conn_state);
l2cap_send_rr_or_rnr(chan, 0);
__clear_retrans_timer(chan);
l2cap_seq_list_clear(&chan->retrans_list);
case L2CAP_EV_RECV_REJ:
l2cap_handle_rej(chan, control);
case L2CAP_EV_RECV_SREJ:
l2cap_handle_srej(chan, control);
/* Frame was neither queued nor consumed — release it */
if (skb && !skb_in_use) {
BT_DBG("Freeing %p", skb);
/* ERTM receive-side state handler for L2CAP_RX_STATE_SREJ_SENT: one or
 * more SREJ frames are outstanding, so incoming I-frames are buffered in
 * srej_q until the requested retransmissions fill the gap, at which point
 * the queued frames are reassembled.  Returns 0 or a negative error.
 *
 * NOTE(review): extract is elided — braces/breaks and some lines are
 * missing from this view; visible code kept verbatim.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
struct l2cap_ctrl *control,
struct sk_buff *skb, u8 event)
u16 txseq = control->txseq;
/* Tracks whether skb was queued; if not, it is freed at the end */
bool skb_in_use = false;
BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
case L2CAP_EV_RECV_IFRAME:
switch (l2cap_classify_txseq(chan, txseq)) {
case L2CAP_TXSEQ_EXPECTED:
/* Keep frame for reassembly later */
l2cap_pass_to_tx(chan, control);
skb_queue_tail(&chan->srej_q, skb);
BT_DBG("Queued %p (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
chan->expected_tx_seq = __next_seq(chan, txseq);
case L2CAP_TXSEQ_EXPECTED_SREJ:
/* Head of the SREJ list arrived — pop it and try to drain srej_q */
l2cap_seq_list_pop(&chan->srej_list);
l2cap_pass_to_tx(chan, control);
skb_queue_tail(&chan->srej_q, skb);
BT_DBG("Queued %p (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
err = l2cap_rx_queued_iframes(chan);
case L2CAP_TXSEQ_UNEXPECTED:
/* Got a frame that can't be reassembled yet.
* Save it for later, and send SREJs to cover
* the missing frames.
skb_queue_tail(&chan->srej_q, skb);
BT_DBG("Queued %p (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
l2cap_pass_to_tx(chan, control);
l2cap_send_srej(chan, control->txseq);
case L2CAP_TXSEQ_UNEXPECTED_SREJ:
/* This frame was requested with an SREJ, but
* some expected retransmitted frames are
* missing. Request retransmission of missing
skb_queue_tail(&chan->srej_q, skb);
BT_DBG("Queued %p (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
l2cap_pass_to_tx(chan, control);
l2cap_send_srej_list(chan, control->txseq);
case L2CAP_TXSEQ_DUPLICATE_SREJ:
/* We've already queued this frame. Drop this copy. */
l2cap_pass_to_tx(chan, control);
case L2CAP_TXSEQ_DUPLICATE:
/* Expecting a later sequence number, so this frame
* was already received. Ignore it completely.
case L2CAP_TXSEQ_INVALID_IGNORE:
case L2CAP_TXSEQ_INVALID:
/* Unrecoverable sequence error: tear down the channel */
l2cap_send_disconn_req(chan, ECONNRESET);
case L2CAP_EV_RECV_RR:
l2cap_pass_to_tx(chan, control);
if (control->final) {
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
if (!test_and_clear_bit(CONN_REJ_ACT,
&chan->conn_state)) {
l2cap_retransmit_all(chan, control);
l2cap_ertm_send(chan);
} else if (control->poll) {
if (test_and_clear_bit(CONN_REMOTE_BUSY,
&chan->conn_state) &&
chan->unacked_frames) {
__set_retrans_timer(chan);
/* Answer the poll with an F-bit, re-sending the tail SREJ */
set_bit(CONN_SEND_FBIT, &chan->conn_state);
l2cap_send_srej_tail(chan);
if (test_and_clear_bit(CONN_REMOTE_BUSY,
&chan->conn_state) &&
chan->unacked_frames)
__set_retrans_timer(chan);
l2cap_send_ack(chan);
case L2CAP_EV_RECV_RNR:
set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
l2cap_pass_to_tx(chan, control);
if (control->poll) {
l2cap_send_srej_tail(chan);
/* Non-poll RNR: acknowledge with a plain RR S-frame */
struct l2cap_ctrl rr_control;
memset(&rr_control, 0, sizeof(rr_control));
rr_control.sframe = 1;
rr_control.super = L2CAP_SUPER_RR;
rr_control.reqseq = chan->buffer_seq;
l2cap_send_sframe(chan, &rr_control);
case L2CAP_EV_RECV_REJ:
l2cap_handle_rej(chan, control);
case L2CAP_EV_RECV_SREJ:
l2cap_handle_srej(chan, control);
/* Frame was neither queued nor consumed — release it */
if (skb && !skb_in_use) {
BT_DBG("Freeing %p", skb);
/* Finalize an AMP channel move: return the channel to the normal receive
 * state, pick the MTU of the new link (block_mtu for a high-speed hcon,
 * acl_mtu otherwise — the selecting if/else is elided from this view),
 * and resegment any pending outbound data for the new MTU.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
BT_DBG("chan %p", chan);
chan->rx_state = L2CAP_RX_STATE_RECV;
chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
return l2cap_resegment(chan);
/* RX state handler for L2CAP_RX_STATE_WAIT_P (channel move: waiting for
 * a poll from the peer).  On the poll, acked frames are processed, the
 * transmit queue is rewound to the peer's reqseq, the move is finished
 * and a final response is sent.  Other events fall through to the
 * normal receive handler with a NULL skb.
 *
 * NOTE(review): extract is elided — the poll check and some braces are
 * missing from this view; visible code kept verbatim.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
struct l2cap_ctrl *control,
struct sk_buff *skb, u8 event)
BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
l2cap_process_reqseq(chan, control->reqseq);
if (!skb_queue_empty(&chan->tx_q))
chan->tx_send_head = skb_peek(&chan->tx_q);
chan->tx_send_head = NULL;
/* Rewind next_tx_seq to the point expected
chan->next_tx_seq = control->reqseq;
chan->unacked_frames = 0;
err = l2cap_finish_move(chan);
set_bit(CONN_SEND_FBIT, &chan->conn_state);
l2cap_send_i_or_rr_or_rnr(chan);
if (event == L2CAP_EV_RECV_IFRAME)
return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state handler for L2CAP_RX_STATE_WAIT_F (channel move: waiting for
 * the peer's final-bit response).  Frames without the F bit are ignored;
 * on F=1 the tx queue is rewound to reqseq, the MTU of the new link is
 * adopted, pending data is resegmented, and the frame is then handled by
 * the normal receive state.
 *
 * NOTE(review): extract is elided — braces and the hs_hcon if/else are
 * missing from this view; visible code kept verbatim.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
struct l2cap_ctrl *control,
struct sk_buff *skb, u8 event)
if (!control->final)
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
chan->rx_state = L2CAP_RX_STATE_RECV;
l2cap_process_reqseq(chan, control->reqseq);
if (!skb_queue_empty(&chan->tx_q))
chan->tx_send_head = skb_peek(&chan->tx_q);
chan->tx_send_head = NULL;
/* Rewind next_tx_seq to the point expected
chan->next_tx_seq = control->reqseq;
chan->unacked_frames = 0;
chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
err = l2cap_resegment(chan);
err = l2cap_rx_state_recv(chan, control, skb, event);
/* Validate a received reqseq: it must acknowledge a frame that lies in
 * the currently-unacked window [expected_ack_seq, next_tx_seq], computed
 * with modular sequence arithmetic via __seq_offset().
 */
static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
/* Make sure reqseq is for a packet that has been sent but not acked */
unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry point: validates the frame's reqseq and
 * dispatches to the handler for the channel's current rx_state.  An
 * invalid reqseq is a protocol violation and disconnects the channel
 * with ECONNRESET.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff *skb, u8 event)
BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
control, skb, event, chan->rx_state);
if (__valid_reqseq(chan, control->reqseq)) {
switch (chan->rx_state) {
case L2CAP_RX_STATE_RECV:
err = l2cap_rx_state_recv(chan, control, skb, event);
case L2CAP_RX_STATE_SREJ_SENT:
err = l2cap_rx_state_srej_sent(chan, control, skb,
case L2CAP_RX_STATE_WAIT_P:
err = l2cap_rx_state_wait_p(chan, control, skb, event);
case L2CAP_RX_STATE_WAIT_F:
err = l2cap_rx_state_wait_f(chan, control, skb, event);
BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
control->reqseq, chan->next_tx_seq,
chan->expected_ack_seq);
l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only the expected txseq is reassembled; any
 * other frame is dropped and any partially-assembled SDU is discarded
 * (streaming mode has no retransmission).  Sequence state is advanced
 * unconditionally at the end.
 *
 * NOTE(review): extract is elided — the else branch and kfree_skb of
 * the dropped frame are partly missing from this view.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff *skb)
BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
if (l2cap_classify_txseq(chan, control->txseq) ==
L2CAP_TXSEQ_EXPECTED) {
l2cap_pass_to_tx(chan, control);
BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
__next_seq(chan, chan->buffer_seq));
chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: drop any partial SDU in progress */
kfree_skb(chan->sdu);
chan->sdu_last_frag = NULL;
BT_DBG("Freeing %p", skb);
/* Advance ack/expected sequence past this frame regardless */
chan->last_acked_seq = control->txseq;
chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Validate and dispatch a single ERTM/streaming frame: unpack the
 * control field, check FCS, validate payload length against MPS, then
 * route I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames (after
 * F/P-bit validation) to l2cap_rx() via rx_func_to_event[].  Protocol
 * violations disconnect the channel with ECONNRESET.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
struct l2cap_ctrl *control = &bt_cb(skb)->control;
__unpack_control(chan, skb);
* We can just drop the corrupted I-frame here.
* Receiver will miss it and start proper recovery
* procedures and ask for retransmission.
if (l2cap_check_fcs(chan, skb))
/* Account for the SDU-length header on a START fragment */
if (!control->sframe && control->sar == L2CAP_SAR_START)
len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS — fatal */
if (len > chan->mps) {
l2cap_send_disconn_req(chan, ECONNRESET);
if (!control->sframe) {
BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
control->sar, control->reqseq, control->final,
/* Validate F-bit - F=0 always valid, F=1 only
* valid in TX WAIT_F
if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
if (chan->mode != L2CAP_MODE_STREAMING) {
event = L2CAP_EV_RECV_IFRAME;
err = l2cap_rx(chan, control, skb, event);
err = l2cap_stream_rx(chan, control, skb);
l2cap_send_disconn_req(chan, ECONNRESET);
/* Map S-frame super field (RR/REJ/RNR/SREJ) to an rx event */
const u8 rx_func_to_event[4] = {
L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
/* Only I-frames are expected in streaming mode */
if (chan->mode == L2CAP_MODE_STREAMING)
BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
control->reqseq, control->final, control->poll,
BT_ERR("Trailing bytes: %d in sframe", len);
l2cap_send_disconn_req(chan, ECONNRESET);
/* Validate F and P bits */
if (control->final && (control->poll ||
chan->tx_state != L2CAP_TX_STATE_WAIT_F))
event = rx_func_to_event[control->super];
if (l2cap_rx(chan, control, skb, event))
l2cap_send_disconn_req(chan, ECONNRESET);
/* Deliver an incoming data frame to the channel identified by @cid.
 * An unknown CID is dropped (with a special case creating an A2MP
 * channel for L2CAP_CID_A2MP).  Basic mode frames go straight to the
 * channel ops->recv (with an imtu check); ERTM/streaming frames go
 * through l2cap_data_rcv().
 *
 * NOTE(review): extract is elided — drop/done labels, kfree_skb calls
 * and some braces are missing from this view.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
struct sk_buff *skb)
struct l2cap_chan *chan;
chan = l2cap_get_chan_by_scid(conn, cid);
if (cid == L2CAP_CID_A2MP) {
chan = a2mp_channel_create(conn, skb);
l2cap_chan_lock(chan);
BT_DBG("unknown cid 0x%4.4x", cid);
/* Drop packet and return */
BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->state != BT_CONNECTED)
switch (chan->mode) {
case L2CAP_MODE_BASIC:
/* If socket recv buffers overflows we drop data here
* which is *bad* because L2CAP has to be reliable.
* But we don't have any other choice. L2CAP doesn't
* provide flow control mechanism. */
if (chan->imtu < skb->len)
if (!chan->ops->recv(chan, skb))
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
l2cap_data_rcv(chan, skb);
BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
l2cap_chan_unlock(chan);
/* Deliver a connectionless (UCD) frame to the global channel listening
 * on @psm.  Only valid on ACL links; the frame is dropped if no suitable
 * channel exists, the channel is not bound/connected, or the payload
 * exceeds the channel's imtu.
 *
 * NOTE(review): extract is elided — drop/free paths are missing from
 * this view.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
struct sk_buff *skb)
struct hci_conn *hcon = conn->hcon;
struct l2cap_chan *chan;
if (hcon->type != ACL_LINK)
chan = l2cap_global_chan_by_psm(0, psm, &conn->hcon->src,
BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
if (chan->imtu < skb->len)
if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel frame (LE links only) to the globally
 * registered ATT channel for this address pair.  Dropped if there is no
 * connected ATT channel or the payload exceeds its imtu.
 *
 * NOTE(review): extract is elided — drop/free paths are missing from
 * this view.
 */
static void l2cap_att_channel(struct l2cap_conn *conn,
struct sk_buff *skb)
struct hci_conn *hcon = conn->hcon;
struct l2cap_chan *chan;
if (hcon->type != LE_LINK)
chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
&conn->hcon->src, &conn->hcon->dst);
BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->imtu < skb->len)
if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by CID: signaling, connectionless,
 * ATT, LE signaling, SMP, or a regular data channel.  Takes ownership of
 * @skb.  A frame whose header length does not match the skb length is
 * discarded (the drop path is elided from this view).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_hdr *lh = (void *) skb->data;
skb_pull(skb, L2CAP_HDR_SIZE);
cid = __le16_to_cpu(lh->cid);
len = __le16_to_cpu(lh->len);
if (len != skb->len) {
BT_DBG("len %d, cid 0x%4.4x", len, cid);
case L2CAP_CID_SIGNALING:
l2cap_sig_channel(conn, skb);
case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first two bytes */
psm = get_unaligned((__le16 *) skb->data);
skb_pull(skb, L2CAP_PSMLEN_SIZE);
l2cap_conless_channel(conn, psm, skb);
l2cap_att_channel(conn, skb);
case L2CAP_CID_LE_SIGNALING:
l2cap_le_sig_channel(conn, skb);
/* SMP rejects the frame -> drop the whole connection */
if (smp_sig_channel(conn, skb))
l2cap_conn_del(conn->hcon, EACCES);
l2cap_data_channel(conn, cid, skb);
6524 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection from
 * @bdaddr.  Scans listening channels; an exact local-address match (lm1)
 * takes precedence over BDADDR_ANY listeners (lm2).  Returns the
 * HCI_LM_* link-mode mask to apply, or 0 to reject.
 *
 * NOTE(review): the line setting `exact` on an address match is elided
 * from this view.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
int exact = 0, lm1 = 0, lm2 = 0;
struct l2cap_chan *c;
BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
/* Find listening sockets and check their link_mode */
read_lock(&chan_list_lock);
list_for_each_entry(c, &chan_list, global_l) {
if (c->state != BT_LISTEN)
if (!bacmp(&c->src, &hdev->bdaddr)) {
lm1 |= HCI_LM_ACCEPT;
if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm1 |= HCI_LM_MASTER;
} else if (!bacmp(&c->src, BDADDR_ANY)) {
lm2 |= HCI_LM_ACCEPT;
if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm2 |= HCI_LM_MASTER;
read_unlock(&chan_list_lock);
return exact ? lm1 : lm2;
/* HCI callback on connection-complete: on success create/ready the
 * l2cap_conn for @hcon, otherwise tear it down with the mapped errno.
 * (The status check branching is elided from this view.)
 */
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
struct l2cap_conn *conn;
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
conn = l2cap_conn_add(hcon);
l2cap_conn_ready(conn);
l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: return the HCI reason to use when disconnecting @hcon.
 * Falls back to HCI_ERROR_REMOTE_USER_TERM when no l2cap_conn exists
 * (the NULL check itself is elided from this view).
 */
int l2cap_disconn_ind(struct hci_conn *hcon)
struct l2cap_conn *conn = hcon->l2cap_data;
BT_DBG("hcon %p", hcon);
return HCI_ERROR_REMOTE_USER_TERM;
return conn->disc_reason;
/* HCI callback on disconnection-complete: destroy the l2cap_conn,
 * propagating the HCI reason mapped to an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
BT_DBG("hcon %p reason %d", hcon, reason);
l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a link-encryption change on a connection-oriented channel:
 * losing encryption starts a timer (MEDIUM security) or closes the
 * channel (HIGH security); regaining it clears the pending timer.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
if (encrypt == 0x00) {
if (chan->sec_level == BT_SECURITY_MEDIUM) {
__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
} else if (chan->sec_level == BT_SECURITY_HIGH)
l2cap_chan_close(chan, ECONNREFUSED);
if (chan->sec_level == BT_SECURITY_MEDIUM)
__clear_chan_timer(chan);
/* HCI callback on authentication/encryption change.  For LE links,
 * kicks SMP key distribution and cancels the security timer.  For
 * BR/EDR, walks every channel on the connection and advances its state
 * machine: readying ATT channels, resuming suspended sockets,
 * continuing BT_CONNECT(2) setup (sending a connect response and, on
 * success, the first configuration request), or scheduling disconnect
 * on failure.  Returns 0 (the return is elided from this view).
 *
 * NOTE(review): extract is elided — several braces, status checks and
 * break/continue lines are missing; visible code kept verbatim.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_chan *chan;
BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
if (hcon->type == LE_LINK) {
if (!status && encrypt)
smp_distribute_keys(conn, 0);
cancel_delayed_work(&conn->security_timer);
mutex_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
state_to_string(chan->state));
/* A2MP fixed channels do not follow normal security rules */
if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
l2cap_chan_unlock(chan);
if (chan->scid == L2CAP_CID_ATT) {
if (!status && encrypt) {
chan->sec_level = hcon->sec_level;
l2cap_chan_ready(chan);
l2cap_chan_unlock(chan);
/* Skip channels with a connect request still pending */
if (!__l2cap_no_conn_pending(chan)) {
l2cap_chan_unlock(chan);
if (!status && (chan->state == BT_CONNECTED ||
chan->state == BT_CONFIG)) {
struct sock *sk = chan->sk;
/* Security completed: let the socket send/receive again */
clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
sk->sk_state_change(sk);
l2cap_check_encryption(chan, encrypt);
l2cap_chan_unlock(chan);
if (chan->state == BT_CONNECT) {
l2cap_start_connection(chan);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
} else if (chan->state == BT_CONNECT2) {
struct sock *sk = chan->sk;
struct l2cap_conn_rsp rsp;
if (test_bit(BT_SK_DEFER_SETUP,
&bt_sk(sk)->flags)) {
res = L2CAP_CR_PEND;
stat = L2CAP_CS_AUTHOR_PEND;
chan->ops->defer(chan);
__l2cap_state_change(chan, BT_CONFIG);
res = L2CAP_CR_SUCCESS;
stat = L2CAP_CS_NO_INFO;
/* Security failed: block the connection and disconnect */
__l2cap_state_change(chan, BT_DISCONN);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
res = L2CAP_CR_SEC_BLOCK;
stat = L2CAP_CS_NO_INFO;
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(res);
rsp.status = cpu_to_le16(stat);
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
res == L2CAP_CR_SUCCESS) {
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
l2cap_build_conf_req(chan, buf),
chan->num_conf_req++;
l2cap_chan_unlock(chan);
mutex_unlock(&conn->chan_lock);
/* HCI callback receiving ACL data fragments.  Start fragments either
 * contain a complete L2CAP frame (handed straight to l2cap_recv_frame)
 * or begin reassembly into conn->rx_skb; continuation fragments are
 * appended until rx_len reaches zero, at which point the complete frame
 * is dispatched.  Length mismatches mark the connection unreliable
 * (ECOMM) and drop the partial frame.
 *
 * NOTE(review): extract is elided — the flags switch arms, drop labels
 * and final kfree_skb/return are missing from this view.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_hdr *hdr;
/* For AMP controller do not create l2cap conn */
if (!conn && hcon->hdev->dev_type != HCI_BREDR)
conn = l2cap_conn_add(hcon);
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
case ACL_START_NO_FLUSH:
/* A start frame while reassembly is in progress is a protocol error */
BT_ERR("Unexpected start frame (len %d)", skb->len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
l2cap_conn_unreliable(conn, ECOMM);
/* Start fragment always begin with Basic L2CAP header */
if (skb->len < L2CAP_HDR_SIZE) {
BT_ERR("Frame is too short (len %d)", skb->len);
l2cap_conn_unreliable(conn, ECOMM);
hdr = (struct l2cap_hdr *) skb->data;
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
if (len == skb->len) {
/* Complete frame received */
l2cap_recv_frame(conn, skb);
BT_DBG("Start: total len %d, frag len %d", len, skb->len);
if (skb->len > len) {
BT_ERR("Frame is too long (len %d, expected len %d)",
l2cap_conn_unreliable(conn, ECOMM);
/* Allocate skb for the complete frame (with header) */
conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
conn->rx_len = len - skb->len;
BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
if (!conn->rx_len) {
BT_ERR("Unexpected continuation frame (len %d)", skb->len);
l2cap_conn_unreliable(conn, ECOMM);
if (skb->len > conn->rx_len) {
BT_ERR("Fragment is too long (len %d, expected %d)",
skb->len, conn->rx_len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
l2cap_conn_unreliable(conn, ECOMM);
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
conn->rx_len -= skb->len;
if (!conn->rx_len) {
/* Complete frame received. l2cap_recv_frame
* takes ownership of the skb so set the global
* rx_skb pointer to NULL first.
struct sk_buff *rx_skb = conn->rx_skb;
conn->rx_skb = NULL;
l2cap_recv_frame(conn, rx_skb);
/* seq_file show callback for the "l2cap" debugfs entry: dump one line
 * per registered channel (addresses, state, psm, CIDs, MTUs, security
 * level, mode) under the global channel-list read lock.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
struct l2cap_chan *c;
read_lock(&chan_list_lock);
list_for_each_entry(c, &chan_list, global_l) {
seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
c->state, __le16_to_cpu(c->psm),
c->scid, c->dcid, c->imtu, c->omtu,
c->sec_level, c->mode);
read_unlock(&chan_list_lock);
/* debugfs open callback: wire the seq_file single-show helper. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the debugfs entry (the .read = seq_read line is
 * elided from this view).
 */
static const struct file_operations l2cap_debugfs_fops = {
.open = l2cap_debugfs_open,
.llseek = seq_lseek,
.release = single_release,
/* Handle for the "l2cap" debugfs file, removed again in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the debugfs
 * file (a creation failure is only logged, not fatal).
 */
int __init l2cap_init(void)
err = l2cap_init_sockets();
l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
NULL, &l2cap_debugfs_fops);
BT_ERR("Failed to create L2CAP debug file");
/* Module exit: undo l2cap_init() — remove the debugfs file and
 * unregister the socket layer.
 */
void l2cap_exit(void)
debugfs_remove(l2cap_debugfs);
l2cap_cleanup_sockets();
/* Runtime-tunable switch (writable via sysfs, mode 0644) to disable
 * ERTM and fall back to basic mode.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");