2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/* Locally supported L2CAP feature mask and fixed-channel bitmap. */
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global list of all channels (linked via chan->global_l), guarded by
 * chan_list_lock. */
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in the file. */
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
62 /* ---- L2CAP channels ---- */
/* Per-connection lookups: the __ variants walk conn->chan_l and expect
 * the caller to hold conn->chan_lock; the plain variants take the lock
 * themselves (see the wrappers below). */
64 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
69 list_for_each_entry(c, &conn->chan_l, list) {
76 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
88 /* Find channel with given SCID.
89 * Returns locked channel. */
90 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
95 mutex_lock(&conn->chan_lock);
96 c = __l2cap_get_chan_by_scid(conn, cid);
99 mutex_unlock(&conn->chan_lock);
104 /* Find channel with given DCID.
105 * Returns locked channel.
107 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
110 struct l2cap_chan *c;
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_dcid(conn, cid);
116 mutex_unlock(&conn->chan_lock);
/* Look up a channel by the identifier of its pending signalling command. */
121 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
124 struct l2cap_chan *c;
126 list_for_each_entry(c, &conn->chan_l, list) {
127 if (c->ident == ident)
133 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 mutex_lock(&conn->chan_lock);
139 c = __l2cap_get_chan_by_ident(conn, ident);
142 mutex_unlock(&conn->chan_lock);
/* Find a channel on the global list bound to @src with source PSM @psm.
 * Caller must hold chan_list_lock. */
147 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
149 struct l2cap_chan *c;
151 list_for_each_entry(c, &chan_list, global_l) {
152 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm; a zero PSM requests auto-allocation of a free odd
 * dynamic PSM in the 0x1001..0x10ff range. */
158 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
162 write_lock(&chan_list_lock);
/* Reject an explicit PSM that is already bound to this address. */
164 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
177 for (p = 0x1001; p < 0x1100; p += 2)
178 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
179 chan->psm = cpu_to_le16(p);
180 chan->sport = cpu_to_le16(p);
187 write_unlock(&chan_list_lock);
191 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
193 write_lock(&chan_list_lock);
197 write_unlock(&chan_list_lock);
/* Pick the first unused dynamic CID on @conn; relies on the unlocked
 * SCID lookup, so conn->chan_lock must be held by the caller. */
202 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
204 u16 cid = L2CAP_CID_DYN_START;
206 for (; cid < L2CAP_CID_DYN_END; cid++) {
207 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Update chan->state and notify via ops->state_change.  The wrapper
 * below presumably holds the socket lock around the __ call — the
 * locking lines are not visible in this extract (TODO confirm). */
214 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
216 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
217 state_to_string(state));
220 chan->ops->state_change(chan, state);
223 static void l2cap_state_change(struct l2cap_chan *chan, int state)
225 struct sock *sk = chan->sk;
228 __l2cap_state_change(chan, state);
/* Record an error code on the channel's socket (locked wrapper below). */
232 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
234 struct sock *sk = chan->sk;
239 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
241 struct sock *sk = chan->sk;
244 __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending or no retransmission timeout is configured. */
248 static void __set_retrans_timer(struct l2cap_chan *chan)
250 if (!delayed_work_pending(&chan->monitor_timer) &&
251 chan->retrans_timeout) {
252 l2cap_set_timer(chan, &chan->retrans_timer,
253 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the monitor timer; the retransmission timer is stopped first
 * since the two are mutually exclusive. */
257 static void __set_monitor_timer(struct l2cap_chan *chan)
259 __clear_retrans_timer(chan);
260 if (chan->monitor_timeout) {
261 l2cap_set_timer(chan, &chan->monitor_timer,
262 msecs_to_jiffies(chan->monitor_timeout));
/* Return the skb in @head whose ERTM txseq equals @seq, if any. */
266 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
271 skb_queue_walk(head, skb) {
272 if (bt_cb(skb)->control.txseq == seq)
279 /* ---- L2CAP sequence number lists ---- */
281 /* For ERTM, ordered lists of sequence numbers must be tracked for
282 * SREJ requests that are received and for frames that are to be
283 * retransmitted. These seq_list functions implement a singly-linked
284 * list in an array, where membership in the list can also be checked
285 * in constant time. Items can also be added to the tail of the list
286 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array; the error path for a failed
 * kmalloc is elided in this extract. */
290 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
292 size_t alloc_size, i;
294 /* Allocated size is a power of 2 to map sequence numbers
295 * (which may be up to 14 bits) in to a smaller array that is
296 * sized for the negotiated ERTM transmit windows.
298 alloc_size = roundup_pow_of_two(size);
300 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
304 seq_list->mask = alloc_size - 1;
305 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
306 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
307 for (i = 0; i < alloc_size; i++)
308 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
313 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
315 kfree(seq_list->list);
318 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
321 /* Constant-time check for list membership */
322 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR when
 * absent.  Head removal is O(1); interior removal walks the chain. */
325 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
327 u16 mask = seq_list->mask;
329 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
330 /* In case someone tries to pop the head of an empty list */
331 return L2CAP_SEQ_LIST_CLEAR;
332 } else if (seq_list->head == seq) {
333 /* Head can be removed in constant time */
334 seq_list->head = seq_list->list[seq & mask];
335 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
337 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
338 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
342 /* Walk the list to find the sequence number */
343 u16 prev = seq_list->head;
344 while (seq_list->list[prev & mask] != seq) {
345 prev = seq_list->list[prev & mask];
346 if (prev == L2CAP_SEQ_LIST_TAIL)
347 return L2CAP_SEQ_LIST_CLEAR;
350 /* Unlink the number from the list and clear it */
351 seq_list->list[prev & mask] = seq_list->list[seq & mask];
352 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
353 if (seq_list->tail == seq)
354 seq_list->tail = prev;
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
361 /* Remove the head in constant time */
362 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset the list to empty; skips the array wipe when already empty. */
365 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
369 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
372 for (i = 0; i <= seq_list->mask; i++)
373 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
376 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail in O(1); sequence numbers already present
 * are left alone. */
379 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
381 u16 mask = seq_list->mask;
383 /* All appends happen in constant time */
385 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
388 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
389 seq_list->head = seq;
391 seq_list->list[seq_list->tail & mask] = seq;
393 seq_list->tail = seq;
394 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan->chan_timer: derive an error reason
 * from the channel state, close the channel, then drop the reference
 * held for the timer. */
397 static void l2cap_chan_timeout(struct work_struct *work)
399 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
401 struct l2cap_conn *conn = chan->conn;
404 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
406 mutex_lock(&conn->chan_lock);
407 l2cap_chan_lock(chan);
409 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
410 reason = ECONNREFUSED;
411 else if (chan->state == BT_CONNECT &&
412 chan->sec_level != BT_SECURITY_SDP)
413 reason = ECONNREFUSED;
417 l2cap_chan_close(chan, reason);
419 l2cap_chan_unlock(chan);
421 chan->ops->close(chan);
422 mutex_unlock(&conn->chan_lock);
/* Drop the reference taken when the timer was armed. */
424 l2cap_chan_put(chan);
/* Allocate a new channel, link it on the global list, and set up its
 * timer, kref and initial state. */
427 struct l2cap_chan *l2cap_chan_create(void)
429 struct l2cap_chan *chan;
/* NOTE(review): GFP_ATOMIC here — if all callers can sleep this could
 * be GFP_KERNEL, but the callers are not visible in this extract. */
431 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
435 mutex_init(&chan->lock);
437 write_lock(&chan_list_lock);
438 list_add(&chan->global_l, &chan_list);
439 write_unlock(&chan_list_lock);
441 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
443 chan->state = BT_OPEN;
445 kref_init(&chan->kref);
447 /* This flag is cleared in l2cap_chan_ready() */
448 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
450 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list and free. */
455 static void l2cap_chan_destroy(struct kref *kref)
457 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
459 BT_DBG("chan %p", chan);
461 write_lock(&chan_list_lock);
462 list_del(&chan->global_l);
463 write_unlock(&chan_list_lock);
/* Reference counting wrappers around chan->kref. */
468 void l2cap_chan_hold(struct l2cap_chan *c)
470 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
475 void l2cap_chan_put(struct l2cap_chan *c)
477 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
479 kref_put(&c->kref, l2cap_chan_destroy);
/* Install default ERTM window/FCS and security parameters on a fresh
 * channel. */
482 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
484 chan->fcs = L2CAP_FCS_CRC16;
485 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
486 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
487 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
488 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
489 chan->sec_level = BT_SECURITY_LOW;
491 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: assign SCID/DCID and MTUs according to the
 * channel type, take channel and hci_conn references, and link the
 * channel on conn->chan_l.  Caller must hold conn->chan_lock (the
 * locked wrapper is below). */
494 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
496 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
497 __le16_to_cpu(chan->psm), chan->dcid);
499 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
503 switch (chan->chan_type) {
504 case L2CAP_CHAN_CONN_ORIENTED:
505 if (conn->hcon->type == LE_LINK) {
507 chan->omtu = L2CAP_DEFAULT_MTU;
508 if (chan->dcid == L2CAP_CID_ATT)
509 chan->scid = L2CAP_CID_ATT;
511 chan->scid = l2cap_alloc_cid(conn);
513 /* Alloc CID for connection-oriented socket */
514 chan->scid = l2cap_alloc_cid(conn);
515 chan->omtu = L2CAP_DEFAULT_MTU;
519 case L2CAP_CHAN_CONN_LESS:
520 /* Connectionless socket */
521 chan->scid = L2CAP_CID_CONN_LESS;
522 chan->dcid = L2CAP_CID_CONN_LESS;
523 chan->omtu = L2CAP_DEFAULT_MTU;
526 case L2CAP_CHAN_CONN_FIX_A2MP:
527 chan->scid = L2CAP_CID_A2MP;
528 chan->dcid = L2CAP_CID_A2MP;
529 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
530 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
534 /* Raw socket can send/recv signalling messages only */
535 chan->scid = L2CAP_CID_SIGNALING;
536 chan->dcid = L2CAP_CID_SIGNALING;
537 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort EFS defaults, applied to every channel type. */
540 chan->local_id = L2CAP_BESTEFFORT_ID;
541 chan->local_stype = L2CAP_SERV_BESTEFFORT;
542 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
543 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
544 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
545 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
547 l2cap_chan_hold(chan);
549 hci_conn_hold(conn->hcon);
551 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
554 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
556 mutex_lock(&conn->chan_lock);
557 __l2cap_chan_add(conn, chan);
558 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: unlink, drop references, tear down
 * the owning socket and purge any mode-specific queues and timers. */
561 void l2cap_chan_del(struct l2cap_chan *chan, int err)
563 struct l2cap_conn *conn = chan->conn;
565 __clear_chan_timer(chan);
567 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
570 struct amp_mgr *mgr = conn->hcon->amp_mgr;
571 /* Delete from channel list */
572 list_del(&chan->list);
574 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an hci_conn reference. */
578 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
579 hci_conn_drop(conn->hcon);
581 if (mgr && mgr->bredr_chan == chan)
582 mgr->bredr_chan = NULL;
585 if (chan->hs_hchan) {
586 struct hci_chan *hs_hchan = chan->hs_hchan;
588 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
589 amp_disconnect_logical_link(hs_hchan);
592 chan->ops->teardown(chan, err);
594 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
598 case L2CAP_MODE_BASIC:
601 case L2CAP_MODE_ERTM:
602 __clear_retrans_timer(chan);
603 __clear_monitor_timer(chan);
604 __clear_ack_timer(chan);
606 skb_queue_purge(&chan->srej_q);
608 l2cap_seq_list_free(&chan->srej_list);
609 l2cap_seq_list_free(&chan->retrans_list);
/* NOTE(review): upstream lets the ERTM case fall through into the
 * streaming tx-queue purge; the intervening lines are elided here. */
613 case L2CAP_MODE_STREAMING:
614 skb_queue_purge(&chan->tx_q);
/* Close @chan according to its current state: send a Disconnect request
 * or answer a pending Connect request where the protocol requires one,
 * otherwise just delete the channel. */
621 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
623 struct l2cap_conn *conn = chan->conn;
624 struct sock *sk = chan->sk;
626 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
629 switch (chan->state) {
631 chan->ops->teardown(chan, 0);
636 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
637 conn->hcon->type == ACL_LINK) {
638 __set_chan_timer(chan, sk->sk_sndtimeo);
639 l2cap_send_disconn_req(chan, reason);
641 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: answer the held Connect request before deleting. */
645 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
646 conn->hcon->type == ACL_LINK) {
647 struct l2cap_conn_rsp rsp;
650 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
651 result = L2CAP_CR_SEC_BLOCK;
653 result = L2CAP_CR_BAD_PSM;
654 l2cap_state_change(chan, BT_DISCONN);
656 rsp.scid = cpu_to_le16(chan->dcid);
657 rsp.dcid = cpu_to_le16(chan->scid);
658 rsp.result = cpu_to_le16(result);
659 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
660 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
664 l2cap_chan_del(chan, reason);
669 l2cap_chan_del(chan, reason);
673 chan->ops->teardown(chan, 0);
/* Map the channel type, PSM and security level onto an HCI
 * authentication requirement.  SDP-bound channels have their security
 * level demoted to BT_SECURITY_SDP and never require bonding. */
678 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
680 switch (chan->chan_type) {
682 switch (chan->sec_level) {
683 case BT_SECURITY_HIGH:
684 return HCI_AT_DEDICATED_BONDING_MITM;
685 case BT_SECURITY_MEDIUM:
686 return HCI_AT_DEDICATED_BONDING;
688 return HCI_AT_NO_BONDING;
691 case L2CAP_CHAN_CONN_LESS:
692 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
693 if (chan->sec_level == BT_SECURITY_LOW)
694 chan->sec_level = BT_SECURITY_SDP;
696 if (chan->sec_level == BT_SECURITY_HIGH)
697 return HCI_AT_NO_BONDING_MITM;
699 return HCI_AT_NO_BONDING;
701 case L2CAP_CHAN_CONN_ORIENTED:
702 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
703 if (chan->sec_level == BT_SECURITY_LOW)
704 chan->sec_level = BT_SECURITY_SDP;
706 if (chan->sec_level == BT_SECURITY_HIGH)
707 return HCI_AT_NO_BONDING_MITM;
709 return HCI_AT_NO_BONDING;
713 switch (chan->sec_level) {
714 case BT_SECURITY_HIGH:
715 return HCI_AT_GENERAL_BONDING_MITM;
716 case BT_SECURITY_MEDIUM:
717 return HCI_AT_GENERAL_BONDING;
719 return HCI_AT_NO_BONDING;
725 /* Service level security */
726 int l2cap_chan_check_security(struct l2cap_chan *chan)
728 struct l2cap_conn *conn = chan->conn;
731 auth_type = l2cap_get_auth_type(chan);
733 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for @conn. */
736 static u8 l2cap_get_ident(struct l2cap_conn *conn)
740 /* Get next available identificator.
741 * 1 - 128 are used by kernel.
742 * 129 - 199 are reserved.
743 * 200 - 254 are used by utilities like l2ping, etc.
746 spin_lock(&conn->lock);
/* Wrap within the kernel's 1-128 range (see comment above). */
748 if (++conn->tx_ident > 128)
753 spin_unlock(&conn->lock);
/* Build and transmit a signalling command on @conn's ACL link. */
758 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
761 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
764 BT_DBG("code 0x%2.2x", code);
/* Prefer non-flushable packets when the controller supports them. */
769 if (lmp_no_flush_capable(conn->hcon->hdev))
770 flags = ACL_START_NO_FLUSH;
774 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
775 skb->priority = HCI_PRIO_MAX;
777 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any state other than
 * stable or wait-prepare). */
780 static bool __chan_is_moving(struct l2cap_chan *chan)
782 return chan->move_state != L2CAP_MOVE_STABLE &&
783 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for @chan, routing it over the high-speed AMP
 * link when one is attached and no move is in progress. */
786 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
788 struct hci_conn *hcon = chan->conn->hcon;
791 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
794 if (chan->hs_hcon && !__chan_is_moving(chan)) {
796 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
803 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
804 lmp_no_flush_capable(hcon->hdev))
805 flags = ACL_START_NO_FLUSH;
809 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
810 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into @control. */
813 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
815 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
816 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
818 if (enh & L2CAP_CTRL_FRAME_TYPE) {
821 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
822 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
829 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
830 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field into @control. */
837 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
839 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
840 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
842 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
845 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
846 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
853 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
854 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Strip and decode the control field at the head of @skb, choosing the
 * extended or enhanced layout from the channel flags. */
861 static inline void __unpack_control(struct l2cap_chan *chan,
864 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
865 __unpack_extended_control(get_unaligned_le32(skb->data),
866 &bt_cb(skb)->control);
867 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
869 __unpack_enhanced_control(get_unaligned_le16(skb->data),
870 &bt_cb(skb)->control);
871 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control(). */
875 static u32 __pack_extended_control(struct l2cap_ctrl *control)
879 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
880 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
882 if (control->sframe) {
883 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
884 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
885 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
887 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
888 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control(). */
894 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
898 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
899 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
901 if (control->sframe) {
902 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
903 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
904 packed |= L2CAP_CTRL_FRAME_TYPE;
906 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
907 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field just after the basic L2CAP header. */
913 static inline void __pack_control(struct l2cap_chan *chan,
914 struct l2cap_ctrl *control,
917 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
918 put_unaligned_le32(__pack_extended_control(control),
919 skb->data + L2CAP_HDR_SIZE);
921 put_unaligned_le16(__pack_enhanced_control(control),
922 skb->data + L2CAP_HDR_SIZE);
/* Header size for ERTM frames: extended vs enhanced control field. */
926 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
928 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
929 return L2CAP_EXT_HDR_SIZE;
931 return L2CAP_ENH_HDR_SIZE;
/* Build an ERTM S-frame PDU carrying @control, appending a CRC16 FCS
 * when the channel uses one.  Returns ERR_PTR(-ENOMEM) on allocation
 * failure. */
934 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
938 struct l2cap_hdr *lh;
939 int hlen = __ertm_hdr_size(chan);
941 if (chan->fcs == L2CAP_FCS_CRC16)
942 hlen += L2CAP_FCS_SIZE;
944 skb = bt_skb_alloc(hlen, GFP_KERNEL);
947 return ERR_PTR(-ENOMEM);
949 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
950 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
951 lh->cid = cpu_to_le16(chan->dcid);
953 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
954 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
956 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers everything already in the skb (header + control). */
958 if (chan->fcs == L2CAP_FCS_CRC16) {
959 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
960 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
963 skb->priority = HCI_PRIO_MAX;
/* Pack and transmit an S-frame, updating RNR/ack bookkeeping as a side
 * effect. */
967 static void l2cap_send_sframe(struct l2cap_chan *chan,
968 struct l2cap_ctrl *control)
973 BT_DBG("chan %p, control %p", chan, control);
975 if (!control->sframe)
/* Defer while an AMP channel move is in progress. */
978 if (__chan_is_moving(chan))
981 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
985 if (control->super == L2CAP_SUPER_RR)
986 clear_bit(CONN_RNR_SENT, &chan->conn_state);
987 else if (control->super == L2CAP_SUPER_RNR)
988 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* Anything but SREJ acknowledges up to reqseq. */
990 if (control->super != L2CAP_SUPER_SREJ) {
991 chan->last_acked_seq = control->reqseq;
992 __clear_ack_timer(chan);
995 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
996 control->final, control->poll, control->super);
998 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
999 control_field = __pack_extended_control(control);
1001 control_field = __pack_enhanced_control(control);
1003 skb = l2cap_create_sframe_pdu(chan, control_field);
1005 l2cap_do_send(chan, skb);
/* Send RR (or RNR while locally busy), optionally with the poll bit. */
1008 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1010 struct l2cap_ctrl control;
1012 BT_DBG("chan %p, poll %d", chan, poll);
1014 memset(&control, 0, sizeof(control));
1016 control.poll = poll;
1018 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1019 control.super = L2CAP_SUPER_RNR;
1021 control.super = L2CAP_SUPER_RR;
1023 control.reqseq = chan->buffer_seq;
1024 l2cap_send_sframe(chan, &control);
/* True when no Connect request is outstanding for this channel. */
1027 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1029 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel can be created on an AMP controller:
 * requires high-speed enabled, the peer advertising A2MP, and a
 * powered-up non-BR/EDR AMP controller. */
1032 static bool __amp_capable(struct l2cap_chan *chan)
1034 struct l2cap_conn *conn = chan->conn;
1035 struct hci_dev *hdev;
1036 bool amp_available = false;
1038 if (!conn->hs_enabled)
1041 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1044 read_lock(&hci_dev_list_lock);
1045 list_for_each_entry(hdev, &hci_dev_list, list) {
1046 if (hdev->amp_type != AMP_TYPE_BREDR &&
1047 test_bit(HCI_UP, &hdev->flags)) {
1048 amp_available = true;
1052 read_unlock(&hci_dev_list_lock);
1054 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1055 return amp_available;
1060 static bool l2cap_check_efs(struct l2cap_chan *chan)
1062 /* Check EFS parameters */
/* Send an L2CAP Connect request for @chan, remembering the ident so
 * the response can be matched to the channel. */
1066 void l2cap_send_conn_req(struct l2cap_chan *chan)
1068 struct l2cap_conn *conn = chan->conn;
1069 struct l2cap_conn_req req;
1071 req.scid = cpu_to_le16(chan->scid);
1072 req.psm = chan->psm;
1074 chan->ident = l2cap_get_ident(conn);
1076 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1078 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send a Create Channel request targeting AMP controller @amp_id. */
1081 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1083 struct l2cap_create_chan_req req;
1084 req.scid = cpu_to_le16(chan->scid);
1085 req.psm = chan->psm;
1086 req.amp_id = amp_id;
1088 chan->ident = l2cap_get_ident(chan->conn);
1090 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop timers, reset
 * retransmission bookkeeping and park the rx/tx state machines. */
1094 static void l2cap_move_setup(struct l2cap_chan *chan)
1096 struct sk_buff *skb;
1098 BT_DBG("chan %p", chan);
1100 if (chan->mode != L2CAP_MODE_ERTM)
1103 __clear_retrans_timer(chan);
1104 __clear_monitor_timer(chan);
1105 __clear_ack_timer(chan);
1107 chan->retry_count = 0;
/* Frames already transmitted keep a single retry credit. */
1108 skb_queue_walk(&chan->tx_q, skb) {
1109 if (bt_cb(skb)->control.retries)
1110 bt_cb(skb)->control.retries = 1;
1115 chan->expected_tx_seq = chan->buffer_seq;
1117 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1118 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1119 l2cap_seq_list_clear(&chan->retrans_list);
1120 l2cap_seq_list_clear(&chan->srej_list);
1121 skb_queue_purge(&chan->srej_q);
1123 chan->tx_state = L2CAP_TX_STATE_XMIT;
1124 chan->rx_state = L2CAP_RX_STATE_MOVE;
1126 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move; in ERTM mode, resynchronise with an explicit
 * poll (initiator) or by waiting for one (responder). */
1129 static void l2cap_move_done(struct l2cap_chan *chan)
1131 u8 move_role = chan->move_role;
1132 BT_DBG("chan %p", chan);
1134 chan->move_state = L2CAP_MOVE_STABLE;
1135 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1137 if (chan->mode != L2CAP_MODE_ERTM)
1140 switch (move_role) {
1141 case L2CAP_MOVE_ROLE_INITIATOR:
1142 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1143 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1145 case L2CAP_MOVE_ROLE_RESPONDER:
1146 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel connected and notify its owner. */
1151 static void l2cap_chan_ready(struct l2cap_chan *chan)
1153 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1154 chan->conf_state = 0;
1155 __clear_chan_timer(chan);
1157 chan->state = BT_CONNECTED;
1159 chan->ops->ready(chan);
/* Start channel setup: discover AMP controllers first when usable,
 * otherwise send a plain Connect request. */
1162 static void l2cap_start_connection(struct l2cap_chan *chan)
1164 if (__amp_capable(chan)) {
1165 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1166 a2mp_discover_amp(chan);
1168 l2cap_send_conn_req(chan);
/* Kick off connection establishment, fetching the peer's feature mask
 * first if that has not been done yet.  LE links go straight to ready. */
1172 static void l2cap_do_start(struct l2cap_chan *chan)
1174 struct l2cap_conn *conn = chan->conn;
1176 if (conn->hcon->type == LE_LINK) {
1177 l2cap_chan_ready(chan);
1181 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1182 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1185 if (l2cap_chan_check_security(chan) &&
1186 __l2cap_no_conn_pending(chan)) {
1187 l2cap_start_connection(chan);
1190 struct l2cap_info_req req;
1191 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1193 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1194 conn->info_ident = l2cap_get_ident(conn);
1196 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1198 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Is @mode usable given both the local and the remote feature masks? */
1203 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1205 u32 local_feat_mask = l2cap_feat_mask;
1207 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1210 case L2CAP_MODE_ERTM:
1211 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1212 case L2CAP_MODE_STREAMING:
1213 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect request (A2MP channels only change state locally)
 * and move the channel to BT_DISCONN with @err recorded on its socket. */
1219 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1221 struct sock *sk = chan->sk;
1222 struct l2cap_conn *conn = chan->conn;
1223 struct l2cap_disconn_req req;
1228 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1229 __clear_retrans_timer(chan);
1230 __clear_monitor_timer(chan);
1231 __clear_ack_timer(chan);
1234 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1235 l2cap_state_change(chan, BT_DISCONN);
1239 req.dcid = cpu_to_le16(chan->dcid);
1240 req.scid = cpu_to_le16(chan->scid);
1241 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1245 __l2cap_state_change(chan, BT_DISCONN);
1246 __l2cap_chan_set_err(chan, err);
1250 /* ---- L2CAP connections ---- */
/* Walk all connection-oriented channels on @conn once the feature
 * exchange is complete: (re)start outgoing connects and answer held
 * incoming Connect requests. */
1251 static void l2cap_conn_start(struct l2cap_conn *conn)
1253 struct l2cap_chan *chan, *tmp;
1255 BT_DBG("conn %p", conn);
1257 mutex_lock(&conn->chan_lock);
1259 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1260 struct sock *sk = chan->sk;
1262 l2cap_chan_lock(chan);
1264 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1265 l2cap_chan_unlock(chan);
1269 if (chan->state == BT_CONNECT) {
1270 if (!l2cap_chan_check_security(chan) ||
1271 !__l2cap_no_conn_pending(chan)) {
1272 l2cap_chan_unlock(chan);
/* State-2 devices cannot fall back to another mode; close. */
1276 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1277 && test_bit(CONF_STATE2_DEVICE,
1278 &chan->conf_state)) {
1279 l2cap_chan_close(chan, ECONNRESET);
1280 l2cap_chan_unlock(chan);
1284 l2cap_start_connection(chan);
1286 } else if (chan->state == BT_CONNECT2) {
1287 struct l2cap_conn_rsp rsp;
1289 rsp.scid = cpu_to_le16(chan->dcid);
1290 rsp.dcid = cpu_to_le16(chan->scid);
1292 if (l2cap_chan_check_security(chan)) {
1294 if (test_bit(BT_SK_DEFER_SETUP,
1295 &bt_sk(sk)->flags)) {
1296 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1297 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1298 chan->ops->defer(chan);
1301 __l2cap_state_change(chan, BT_CONFIG);
1302 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1303 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1307 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1308 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1311 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* A Config request follows only on a fresh successful response. */
1314 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1315 rsp.result != L2CAP_CR_SUCCESS) {
1316 l2cap_chan_unlock(chan);
1320 set_bit(CONF_REQ_SENT, &chan->conf_state);
1321 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1322 l2cap_build_conf_req(chan, buf), buf);
1323 chan->num_conf_req++;
1326 l2cap_chan_unlock(chan);
1329 mutex_unlock(&conn->chan_lock);
1332 /* Find socket with cid and source/destination bdaddr.
1333 * Returns closest match, locked.
/* An exact src/dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) candidate found during the walk is returned. */
1335 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1339 struct l2cap_chan *c, *c1 = NULL;
1341 read_lock(&chan_list_lock);
1343 list_for_each_entry(c, &chan_list, global_l) {
1344 struct sock *sk = c->sk;
1346 if (state && c->state != state)
1349 if (c->scid == cid) {
1350 int src_match, dst_match;
1351 int src_any, dst_any;
1354 src_match = !bacmp(&bt_sk(sk)->src, src);
1355 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1356 if (src_match && dst_match) {
1357 read_unlock(&chan_list_lock);
1362 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1363 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1364 if ((src_match && dst_any) || (src_any && dst_match) ||
1365 (src_any && dst_any))
1370 read_unlock(&chan_list_lock);
/* Accept an incoming LE (ATT) connection: clone the listening channel
 * unless a client ATT channel already exists on this connection. */
1375 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1377 struct sock *parent;
1378 struct l2cap_chan *chan, *pchan;
1382 /* Check if we have socket listening on cid */
1383 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1384 conn->src, conn->dst);
1388 /* Client ATT sockets should override the server one */
1389 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1396 chan = pchan->ops->new_connection(pchan);
1400 chan->dcid = L2CAP_CID_ATT;
1402 bacpy(&bt_sk(chan->sk)->src, conn->src);
1403 bacpy(&bt_sk(chan->sk)->dst, conn->dst);
1405 __l2cap_chan_add(conn, chan);
1408 release_sock(parent);
/* The underlying link is up: run security and mark channels connected
 * or continue their setup as appropriate for the link and channel type. */
1411 static void l2cap_conn_ready(struct l2cap_conn *conn)
1413 struct l2cap_chan *chan;
1414 struct hci_conn *hcon = conn->hcon;
1416 BT_DBG("conn %p", conn);
1418 /* For outgoing pairing which doesn't necessarily have an
1419 * associated socket (e.g. mgmt_pair_device).
1421 if (hcon->out && hcon->type == LE_LINK)
1422 smp_conn_security(hcon, hcon->pending_sec_level);
1424 mutex_lock(&conn->chan_lock);
1426 if (hcon->type == LE_LINK)
1427 l2cap_le_conn_ready(conn);
1429 list_for_each_entry(chan, &conn->chan_l, list) {
1431 l2cap_chan_lock(chan);
1433 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1434 l2cap_chan_unlock(chan);
1438 if (hcon->type == LE_LINK) {
1439 if (smp_conn_security(hcon, chan->sec_level))
1440 l2cap_chan_ready(chan);
/* Connectionless/raw channels become connected as soon as
 * the link is up. */
1442 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1443 struct sock *sk = chan->sk;
1444 __clear_chan_timer(chan);
1446 __l2cap_state_change(chan, BT_CONNECTED);
1447 sk->sk_state_change(sk);
1450 } else if (chan->state == BT_CONNECT) {
1451 l2cap_do_start(chan);
1454 l2cap_chan_unlock(chan);
1457 mutex_unlock(&conn->chan_lock);
1460 /* Notify sockets that we cannot guarantee reliability anymore */
1461 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1463 struct l2cap_chan *chan;
1465 BT_DBG("conn %p", conn);
1467 mutex_lock(&conn->chan_lock);
/* Only channels that insisted on reliability get the error; others
 * tolerate the degraded link. */
1469 list_for_each_entry(chan, &conn->chan_l, list) {
1470 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1471 l2cap_chan_set_err(chan, err);
1474 mutex_unlock(&conn->chan_lock);
/* Delayed-work handler: the information request timed out, so mark the
 * feature-mask exchange as done and proceed with pending channel setup. */
1477 static void l2cap_info_timeout(struct work_struct *work)
1479 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1482 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1483 conn->info_ident = 0;
1485 l2cap_conn_start(conn);
1490 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1491 * callback is called during registration. The ->remove callback is called
1492 * during unregistration.
1493 An l2cap_user object is unregistered either explicitly or implicitly when
1494 the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1495 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1496 * External modules must own a reference to the l2cap_conn object if they intend
1497 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1498 * any time if they don't.
/* Register an external l2cap_user on a connection under hci_dev locking.
 * Rejects users whose list node is already linked (double registration).
 * NOTE(review): fragmentary listing — hci_dev_lock(), the hchan NULL check
 * body and error returns are not visible here.
 */
1501 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1503 struct hci_dev *hdev = conn->hcon->hdev;
1506 /* We need to check whether l2cap_conn is registered. If it is not, we
1507 * must not register the l2cap_user. l2cap_conn_del() unregisters
1508 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1509 * relies on the parent hci_conn object to be locked. This itself relies
1510 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean this user is already registered somewhere. */
1515 if (user->list.next || user->list.prev) {
1520 /* conn->hchan is NULL after l2cap_conn_del() was called */
1526 ret = user->probe(conn, user);
1530 list_add(&user->list, &conn->users);
1534 hci_dev_unlock(hdev);
1537 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister an l2cap_user: unlink it, reset its list node so a
 * later register/unregister can detect state, and invoke its ->remove()
 * callback. Runs under hci_dev locking (lock acquisition not visible in
 * this fragmentary listing).
 */
1539 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1541 struct hci_dev *hdev = conn->hcon->hdev;
/* NULL list pointers mean the user was never (or is no longer) registered. */
1545 if (!user->list.next || !user->list.prev)
1548 list_del(&user->list);
1549 user->list.next = NULL;
1550 user->list.prev = NULL;
1551 user->remove(conn, user);
1554 hci_dev_unlock(hdev);
1556 EXPORT_SYMBOL(l2cap_unregister_user);
/* Drain conn->users, unlinking each user and firing its ->remove()
 * callback. Called from l2cap_conn_del() when the connection goes away. */
1558 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1560 struct l2cap_user *user;
1562 while (!list_empty(&conn->users)) {
1563 user = list_first_entry(&conn->users, struct l2cap_user, list);
1564 list_del(&user->list);
/* Reset node pointers so the user object reads as "unregistered". */
1565 user->list.next = NULL;
1566 user->list.prev = NULL;
1567 user->remove(conn, user);
/* Tear down an l2cap_conn when the underlying HCI link dies: notify users,
 * delete and close every channel with @err, drop the HCI channel, cancel
 * pending timers, detach from the hci_conn and drop the conn reference.
 * Relies on the caller holding the parent hci_conn/hci_dev locks (see the
 * comment in l2cap_register_user()).
 */
1571 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1573 struct l2cap_conn *conn = hcon->l2cap_data;
1574 struct l2cap_chan *chan, *l;
1579 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Free any partially reassembled frame. */
1581 kfree_skb(conn->rx_skb);
1583 l2cap_unregister_all_users(conn);
1585 mutex_lock(&conn->chan_lock);
/* _safe variant: l2cap_chan_del() unlinks chan from conn->chan_l. */
1588 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold across del/close so the channel cannot vanish mid-teardown. */
1589 l2cap_chan_hold(chan);
1590 l2cap_chan_lock(chan);
1592 l2cap_chan_del(chan, err);
1594 l2cap_chan_unlock(chan);
1596 chan->ops->close(chan);
1597 l2cap_chan_put(chan);
1600 mutex_unlock(&conn->chan_lock);
1602 hci_chan_del(conn->hchan);
/* Only cancel the info timer if a request was actually sent. */
1604 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1605 cancel_delayed_work_sync(&conn->info_timer);
1607 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1608 cancel_delayed_work_sync(&conn->security_timer);
1609 smp_chan_destroy(conn);
1612 hcon->l2cap_data = NULL;
1614 l2cap_conn_put(conn);
/* Delayed-work handler for the LE SMP security timer: if pairing is still
 * pending, destroy the SMP context and kill the connection with ETIMEDOUT.
 * test_and_clear_bit() makes this race-free against normal completion. */
1617 static void security_timeout(struct work_struct *work)
1619 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1620 security_timer.work);
1622 BT_DBG("conn %p", conn);
1624 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1625 smp_chan_destroy(conn);
1626 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialise the l2cap_conn for an hci_conn (or return the
 * existing one — the early-return path is not visible in this fragmentary
 * listing). Creates the HCI channel, takes a ref on the hci_conn, picks the
 * MTU from the link type and arms the per-link-type delayed work. */
1630 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1632 struct l2cap_conn *conn = hcon->l2cap_data;
1633 struct hci_chan *hchan;
1638 hchan = hci_chan_create(hcon);
1642 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing out. */
1644 hci_chan_del(hchan);
1648 kref_init(&conn->ref);
1649 hcon->l2cap_data = conn;
/* conn holds a reference on its hci_conn until l2cap_conn_free(). */
1651 hci_conn_get(conn->hcon);
1652 conn->hchan = hchan;
1654 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU follows the controller's per-link-type buffer size. */
1656 switch (hcon->type) {
1658 if (hcon->hdev->le_mtu) {
1659 conn->mtu = hcon->hdev->le_mtu;
1664 conn->mtu = hcon->hdev->acl_mtu;
1668 conn->src = &hcon->hdev->bdaddr;
1669 conn->dst = &hcon->dst;
1671 conn->feat_mask = 0;
1673 if (hcon->type == ACL_LINK)
1674 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1675 &hcon->hdev->dev_flags);
1677 spin_lock_init(&conn->lock);
1678 mutex_init(&conn->chan_lock);
1680 INIT_LIST_HEAD(&conn->chan_l);
1681 INIT_LIST_HEAD(&conn->users);
/* LE links use the SMP security timer; BR/EDR uses the info timer. */
1683 if (hcon->type == LE_LINK)
1684 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1686 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1688 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() (the kfree of conn itself is not visible in this
 * fragmentary listing). */
1693 static void l2cap_conn_free(struct kref *ref)
1695 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1697 hci_conn_put(conn->hcon);
/* Take a reference on the connection object. */
1701 void l2cap_conn_get(struct l2cap_conn *conn)
1703 kref_get(&conn->ref);
1705 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; the last put runs l2cap_conn_free(). */
1707 void l2cap_conn_put(struct l2cap_conn *conn)
1709 kref_put(&conn->ref, l2cap_conn_free);
1711 EXPORT_SYMBOL(l2cap_conn_put);
1713 /* ---- Socket interface ---- */
1715 /* Find socket with psm and source / destination bdaddr.
1716 * Returns closest match.
/* Scan the global channel list for a channel in @state bound to @psm.
 * An exact src+dst address match wins immediately; otherwise wildcard
 * (BDADDR_ANY) matches are remembered as the "closest match" fallback.
 * NOTE(review): fragmentary listing — the c1 bookkeeping and return
 * statements are not visible here. */
1718 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1722 struct l2cap_chan *c, *c1 = NULL;
1724 read_lock(&chan_list_lock);
1726 list_for_each_entry(c, &chan_list, global_l) {
1727 struct sock *sk = c->sk;
/* state == 0 means "any state". */
1729 if (state && c->state != state)
1732 if (c->psm == psm) {
1733 int src_match, dst_match;
1734 int src_any, dst_any;
1737 src_match = !bacmp(&bt_sk(sk)->src, src);
1738 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1739 if (src_match && dst_match) {
1740 read_unlock(&chan_list_lock);
1745 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1746 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1747 if ((src_match && dst_any) || (src_any && dst_match) ||
1748 (src_any && dst_any))
1753 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection on @chan to @dst/@dst_type, addressed by
 * @psm (connection-oriented) or fixed @cid. Validates the PSM and channel
 * mode, creates/looks up the HCI link (LE vs ACL by address type), attaches
 * the channel to the resulting l2cap_conn and kicks the connect state
 * machine. Returns 0 or a negative errno.
 * NOTE(review): fragmentary listing — several error-path returns, the
 * hci_dev_lock() call and parts of the state switch are not visible. */
1758 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1759 bdaddr_t *dst, u8 dst_type)
1761 struct sock *sk = chan->sk;
1762 bdaddr_t *src = &bt_sk(sk)->src;
1763 struct l2cap_conn *conn;
1764 struct hci_conn *hcon;
1765 struct hci_dev *hdev;
1769 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1770 dst_type, __le16_to_cpu(psm));
1772 hdev = hci_get_route(dst, src);
1774 return -EHOSTUNREACH;
1778 l2cap_chan_lock(chan);
1780 /* PSM must be odd and lsb of upper byte must be 0 */
1781 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1782 chan->chan_type != L2CAP_CHAN_RAW) {
/* A connection-oriented channel needs either a PSM or a CID. */
1787 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1792 switch (chan->mode) {
1793 case L2CAP_MODE_BASIC:
1795 case L2CAP_MODE_ERTM:
1796 case L2CAP_MODE_STREAMING:
1805 switch (chan->state) {
1809 /* Already connecting */
1814 /* Already connected */
1828 /* Set destination address and psm */
1830 bacpy(&bt_sk(sk)->dst, dst);
1836 auth_type = l2cap_get_auth_type(chan);
/* LE vs BR/EDR link selection is driven by the destination addr type. */
1838 if (bdaddr_type_is_le(dst_type))
1839 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1840 chan->sec_level, auth_type);
1842 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1843 chan->sec_level, auth_type);
1846 err = PTR_ERR(hcon);
1850 conn = l2cap_conn_add(hcon);
1852 hci_conn_drop(hcon);
/* Refuse a fixed CID that is already in use on this connection. */
1857 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1858 hci_conn_drop(hcon);
1863 /* Update source addr of the socket */
1864 bacpy(src, conn->src);
/* chan_add needs the chan unlocked; re-lock afterwards. */
1866 l2cap_chan_unlock(chan);
1867 l2cap_chan_add(conn, chan);
1868 l2cap_chan_lock(chan);
1870 /* l2cap_chan_add takes its own ref so we can drop this one */
1871 hci_conn_drop(hcon);
1873 l2cap_state_change(chan, BT_CONNECT);
1874 __set_chan_timer(chan, sk->sk_sndtimeo);
/* If the link is already up, short-circuit the connect handshake. */
1876 if (hcon->state == BT_CONNECTED) {
1877 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1878 __clear_chan_timer(chan);
1879 if (l2cap_chan_check_security(chan))
1880 l2cap_state_change(chan, BT_CONNECTED);
1882 l2cap_do_start(chan);
1888 l2cap_chan_unlock(chan);
1889 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame has been
 * acked or the connection drops. Returns 0, a socket error, or the
 * signal/timeout errno. Classic wait-queue loop: re-arm the task state
 * each iteration before re-checking the condition. */
1894 int __l2cap_wait_ack(struct sock *sk)
1896 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1897 DECLARE_WAITQUEUE(wait, current);
1901 add_wait_queue(sk_sleep(sk), &wait);
1902 set_current_state(TASK_INTERRUPTIBLE);
1903 while (chan->unacked_frames > 0 && chan->conn) {
1907 if (signal_pending(current)) {
1908 err = sock_intr_errno(timeo);
1913 timeo = schedule_timeout(timeo);
1915 set_current_state(TASK_INTERRUPTIBLE);
1917 err = sock_error(sk);
1921 set_current_state(TASK_RUNNING);
1922 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor-timer expiry: feed L2CAP_EV_MONITOR_TO into the tx state
 * machine. The early unlock/put pair is a bail-out path (its condition is
 * not visible in this fragmentary listing); the timer held a chan ref,
 * dropped here via l2cap_chan_put(). */
1926 static void l2cap_monitor_timeout(struct work_struct *work)
1928 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1929 monitor_timer.work);
1931 BT_DBG("chan %p", chan);
1933 l2cap_chan_lock(chan);
1936 l2cap_chan_unlock(chan);
1937 l2cap_chan_put(chan);
1941 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1943 l2cap_chan_unlock(chan);
1944 l2cap_chan_put(chan);
/* ERTM retransmission-timer expiry: feed L2CAP_EV_RETRANS_TO into the tx
 * state machine. Mirrors l2cap_monitor_timeout(): the early unlock/put is
 * a bail-out path (condition not visible in this fragmentary listing). */
1947 static void l2cap_retrans_timeout(struct work_struct *work)
1949 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1950 retrans_timer.work);
1952 BT_DBG("chan %p", chan);
1954 l2cap_chan_lock(chan);
1957 l2cap_chan_unlock(chan);
1958 l2cap_chan_put(chan);
1962 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1963 l2cap_chan_unlock(chan);
1964 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to the tx queue and send every
 * queued frame immediately (no ack window). Each frame gets a sequence
 * number, packed control field and, if negotiated, a trailing CRC16 FCS. */
1967 static void l2cap_streaming_send(struct l2cap_chan *chan,
1968 struct sk_buff_head *skbs)
1970 struct sk_buff *skb;
1971 struct l2cap_ctrl *control;
1973 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Hold off while an AMP channel move is in progress. */
1975 if (__chan_is_moving(chan))
1978 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1980 while (!skb_queue_empty(&chan->tx_q)) {
1982 skb = skb_dequeue(&chan->tx_q);
1984 bt_cb(skb)->control.retries = 1;
1985 control = &bt_cb(skb)->control;
1987 control->reqseq = 0;
1988 control->txseq = chan->next_tx_seq;
1990 __pack_control(chan, control, skb);
1992 if (chan->fcs == L2CAP_FCS_CRC16) {
1993 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1994 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1997 l2cap_do_send(chan, skb);
1999 BT_DBG("Sent txseq %u", control->txseq);
2001 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2002 chan->frames_sent++;
/* ERTM transmit: send queued I-frames from tx_send_head while the remote
 * tx window has room and the tx state machine is in XMIT. Originals stay
 * on tx_q for possible retransmission; a clone is handed to the HCI layer.
 * Returns the number of frames sent (return statements not visible in this
 * fragmentary listing). */
2006 static int l2cap_ertm_send(struct l2cap_chan *chan)
2008 struct sk_buff *skb, *tx_skb;
2009 struct l2cap_ctrl *control;
2012 BT_DBG("chan %p", chan);
2014 if (chan->state != BT_CONNECTED)
2017 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2020 if (__chan_is_moving(chan))
2023 while (chan->tx_send_head &&
2024 chan->unacked_frames < chan->remote_tx_win &&
2025 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2027 skb = chan->tx_send_head;
2029 bt_cb(skb)->control.retries = 1;
2030 control = &bt_cb(skb)->control;
/* Piggy-back the F-bit when one is owed to the peer. */
2032 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame also acknowledges everything up to buffer_seq. */
2035 control->reqseq = chan->buffer_seq;
2036 chan->last_acked_seq = chan->buffer_seq;
2037 control->txseq = chan->next_tx_seq;
2039 __pack_control(chan, control, skb);
2041 if (chan->fcs == L2CAP_FCS_CRC16) {
2042 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2043 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2046 /* Clone after data has been modified. Data is assumed to be
2047 read-only (for locking purposes) on cloned sk_buffs.
2049 tx_skb = skb_clone(skb, GFP_KERNEL);
2054 __set_retrans_timer(chan);
2056 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2057 chan->unacked_frames++;
2058 chan->frames_sent++;
/* Advance tx_send_head, or clear it at the end of the queue. */
2061 if (skb_queue_is_last(&chan->tx_q, skb))
2062 chan->tx_send_head = NULL;
2064 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2066 l2cap_do_send(chan, tx_skb);
2067 BT_DBG("Sent txseq %u", control->txseq);
2070 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2071 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list. Each
 * frame's retry count is bumped and checked against max_tx (0 = infinite);
 * exceeding it tears the channel down. The control field is refreshed
 * (reqseq/F-bit) in the outgoing copy before resending. */
2076 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2078 struct l2cap_ctrl control;
2079 struct sk_buff *skb;
2080 struct sk_buff *tx_skb;
2083 BT_DBG("chan %p", chan);
2085 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2088 if (__chan_is_moving(chan))
2091 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2092 seq = l2cap_seq_list_pop(&chan->retrans_list);
2094 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2096 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2101 bt_cb(skb)->control.retries++;
2102 control = bt_cb(skb)->control;
2104 if (chan->max_tx != 0 &&
2105 bt_cb(skb)->control.retries > chan->max_tx) {
2106 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2107 l2cap_send_disconn_req(chan, ECONNRESET);
2108 l2cap_seq_list_clear(&chan->retrans_list);
/* Refresh the ack info: retransmissions carry the current reqseq. */
2112 control.reqseq = chan->buffer_seq;
2113 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2118 if (skb_cloned(skb)) {
2119 /* Cloned sk_buffs are read-only, so we need a
/* writable copy; otherwise a cheap clone suffices. */
2122 tx_skb = skb_copy(skb, GFP_KERNEL);
2124 tx_skb = skb_clone(skb, GFP_KERNEL);
2128 l2cap_seq_list_clear(&chan->retrans_list);
2132 /* Update skb contents */
2133 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2134 put_unaligned_le32(__pack_extended_control(&control),
2135 tx_skb->data + L2CAP_HDR_SIZE);
2137 put_unaligned_le16(__pack_enhanced_control(&control),
2138 tx_skb->data + L2CAP_HDR_SIZE);
2141 if (chan->fcs == L2CAP_FCS_CRC16) {
2142 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2143 put_unaligned_le16(fcs, skb_put(tx_skb,
2147 l2cap_do_send(chan, tx_skb);
2149 BT_DBG("Resent txseq %d", control.txseq);
2151 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame named by control->reqseq (SREJ handling). */
2155 static void l2cap_retransmit(struct l2cap_chan *chan,
2156 struct l2cap_ctrl *control)
2158 BT_DBG("chan %p, control %p", chan, control);
2160 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2161 l2cap_ertm_resend(chan);
/* REJ handling: queue every unacked frame from control->reqseq up to (but
 * not including) tx_send_head for retransmission, then resend them. */
2164 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2165 struct l2cap_ctrl *control)
2167 struct sk_buff *skb;
2169 BT_DBG("chan %p, control %p", chan, control);
2172 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Start from a clean list; the walk below rebuilds it. */
2174 l2cap_seq_list_clear(&chan->retrans_list);
2176 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2179 if (chan->unacked_frames) {
/* First locate the frame with txseq == reqseq (or the send head). */
2180 skb_queue_walk(&chan->tx_q, skb) {
2181 if (bt_cb(skb)->control.txseq == control->reqseq ||
2182 skb == chan->tx_send_head)
/* Then queue everything from there up to the send head. */
2186 skb_queue_walk_from(&chan->tx_q, skb) {
2187 if (skb == chan->tx_send_head)
2190 l2cap_seq_list_append(&chan->retrans_list,
2191 bt_cb(skb)->control.txseq);
2194 l2cap_ertm_resend(chan);
/* Decide how to acknowledge received I-frames: send RNR when locally busy,
 * piggy-back the ack on pending I-frames when possible, send an explicit
 * RR once the rx window is ~3/4 full, otherwise just (re)arm the ack
 * timer and batch the ack. */
2198 static void l2cap_send_ack(struct l2cap_chan *chan)
2200 struct l2cap_ctrl control;
2201 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2202 chan->last_acked_seq);
2205 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2206 chan, chan->last_acked_seq, chan->buffer_seq);
2208 memset(&control, 0, sizeof(control));
2211 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2212 chan->rx_state == L2CAP_RX_STATE_RECV) {
2213 __clear_ack_timer(chan);
2214 control.super = L2CAP_SUPER_RNR;
2215 control.reqseq = chan->buffer_seq;
2216 l2cap_send_sframe(chan, &control);
2218 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2219 l2cap_ertm_send(chan);
2220 /* If any i-frames were sent, they included an ack */
2221 if (chan->buffer_seq == chan->last_acked_seq)
2225 /* Ack now if the window is 3/4ths full.
2226 * Calculate without mul or div
/* threshold = ack_win * 3, later shifted to give win*3/4. */
2228 threshold = chan->ack_win;
2229 threshold += threshold << 1;
2232 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2235 if (frames_to_ack >= threshold) {
2236 __clear_ack_timer(chan);
2237 control.super = L2CAP_SUPER_RR;
2238 control.reqseq = chan->buffer_seq;
2239 l2cap_send_sframe(chan, &control);
2244 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb, spilling anything beyond
 * the first @count bytes into MTU-sized continuation fragments chained on
 * skb's frag_list. Returns 0 or a negative errno (-EFAULT paths and the
 * loop header are not visible in this fragmentary listing). */
2248 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2249 struct msghdr *msg, int len,
2250 int count, struct sk_buff *skb)
2252 struct l2cap_conn *conn = chan->conn;
2253 struct sk_buff **frag;
2256 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2262 /* Continuation fragments (no L2CAP header) */
2263 frag = &skb_shinfo(skb)->frag_list;
2265 struct sk_buff *tmp;
2267 count = min_t(unsigned int, conn->mtu, len);
2269 tmp = chan->ops->alloc_skb(chan, count,
2270 msg->msg_flags & MSG_DONTWAIT);
2272 return PTR_ERR(tmp);
2276 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2279 (*frag)->priority = skb->priority;
/* Account fragment bytes in the head skb's totals. */
2284 skb->len += (*frag)->len;
2285 skb->data_len += (*frag)->len;
2287 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the 2-byte
 * PSM, then the user payload. Returns the skb or an ERR_PTR. */
2293 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2294 struct msghdr *msg, size_t len,
2297 struct l2cap_conn *conn = chan->conn;
2298 struct sk_buff *skb;
2299 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2300 struct l2cap_hdr *lh;
2302 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2303 __le16_to_cpu(chan->psm), len, priority);
/* First fragment carries at most mtu - header bytes of payload. */
2305 count = min_t(unsigned int, (conn->mtu - hlen), len);
2307 skb = chan->ops->alloc_skb(chan, count + hlen,
2308 msg->msg_flags & MSG_DONTWAIT);
2312 skb->priority = priority;
2314 /* Create L2CAP header */
2315 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2316 lh->cid = cpu_to_le16(chan->dcid);
2317 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2318 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2320 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2321 if (unlikely(err < 0)) {
2323 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload. Returns the skb or an ERR_PTR. */
2328 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2329 struct msghdr *msg, size_t len,
2332 struct l2cap_conn *conn = chan->conn;
2333 struct sk_buff *skb;
2335 struct l2cap_hdr *lh;
2337 BT_DBG("chan %p len %zu", chan, len);
2339 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2341 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2342 msg->msg_flags & MSG_DONTWAIT);
2346 skb->priority = priority;
2348 /* Create L2CAP header */
2349 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2350 lh->cid = cpu_to_le16(chan->dcid);
2351 lh->len = cpu_to_le16(len);
2353 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2354 if (unlikely(err < 0)) {
2356 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control field
 * (filled in at send time), optional SDU-length field for the first
 * segment, payload, and room reserved for an FCS when negotiated.
 * Returns the skb or an ERR_PTR. */
2361 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2362 struct msghdr *msg, size_t len,
2365 struct l2cap_conn *conn = chan->conn;
2366 struct sk_buff *skb;
2367 int err, count, hlen;
2368 struct l2cap_hdr *lh;
2370 BT_DBG("chan %p len %zu", chan, len);
2373 return ERR_PTR(-ENOTCONN);
/* Header size depends on extended vs enhanced control field. */
2375 hlen = __ertm_hdr_size(chan);
2378 hlen += L2CAP_SDULEN_SIZE;
2380 if (chan->fcs == L2CAP_FCS_CRC16)
2381 hlen += L2CAP_FCS_SIZE;
2383 count = min_t(unsigned int, (conn->mtu - hlen), len);
2385 skb = chan->ops->alloc_skb(chan, count + hlen,
2386 msg->msg_flags & MSG_DONTWAIT);
2390 /* Create L2CAP header */
2391 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2392 lh->cid = cpu_to_le16(chan->dcid);
2393 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2395 /* Control header is populated later */
2396 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2397 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2399 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2402 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2404 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2405 if (unlikely(err < 0)) {
2407 return ERR_PTR(err);
2410 bt_cb(skb)->control.fcs = chan->fcs;
2411 bt_cb(skb)->control.retries = 0;
/* Segment an SDU from @msg into I-frame PDUs on @seg_queue. PDU size is
 * bounded by the HCI MTU (ERTM PDUs must fit one HCI fragment), the
 * BR/EDR cap, header/FCS overhead, and the remote's MPS. SAR markers run
 * UNSEGMENTED, or START/CONTINUE.../END. On failure the queue is purged
 * and the error returned. */
2415 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2416 struct sk_buff_head *seg_queue,
2417 struct msghdr *msg, size_t len)
2419 struct sk_buff *skb;
2424 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2426 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2427 * so fragmented skbs are not used. The HCI layer's handling
2428 * of fragmented skbs is not compatible with ERTM's queueing.
2431 /* PDU size is derived from the HCI MTU */
2432 pdu_len = chan->conn->mtu;
2434 /* Constrain PDU size for BR/EDR connections */
2436 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2438 /* Adjust for largest possible L2CAP overhead. */
2440 pdu_len -= L2CAP_FCS_SIZE;
2442 pdu_len -= __ertm_hdr_size(chan);
2444 /* Remote device may have requested smaller PDUs */
2445 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2447 if (len <= pdu_len) {
2448 sar = L2CAP_SAR_UNSEGMENTED;
/* SDU needs segmenting: first frame carries the SDU length field. */
2452 sar = L2CAP_SAR_START;
2454 pdu_len -= L2CAP_SDULEN_SIZE;
2458 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2461 __skb_queue_purge(seg_queue);
2462 return PTR_ERR(skb);
2465 bt_cb(skb)->control.sar = sar;
2466 __skb_queue_tail(seg_queue, skb);
/* After the START frame, later PDUs regain the SDU-length bytes. */
2471 pdu_len += L2CAP_SDULEN_SIZE;
2474 if (len <= pdu_len) {
2475 sar = L2CAP_SAR_END;
2478 sar = L2CAP_SAR_CONTINUE;
/* Entry point for sending user data on a channel. Dispatches on channel
 * type / mode: connectionless PDU, basic-mode PDU, or segment-then-send
 * via the ERTM state machine or streaming-mode sender. Returns bytes sent
 * or a negative errno (several return statements are not visible in this
 * fragmentary listing). */
2485 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2488 struct sk_buff *skb;
2490 struct sk_buff_head seg_queue;
2492 /* Connectionless channel */
2493 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2494 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2496 return PTR_ERR(skb);
2498 l2cap_do_send(chan, skb);
2502 switch (chan->mode) {
2503 case L2CAP_MODE_BASIC:
2504 /* Check outgoing MTU */
2505 if (len > chan->omtu)
2508 /* Create a basic PDU */
2509 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2511 return PTR_ERR(skb);
2513 l2cap_do_send(chan, skb);
2517 case L2CAP_MODE_ERTM:
2518 case L2CAP_MODE_STREAMING:
2519 /* Check outgoing MTU */
2520 if (len > chan->omtu) {
2525 __skb_queue_head_init(&seg_queue);
2527 /* Do segmentation before calling in to the state machine,
2528 * since it's possible to block while waiting for memory
2531 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2533 /* The channel could have been closed while segmenting,
2534 * check that it is still connected.
2536 if (chan->state != BT_CONNECTED) {
2537 __skb_queue_purge(&seg_queue);
2544 if (chan->mode == L2CAP_MODE_ERTM)
2545 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2547 l2cap_streaming_send(chan, &seg_queue);
2551 /* If the skbs were not queued for sending, they'll still be in
2552 * seg_queue and need to be purged.
2554 __skb_queue_purge(&seg_queue);
2558 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number between expected_tx_seq
 * and @txseq that is not already buffered in srej_q, recording each in
 * srej_list, then advance expected_tx_seq past @txseq. */
2565 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2567 struct l2cap_ctrl control;
2570 BT_DBG("chan %p, txseq %u", chan, txseq);
2572 memset(&control, 0, sizeof(control));
2574 control.super = L2CAP_SUPER_SREJ;
2576 for (seq = chan->expected_tx_seq; seq != txseq;
2577 seq = __next_seq(chan, seq)) {
/* Frames already held out-of-order don't need a new SREJ. */
2578 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2579 control.reqseq = seq;
2580 l2cap_send_sframe(chan, &control);
2581 l2cap_seq_list_append(&chan->srej_list, seq);
2585 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence number,
 * if any are outstanding. */
2588 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2590 struct l2cap_ctrl control;
2592 BT_DBG("chan %p", chan);
2594 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2597 memset(&control, 0, sizeof(control));
2599 control.super = L2CAP_SUPER_SREJ;
2600 control.reqseq = chan->srej_list.tail;
2601 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding sequence number except @txseq.
 * Each entry is popped and re-appended, so the initial head is captured
 * to terminate after exactly one full rotation of the list. */
2604 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2606 struct l2cap_ctrl control;
2610 BT_DBG("chan %p, txseq %u", chan, txseq);
2612 memset(&control, 0, sizeof(control));
2614 control.super = L2CAP_SUPER_SREJ;
2616 /* Capture initial list head to allow only one pass through the list. */
2617 initial_head = chan->srej_list.head;
2620 seq = l2cap_seq_list_pop(&chan->srej_list);
2621 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2624 control.reqseq = seq;
2625 l2cap_send_sframe(chan, &control);
2626 l2cap_seq_list_append(&chan->srej_list, seq);
2627 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every tx-queued frame with a
 * sequence number before @reqseq, update expected_ack_seq, and stop the
 * retransmission timer once nothing is outstanding. */
2630 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2632 struct sk_buff *acked_skb;
2635 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing new acknowledged — nothing to do. */
2637 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2640 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2641 chan->expected_ack_seq, chan->unacked_frames);
2643 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2644 ackseq = __next_seq(chan, ackseq)) {
2646 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2648 skb_unlink(acked_skb, &chan->tx_q);
2649 kfree_skb(acked_skb);
2650 chan->unacked_frames--;
2654 chan->expected_ack_seq = reqseq;
2656 if (chan->unacked_frames == 0)
2657 __clear_retrans_timer(chan);
2659 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: reset the expected sequence number,
 * drop all SREJ bookkeeping and buffered out-of-order frames, and return
 * to normal RECV state. */
2662 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2664 BT_DBG("chan %p", chan);
2666 chan->expected_tx_seq = chan->buffer_seq;
2667 l2cap_seq_list_clear(&chan->srej_list);
2668 skb_queue_purge(&chan->srej_q);
2669 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine, XMIT state: handle data requests, local-busy
 * transitions, acks, explicit polls and the retransmission timeout.
 * Poll/timeout events move the machine to WAIT_F. (break statements
 * between cases are not visible in this fragmentary listing.) */
2672 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2673 struct l2cap_ctrl *control,
2674 struct sk_buff_head *skbs, u8 event)
2676 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2680 case L2CAP_EV_DATA_REQUEST:
2681 if (chan->tx_send_head == NULL)
2682 chan->tx_send_head = skb_peek(skbs);
2684 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2685 l2cap_ertm_send(chan);
2687 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2688 BT_DBG("Enter LOCAL_BUSY");
2689 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2691 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2692 /* The SREJ_SENT state must be aborted if we are to
2693 * enter the LOCAL_BUSY state.
2695 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() emits the RNR while locally busy. */
2698 l2cap_send_ack(chan);
2701 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2702 BT_DBG("Exit LOCAL_BUSY");
2703 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2705 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2706 struct l2cap_ctrl local_control;
/* An RNR went out: poll the peer with RR(P=1) and await F. */
2708 memset(&local_control, 0, sizeof(local_control));
2709 local_control.sframe = 1;
2710 local_control.super = L2CAP_SUPER_RR;
2711 local_control.poll = 1;
2712 local_control.reqseq = chan->buffer_seq;
2713 l2cap_send_sframe(chan, &local_control);
2715 chan->retry_count = 1;
2716 __set_monitor_timer(chan);
2717 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2720 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2721 l2cap_process_reqseq(chan, control->reqseq);
2723 case L2CAP_EV_EXPLICIT_POLL:
2724 l2cap_send_rr_or_rnr(chan, 1);
2725 chan->retry_count = 1;
2726 __set_monitor_timer(chan);
2727 __clear_ack_timer(chan);
2728 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2730 case L2CAP_EV_RETRANS_TO:
2731 l2cap_send_rr_or_rnr(chan, 1);
2732 chan->retry_count = 1;
2733 __set_monitor_timer(chan);
2734 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2736 case L2CAP_EV_RECV_FBIT:
2737 /* Nothing to process */
/* ERTM tx state machine, WAIT_F state: queue (but don't send) new data,
 * handle local-busy transitions, and on receiving the F-bit return to XMIT.
 * The monitor timeout re-polls up to max_tx times before disconnecting.
 * NOTE(review): the BT_DBG format "0x2.2%x" below looks like it was meant
 * to be "0x%2.2x" — left untouched here (string literal). */
2744 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2745 struct l2cap_ctrl *control,
2746 struct sk_buff_head *skbs, u8 event)
2748 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2752 case L2CAP_EV_DATA_REQUEST:
2753 if (chan->tx_send_head == NULL)
2754 chan->tx_send_head = skb_peek(skbs);
2755 /* Queue data, but don't send. */
2756 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2758 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2759 BT_DBG("Enter LOCAL_BUSY");
2760 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2762 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2763 /* The SREJ_SENT state must be aborted if we are to
2764 * enter the LOCAL_BUSY state.
2766 l2cap_abort_rx_srej_sent(chan);
2769 l2cap_send_ack(chan);
2772 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2773 BT_DBG("Exit LOCAL_BUSY");
2774 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2776 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2777 struct l2cap_ctrl local_control;
2778 memset(&local_control, 0, sizeof(local_control));
2779 local_control.sframe = 1;
2780 local_control.super = L2CAP_SUPER_RR;
2781 local_control.poll = 1;
2782 local_control.reqseq = chan->buffer_seq;
2783 l2cap_send_sframe(chan, &local_control);
2785 chan->retry_count = 1;
2786 __set_monitor_timer(chan);
2787 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2790 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2791 l2cap_process_reqseq(chan, control->reqseq);
2795 case L2CAP_EV_RECV_FBIT:
/* F-bit answers our poll: stop the monitor timer, go back to XMIT. */
2796 if (control && control->final) {
2797 __clear_monitor_timer(chan);
2798 if (chan->unacked_frames > 0)
2799 __set_retrans_timer(chan);
2800 chan->retry_count = 0;
2801 chan->tx_state = L2CAP_TX_STATE_XMIT;
2802 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2805 case L2CAP_EV_EXPLICIT_POLL:
2808 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means retry forever. */
2809 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2810 l2cap_send_rr_or_rnr(chan, 1);
2811 __set_monitor_timer(chan);
2812 chan->retry_count++;
2814 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a tx-state-machine event to the handler for the current state. */
2822 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2823 struct sk_buff_head *skbs, u8 event)
2825 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2826 chan, control, skbs, event, chan->tx_state);
2828 switch (chan->tx_state) {
2829 case L2CAP_TX_STATE_XMIT:
2830 l2cap_tx_state_xmit(chan, control, skbs, event);
2832 case L2CAP_TX_STATE_WAIT_F:
2833 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received reqseq (+F-bit) into the tx state machine. */
2841 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2842 struct l2cap_ctrl *control)
2844 BT_DBG("chan %p, control %p", chan, control);
2845 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed a received F-bit (without reqseq processing) into the tx machine. */
2848 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2849 struct l2cap_ctrl *control)
2851 BT_DBG("chan %p, control %p", chan, control);
2852 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2855 /* Copy frame to all raw sockets on that connection */
/* Each raw channel gets its own clone of @skb; the ops->recv() callback
 * takes ownership of the clone on success (failure path not visible in
 * this fragmentary listing). */
2856 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2858 struct sk_buff *nskb;
2859 struct l2cap_chan *chan;
2861 BT_DBG("conn %p", conn);
2863 mutex_lock(&conn->chan_lock);
2865 list_for_each_entry(chan, &conn->chan_l, list) {
2866 struct sock *sk = chan->sk;
2867 if (chan->chan_type != L2CAP_CHAN_RAW)
2870 /* Don't send frame to the socket it came from */
2873 nskb = skb_clone(skb, GFP_KERNEL);
2877 if (chan->ops->recv(chan, nskb))
2881 mutex_unlock(&conn->chan_lock);
2884 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (signalling CID
 * chosen per link type), command header (code/ident/len), then @dlen bytes
 * of @data, spilling past the MTU into frag_list continuations.
 * Returns the skb or NULL (failure paths not fully visible here). */
2885 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2886 u8 ident, u16 dlen, void *data)
2888 struct sk_buff *skb, **frag;
2889 struct l2cap_cmd_hdr *cmd;
2890 struct l2cap_hdr *lh;
2893 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2894 conn, code, ident, dlen);
/* MTU must at least fit the two headers. */
2896 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2899 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2900 count = min_t(unsigned int, conn->mtu, len);
2902 skb = bt_skb_alloc(count, GFP_KERNEL);
2906 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2907 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2909 if (conn->hcon->type == LE_LINK)
2910 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2912 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2914 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2917 cmd->len = cpu_to_le16(dlen);
2920 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2921 memcpy(skb_put(skb, count), data, count);
2927 /* Continuation fragments (no L2CAP header) */
2928 frag = &skb_shinfo(skb)->frag_list;
2930 count = min_t(unsigned int, conn->mtu, len);
2932 *frag = bt_skb_alloc(count, GFP_KERNEL);
2936 memcpy(skb_put(*frag, count), data, count);
2941 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: report its type/length, decode
 * the value by size (1/2/4 bytes, else a pointer to the raw bytes), and
 * advance *ptr. Returns the total option length consumed. */
2951 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2954 struct l2cap_conf_opt *opt = *ptr;
2957 len = L2CAP_CONF_OPT_SIZE + opt->len;
2965 *val = *((u8 *) opt->val);
2969 *val = get_unaligned_le16(opt->val);
2973 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw value bytes. */
2977 *val = (unsigned long) opt->val;
2981 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, len, value) at *ptr and advance
 * *ptr past it.  For len of 1/2/4 the value is stored inline; any other
 * length treats 'val' as a pointer and memcpy's from it.
 */
2985 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2987 struct l2cap_conf_opt *opt = *ptr;
2989 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2996 *((u8 *) opt->val) = val;
3000 put_unaligned_le16(val, opt->val);
3004 put_unaligned_le32(val, opt->val);
3008 memcpy(opt->val, (void *) val, len);
3012 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option describing this
 * channel's local QoS parameters.  ERTM uses the channel's negotiated
 * service type; streaming mode forces best-effort.
 * NOTE(review): the default-case / remaining field-initialisation lines
 * are elided in this extract.
 */
3015 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3017 struct l2cap_conf_efs efs;
3019 switch (chan->mode) {
3020 case L2CAP_MODE_ERTM:
3021 efs.id = chan->local_id;
3022 efs.stype = chan->local_stype;
3023 efs.msdu = cpu_to_le16(chan->local_msdu);
3024 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3025 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3026 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3029 case L2CAP_MODE_STREAMING:
3031 efs.stype = L2CAP_SERV_BESTEFFORT;
3032 efs.msdu = cpu_to_le16(chan->local_msdu);
3033 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3042 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3043 (unsigned long) &efs);
/* Delayed-work handler for the ERTM acknowledgement timer: if frames
 * received since the last ack are still unacknowledged, send an RR/RNR
 * S-frame, then drop the channel reference taken when the timer was
 * armed.
 */
3046 static void l2cap_ack_timeout(struct work_struct *work)
3048 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3052 BT_DBG("chan %p", chan);
3054 l2cap_chan_lock(chan);
/* Number of frames received but not yet acked. */
3056 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3057 chan->last_acked_seq);
3060 l2cap_send_rr_or_rnr(chan, 0);
3062 l2cap_chan_unlock(chan);
3063 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state for a freshly configured
 * channel and, for ERTM proper, initialise the retransmission, monitor
 * and ack timers plus the SREJ/retransmit sequence lists.  Returns 0 on
 * success or the -errno from sequence-list allocation.
 */
3066 int l2cap_ertm_init(struct l2cap_chan *chan)
3070 chan->next_tx_seq = 0;
3071 chan->expected_tx_seq = 0;
3072 chan->expected_ack_seq = 0;
3073 chan->unacked_frames = 0;
3074 chan->buffer_seq = 0;
3075 chan->frames_sent = 0;
3076 chan->last_acked_seq = 0;
3078 chan->sdu_last_frag = NULL;
3081 skb_queue_head_init(&chan->tx_q);
/* Channels start on the BR/EDR controller with no AMP move in flight. */
3083 chan->local_amp_id = AMP_ID_BREDR;
3084 chan->move_id = AMP_ID_BREDR;
3085 chan->move_state = L2CAP_MOVE_STABLE;
3086 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM machinery below. */
3088 if (chan->mode != L2CAP_MODE_ERTM)
3091 chan->rx_state = L2CAP_RX_STATE_RECV;
3092 chan->tx_state = L2CAP_TX_STATE_XMIT;
3094 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3095 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3096 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3098 skb_queue_head_init(&chan->srej_q);
3100 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3104 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the first list if the second allocation failed. */
3106 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to actually use: keep ERTM/streaming only when
 * the remote's feature mask advertises support for it, otherwise fall
 * back to basic mode.
 * NOTE(review): the "return mode;" line for the supported case is
 * elided in this extract.
 */
3111 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3114 case L2CAP_MODE_STREAMING:
3115 case L2CAP_MODE_ERTM:
3116 if (l2cap_mode_supported(mode, remote_feat_mask))
3120 return L2CAP_MODE_BASIC;
3124 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3126 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3129 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3131 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC retransmission and monitor timeouts.  On an AMP
 * (high-speed) link they are derived from the controller's best-effort
 * flush timeout; on plain BR/EDR the spec default values are used.
 */
3134 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3135 struct l2cap_conf_rfc *rfc)
3137 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3138 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3140 /* Class 1 devices must have ERTM timeouts
3141 * exceeding the Link Supervision Timeout. The
3142 * default Link Supervision Timeout for AMP
3143 * controllers is 10 seconds.
3145 * Class 1 devices use 0xffffffff for their
3146 * best-effort flush timeout, so the clamping logic
3147 * will result in a timeout that meets the above
3148 * requirement. ERTM timeouts are 16-bit values, so
3149 * the maximum timeout is 65.535 seconds.
3152 /* Convert timeout to milliseconds and round */
3153 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3155 /* This is the recommended formula for class 2 devices
3156 * that start ERTM timers when packets are sent to the
3159 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field; the assignment to 0xffff is elided here. */
3161 if (ertm_to > 0xffff)
3164 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3165 rfc->monitor_timeout = rfc->retrans_timeout;
3167 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3168 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the transmit window: use the extended control field (and
 * extended window maximum) when the requested window exceeds the
 * classic default and the peer supports EWS; otherwise clamp to the
 * classic default.  ack_win tracks the chosen tx_win.
 * NOTE(review): the else-branch braces are elided in this extract.
 */
3172 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3174 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3175 __l2cap_ews_supported(chan->conn)) {
3176 /* use extended control field */
3177 set_bit(FLAG_EXT_CTRL, &chan->flags);
3178 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3180 chan->tx_win = min_t(u16, chan->tx_win,
3181 L2CAP_DEFAULT_TX_WINDOW);
3182 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3184 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for 'chan' into 'data':
 * settle on a mode (possibly downgrading via l2cap_select_mode on the
 * first request), then emit MTU, RFC, and — where enabled — EFS, EWS
 * and FCS options.  Returns the request length (elided return line).
 * NOTE(review): 'data' has no visible size bound; callers pass fixed
 * buffers — confirm sizing against the upstream data_size hardening.
 */
3187 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3189 struct l2cap_conf_req *req = data;
3190 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3191 void *ptr = req->data;
3194 BT_DBG("chan %p", chan);
/* Mode selection only happens on the very first request/response. */
3196 if (chan->num_conf_req || chan->num_conf_rsp)
3199 switch (chan->mode) {
3200 case L2CAP_MODE_STREAMING:
3201 case L2CAP_MODE_ERTM:
3202 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3205 if (__l2cap_efs_supported(chan->conn))
3206 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3210 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it deviates from the default. */
3215 if (chan->imtu != L2CAP_DEFAULT_MTU)
3216 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
/* Emit the mode-specific RFC (and related) options. */
3218 switch (chan->mode) {
3219 case L2CAP_MODE_BASIC:
3220 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3221 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3224 rfc.mode = L2CAP_MODE_BASIC;
3226 rfc.max_transmit = 0;
3227 rfc.retrans_timeout = 0;
3228 rfc.monitor_timeout = 0;
3229 rfc.max_pdu_size = 0;
3231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3232 (unsigned long) &rfc);
3235 case L2CAP_MODE_ERTM:
3236 rfc.mode = L2CAP_MODE_ERTM;
3237 rfc.max_transmit = chan->max_tx;
3239 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is bounded by both the spec default and what fits in the
 * connection MTU after the extended header/SDU-len/FCS overhead. */
3241 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3242 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3244 rfc.max_pdu_size = cpu_to_le16(size);
3246 l2cap_txwin_setup(chan);
3248 rfc.txwin_size = min_t(u16, chan->tx_win,
3249 L2CAP_DEFAULT_TX_WINDOW);
3251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3252 (unsigned long) &rfc);
3254 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3255 l2cap_add_opt_efs(&ptr, chan);
3257 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3258 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer FCS_NONE when both sides can do without the checksum. */
3261 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3262 if (chan->fcs == L2CAP_FCS_NONE ||
3263 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3264 chan->fcs = L2CAP_FCS_NONE;
3265 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3270 case L2CAP_MODE_STREAMING:
3271 l2cap_txwin_setup(chan);
3272 rfc.mode = L2CAP_MODE_STREAMING;
3274 rfc.max_transmit = 0;
3275 rfc.retrans_timeout = 0;
3276 rfc.monitor_timeout = 0;
3278 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3279 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3281 rfc.max_pdu_size = cpu_to_le16(size);
3283 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3284 (unsigned long) &rfc);
3286 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3287 l2cap_add_opt_efs(&ptr, chan);
3289 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3290 if (chan->fcs == L2CAP_FCS_NONE ||
3291 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3292 chan->fcs = L2CAP_FCS_NONE;
3293 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3299 req->dcid = cpu_to_le16(chan->dcid);
3300 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated Configuration Request in chan->conf_req and
 * build the Configuration Response into 'data'.  Walks each option
 * (MTU, flush timeout, QoS, RFC, FCS, EFS, EWS), negotiates a common
 * mode, and either accepts, marks unknown options, or rejects with
 * -ECONNREFUSED.  Returns the response length (return line elided).
 */
3305 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3307 struct l2cap_conf_rsp *rsp = data;
3308 void *ptr = rsp->data;
3309 void *req = chan->conf_req;
3310 int len = chan->conf_len;
3311 int type, hint, olen;
3313 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3314 struct l2cap_conf_efs efs;
3316 u16 mtu = L2CAP_DEFAULT_MTU;
3317 u16 result = L2CAP_CONF_SUCCESS;
3320 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent. */
3322 while (len >= L2CAP_CONF_OPT_SIZE) {
3323 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory ones must be understood. */
3325 hint = type & L2CAP_CONF_HINT;
3326 type &= L2CAP_CONF_MASK;
3329 case L2CAP_CONF_MTU:
3333 case L2CAP_CONF_FLUSH_TO:
3334 chan->flush_to = val;
3337 case L2CAP_CONF_QOS:
3340 case L2CAP_CONF_RFC:
3341 if (olen == sizeof(rfc))
3342 memcpy(&rfc, (void *) val, olen);
3345 case L2CAP_CONF_FCS:
3346 if (val == L2CAP_FCS_NONE)
3347 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3350 case L2CAP_CONF_EFS:
3352 if (olen == sizeof(efs))
3353 memcpy(&efs, (void *) val, olen);
3356 case L2CAP_CONF_EWS:
/* EWS requires high-speed support; refuse otherwise. */
3357 if (!chan->conn->hs_enabled)
3358 return -ECONNREFUSED;
3360 set_bit(FLAG_EXT_CTRL, &chan->flags);
3361 set_bit(CONF_EWS_RECV, &chan->conf_state);
3362 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3363 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
3370 result = L2CAP_CONF_UNKNOWN;
3371 *((u8 *) ptr++) = type;
/* Mode negotiation only on the first exchange. */
3376 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3379 switch (chan->mode) {
3380 case L2CAP_MODE_STREAMING:
3381 case L2CAP_MODE_ERTM:
3382 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3383 chan->mode = l2cap_select_mode(rfc.mode,
3384 chan->conn->feat_mask);
3389 if (__l2cap_efs_supported(chan->conn))
3390 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3392 return -ECONNREFUSED;
3395 if (chan->mode != rfc.mode)
3396 return -ECONNREFUSED;
/* Modes still disagree: ask the peer to retry with ours, but give up
 * after the first re-negotiation round. */
3402 if (chan->mode != rfc.mode) {
3403 result = L2CAP_CONF_UNACCEPT;
3404 rfc.mode = chan->mode;
3406 if (chan->num_conf_rsp == 1)
3407 return -ECONNREFUSED;
3409 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3410 (unsigned long) &rfc);
3413 if (result == L2CAP_CONF_SUCCESS) {
3414 /* Configure output options and let the other side know
3415 * which ones we don't like. */
3417 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3418 result = L2CAP_CONF_UNACCEPT;
3421 set_bit(CONF_MTU_DONE, &chan->conf_state);
3423 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless one side is no-traffic. */
3426 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3427 efs.stype != L2CAP_SERV_NOTRAFIC &&
3428 efs.stype != chan->local_stype) {
3430 result = L2CAP_CONF_UNACCEPT;
3432 if (chan->num_conf_req >= 1)
3433 return -ECONNREFUSED;
3435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3437 (unsigned long) &efs);
3439 /* Send PENDING Conf Rsp */
3440 result = L2CAP_CONF_PENDING;
3441 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3446 case L2CAP_MODE_BASIC:
3447 chan->fcs = L2CAP_FCS_NONE;
3448 set_bit(CONF_MODE_DONE, &chan->conf_state);
3451 case L2CAP_MODE_ERTM:
3452 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3453 chan->remote_tx_win = rfc.txwin_size;
3455 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3457 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what our connection MTU allows. */
3459 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3460 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3461 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3462 rfc.max_pdu_size = cpu_to_le16(size);
3463 chan->remote_mps = size;
3465 __l2cap_set_ertm_timeouts(chan, &rfc);
3467 set_bit(CONF_MODE_DONE, &chan->conf_state);
3469 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3470 sizeof(rfc), (unsigned long) &rfc);
3472 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3473 chan->remote_id = efs.id;
3474 chan->remote_stype = efs.stype;
3475 chan->remote_msdu = le16_to_cpu(efs.msdu);
3476 chan->remote_flush_to =
3477 le32_to_cpu(efs.flush_to);
3478 chan->remote_acc_lat =
3479 le32_to_cpu(efs.acc_lat);
3480 chan->remote_sdu_itime =
3481 le32_to_cpu(efs.sdu_itime);
3482 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3484 (unsigned long) &efs);
3488 case L2CAP_MODE_STREAMING:
3489 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3490 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3491 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3492 rfc.max_pdu_size = cpu_to_le16(size);
3493 chan->remote_mps = size;
3495 set_bit(CONF_MODE_DONE, &chan->conf_state);
3497 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3498 (unsigned long) &rfc);
3503 result = L2CAP_CONF_UNACCEPT;
3505 memset(&rfc, 0, sizeof(rfc));
3506 rfc.mode = chan->mode;
3509 if (result == L2CAP_CONF_SUCCESS)
3510 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3512 rsp->scid = cpu_to_le16(chan->dcid);
3513 rsp->result = cpu_to_le16(result);
3514 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configuration Response from the peer and build the follow-up
 * Configuration Request into 'data', adopting negotiated values (MTU,
 * flush timeout, RFC, EWS, EFS, FCS) into the channel.  Returns the new
 * request length (return line elided) or -ECONNREFUSED.
 * NOTE(review): 'data' is a caller-supplied fixed buffer with no size
 * bound visible here — this is the function hardened upstream after
 * CVE-2017-1000251; confirm this tree carries the fix.
 */
3519 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3520 void *data, u16 *result)
3522 struct l2cap_conf_req *req = data;
3523 void *ptr = req->data;
3526 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3527 struct l2cap_conf_efs efs;
3529 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3531 while (len >= L2CAP_CONF_OPT_SIZE) {
3532 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3535 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject and counter
 * with the minimum. */
3536 if (val < L2CAP_DEFAULT_MIN_MTU) {
3537 *result = L2CAP_CONF_UNACCEPT;
3538 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3544 case L2CAP_CONF_FLUSH_TO:
3545 chan->flush_to = val;
3546 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3550 case L2CAP_CONF_RFC:
3551 if (olen == sizeof(rfc))
3552 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not change mode mid-negotiation. */
3554 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3555 rfc.mode != chan->mode)
3556 return -ECONNREFUSED;
3560 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3561 sizeof(rfc), (unsigned long) &rfc);
3564 case L2CAP_CONF_EWS:
3565 chan->ack_win = min_t(u16, val, chan->ack_win);
3566 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3570 case L2CAP_CONF_EFS:
3571 if (olen == sizeof(efs))
3572 memcpy(&efs, (void *)val, olen);
3574 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3575 efs.stype != L2CAP_SERV_NOTRAFIC &&
3576 efs.stype != chan->local_stype)
3577 return -ECONNREFUSED;
3579 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3580 (unsigned long) &efs);
3583 case L2CAP_CONF_FCS:
3584 if (*result == L2CAP_CONF_PENDING)
3585 if (val == L2CAP_FCS_NONE)
3586 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be renegotiated into anything else. */
3592 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3593 return -ECONNREFUSED;
3595 chan->mode = rfc.mode;
/* Adopt the negotiated mode parameters on success/pending. */
3597 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3599 case L2CAP_MODE_ERTM:
3600 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3601 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3602 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3603 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3604 chan->ack_win = min_t(u16, chan->ack_win,
3607 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3608 chan->local_msdu = le16_to_cpu(efs.msdu);
3609 chan->local_sdu_itime =
3610 le32_to_cpu(efs.sdu_itime);
3611 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3612 chan->local_flush_to =
3613 le32_to_cpu(efs.flush_to);
3617 case L2CAP_MODE_STREAMING:
3618 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3622 req->dcid = cpu_to_le16(chan->dcid);
3623 req->flags = __constant_cpu_to_le16(0);
/* Build a minimal Configuration Response (no options) with the given
 * result and flags into 'data'.  Returns the response length
 * (ptr - data; return line elided in this extract).
 */
3628 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3629 u16 result, u16 flags)
3631 struct l2cap_conf_rsp *rsp = data;
3632 void *ptr = rsp->data;
3634 BT_DBG("chan %p", chan);
3636 rsp->scid = cpu_to_le16(chan->dcid);
3637 rsp->result = cpu_to_le16(result);
3638 rsp->flags = cpu_to_le16(flags);
/* Send the deferred success response to a Connection (or Create
 * Channel) Request, then kick off configuration by sending our first
 * Configuration Request if one has not gone out yet.
 */
3643 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3645 struct l2cap_conn_rsp rsp;
3646 struct l2cap_conn *conn = chan->conn;
3650 rsp.scid = cpu_to_le16(chan->dcid);
3651 rsp.dcid = cpu_to_le16(chan->scid);
3652 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3653 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP. */
3656 rsp_code = L2CAP_CREATE_CHAN_RSP;
3658 rsp_code = L2CAP_CONN_RSP;
3660 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3662 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller sends the config request. */
3664 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3667 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3668 l2cap_build_conf_req(chan, buf), buf);
3669 chan->num_conf_req++;
/* Extract RFC (and EWS) parameters from a successful Configuration
 * Response and commit them to the channel.  Defaults cover a peer that
 * omitted the options entirely.
 */
3672 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3676 /* Use sane default values in case a misbehaving remote device
3677 * did not send an RFC or extended window size option.
3679 u16 txwin_ext = chan->ack_win;
3680 struct l2cap_conf_rfc rfc = {
3682 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3683 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3684 .max_pdu_size = cpu_to_le16(chan->imtu),
3685 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3688 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM/streaming channels carry RFC parameters. */
3690 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3693 while (len >= L2CAP_CONF_OPT_SIZE) {
3694 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3697 case L2CAP_CONF_RFC:
3698 if (olen == sizeof(rfc))
3699 memcpy(&rfc, (void *)val, olen);
3701 case L2CAP_CONF_EWS:
3708 case L2CAP_MODE_ERTM:
3709 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3710 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3711 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control field uses the EWS window, otherwise the RFC one. */
3712 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3713 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3715 chan->ack_win = min_t(u16, chan->ack_win,
3718 case L2CAP_MODE_STREAMING:
3719 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * information request, stop waiting for the peer's features and start
 * the connection with what we have.
 */
3723 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3724 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3727 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated PDUs before touching rej fields. */
3729 if (cmd_len < sizeof(*rej))
3732 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3735 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3736 cmd->ident == conn->info_ident) {
3737 cancel_delayed_work(&conn->info_timer);
3739 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3740 conn->info_ident = 0;
3742 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (or AMP Create Channel
 * Request): look up a listener for the PSM, security-check the link,
 * create the child channel, and reply with success / pending / an
 * error, possibly also kicking off the information exchange or the
 * first Configuration Request.  Returns the new channel or NULL.
 * NOTE(review): several error-path labels and lock lines are elided in
 * this extract.
 */
3748 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3749 struct l2cap_cmd_hdr *cmd,
3750 u8 *data, u8 rsp_code, u8 amp_id)
3752 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3753 struct l2cap_conn_rsp rsp;
3754 struct l2cap_chan *chan = NULL, *pchan;
3755 struct sock *parent, *sk = NULL;
3756 int result, status = L2CAP_CS_NO_INFO;
3758 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3759 __le16 psm = req->psm;
3761 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3763 /* Check if we have socket listening on psm */
3764 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3766 result = L2CAP_CR_BAD_PSM;
3772 mutex_lock(&conn->chan_lock);
3775 /* Check if the ACL is secure enough (if not SDP) */
3776 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3777 !hci_conn_check_link_mode(conn->hcon)) {
3778 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3779 result = L2CAP_CR_SEC_BLOCK;
3783 result = L2CAP_CR_NO_MEM;
3785 /* Check if we already have channel with that dcid */
3786 if (__l2cap_get_chan_by_dcid(conn, scid))
3789 chan = pchan->ops->new_connection(pchan);
3795 /* For certain devices (ex: HID mouse), support for authentication,
3796 * pairing and bonding is optional. For such devices, inorder to avoid
3797 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3798 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3800 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3802 bacpy(&bt_sk(sk)->src, conn->src);
3803 bacpy(&bt_sk(sk)->dst, conn->dst);
3806 chan->local_amp_id = amp_id;
3808 __l2cap_chan_add(conn, chan);
3812 __set_chan_timer(chan, sk->sk_sndtimeo);
3814 chan->ident = cmd->ident;
/* Decide the response only once the feature exchange has finished. */
3816 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3817 if (l2cap_chan_check_security(chan)) {
3818 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3819 __l2cap_state_change(chan, BT_CONNECT2);
3820 result = L2CAP_CR_PEND;
3821 status = L2CAP_CS_AUTHOR_PEND;
3822 chan->ops->defer(chan);
3824 /* Force pending result for AMP controllers.
3825 * The connection will succeed after the
3826 * physical link is up.
3828 if (amp_id == AMP_ID_BREDR) {
3829 __l2cap_state_change(chan, BT_CONFIG);
3830 result = L2CAP_CR_SUCCESS;
3832 __l2cap_state_change(chan, BT_CONNECT2);
3833 result = L2CAP_CR_PEND;
3835 status = L2CAP_CS_NO_INFO;
3838 __l2cap_state_change(chan, BT_CONNECT2);
3839 result = L2CAP_CR_PEND;
3840 status = L2CAP_CS_AUTHEN_PEND;
3843 __l2cap_state_change(chan, BT_CONNECT2);
3844 result = L2CAP_CR_PEND;
3845 status = L2CAP_CS_NO_INFO;
3849 release_sock(parent);
3850 mutex_unlock(&conn->chan_lock);
3853 rsp.scid = cpu_to_le16(scid);
3854 rsp.dcid = cpu_to_le16(dcid);
3855 rsp.result = cpu_to_le16(result);
3856 rsp.status = cpu_to_le16(status);
3857 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Pending with no info yet: start the feature-mask exchange. */
3859 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3860 struct l2cap_info_req info;
3861 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3863 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3864 conn->info_ident = l2cap_get_ident(conn);
3866 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3868 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3869 sizeof(info), &info);
/* Success: immediately send our Configuration Request. */
3872 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3873 result == L2CAP_CR_SUCCESS) {
3875 set_bit(CONF_REQ_SENT, &chan->conf_state);
3876 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3877 l2cap_build_conf_req(chan, buf), buf);
3878 chan->num_conf_req++;
/* Signalling handler for a BR/EDR Connection Request: notify the
 * management interface of the connection (first time only) and forward
 * to l2cap_connect() with a plain CONN_RSP response code.
 */
3884 static int l2cap_connect_req(struct l2cap_conn *conn,
3885 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3887 struct hci_dev *hdev = conn->hcon->hdev;
3888 struct hci_conn *hcon = conn->hcon;
/* Reject truncated PDUs before parsing. */
3890 if (cmd_len < sizeof(struct l2cap_conn_req))
3894 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3895 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3896 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3897 hcon->dst_type, 0, NULL, 0,
3899 hci_dev_unlock(hdev);
3901 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection (or Create Channel) Response: locate the channel
 * by scid or, while still pending, by command ident, then either move
 * to BT_CONFIG and send our Configuration Request (success), stay
 * pending, or tear the channel down (any other result).
 */
3905 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3906 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3909 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3910 u16 scid, dcid, result, status;
3911 struct l2cap_chan *chan;
3915 if (cmd_len < sizeof(*rsp))
3918 scid = __le16_to_cpu(rsp->scid);
3919 dcid = __le16_to_cpu(rsp->dcid);
3920 result = __le16_to_cpu(rsp->result);
3921 status = __le16_to_cpu(rsp->status);
3923 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3924 dcid, scid, result, status);
3926 mutex_lock(&conn->chan_lock);
/* Look up by scid first; a still-pending channel has no scid yet, so
 * fall back to the command ident. */
3929 chan = __l2cap_get_chan_by_scid(conn, scid);
3935 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3944 l2cap_chan_lock(chan);
3947 case L2CAP_CR_SUCCESS:
3948 l2cap_state_change(chan, BT_CONFIG);
3951 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3953 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3956 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3957 l2cap_build_conf_req(chan, req), req);
3958 chan->num_conf_req++;
3962 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any non-success, non-pending result kills the channel. */
3966 l2cap_chan_del(chan, ECONNREFUSED);
3970 l2cap_chan_unlock(chan);
3973 mutex_unlock(&conn->chan_lock);
3978 static inline void set_default_fcs(struct l2cap_chan *chan)
3980 /* FCS is enabled only in ERTM or streaming mode, if one or both
3983 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3984 chan->fcs = L2CAP_FCS_NONE;
3985 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3986 chan->fcs = L2CAP_FCS_CRC16;
/* Clear the local pending-configuration state and send a SUCCESS
 * Configuration Response — used once an EFS negotiation that was
 * answered with CONF_PENDING can finally be completed.
 */
3989 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3990 u8 ident, u16 flags)
3992 struct l2cap_conn *conn = chan->conn;
3994 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3997 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3998 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4000 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4001 l2cap_build_conf_rsp(chan, data,
4002 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configuration Request: accumulate (possibly
 * fragmented) option data in chan->conf_req, and once complete, parse
 * it, send the Configuration Response, and — when both directions are
 * done — initialise ERTM and mark the channel ready.
 * NOTE(review): several unlock/return lines are elided in this extract.
 */
4005 static inline int l2cap_config_req(struct l2cap_conn *conn,
4006 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4009 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4012 struct l2cap_chan *chan;
4015 if (cmd_len < sizeof(*req))
4018 dcid = __le16_to_cpu(req->dcid);
4019 flags = __le16_to_cpu(req->flags);
4021 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4023 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject the
 * CID outright. */
4027 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4028 struct l2cap_cmd_rej_cid rej;
4030 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4031 rej.scid = cpu_to_le16(chan->scid);
4032 rej.dcid = cpu_to_le16(chan->dcid);
4034 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
4039 /* Reject if config buffer is too small. */
4040 len = cmd_len - sizeof(*req);
4041 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4042 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4043 l2cap_build_conf_rsp(chan, rsp,
4044 L2CAP_CONF_REJECT, flags), rsp);
4049 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4050 chan->conf_len += len;
4052 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4053 /* Incomplete config. Send empty response. */
4054 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4055 l2cap_build_conf_rsp(chan, rsp,
4056 L2CAP_CONF_SUCCESS, flags), rsp);
4060 /* Complete config. */
4061 len = l2cap_parse_conf_req(chan, rsp);
4063 l2cap_send_disconn_req(chan, ECONNRESET);
4067 chan->ident = cmd->ident;
4068 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4069 chan->num_conf_rsp++;
4071 /* Reset config buffer. */
4074 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: bring the channel up. */
4077 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4078 set_default_fcs(chan);
4080 if (chan->mode == L2CAP_MODE_ERTM ||
4081 chan->mode == L2CAP_MODE_STREAMING)
4082 err = l2cap_ertm_init(chan);
4085 l2cap_send_disconn_req(chan, -err);
4087 l2cap_chan_ready(chan);
4092 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4094 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4095 l2cap_build_conf_req(chan, buf), buf);
4096 chan->num_conf_req++;
4099 /* Got Conf Rsp PENDING from remote side and asume we sent
4100 Conf Rsp PENDING in the code above */
4101 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4102 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4104 /* check compatibility */
4106 /* Send rsp for BR/EDR channel */
4108 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4110 chan->ident = cmd->ident;
4114 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response: on SUCCESS commit the
 * negotiated RFC values; on PENDING possibly complete the EFS exchange;
 * on UNACCEPT re-negotiate up to L2CAP_CONF_MAX_CONF_RSP times; any
 * other result tears the channel down.  When both directions are done,
 * initialise ERTM and mark the channel ready.
 */
4118 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4119 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4122 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4123 u16 scid, flags, result;
4124 struct l2cap_chan *chan;
4125 int len = cmd_len - sizeof(*rsp);
4128 if (cmd_len < sizeof(*rsp))
4131 scid = __le16_to_cpu(rsp->scid);
4132 flags = __le16_to_cpu(rsp->flags);
4133 result = __le16_to_cpu(rsp->result);
4135 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4138 chan = l2cap_get_chan_by_scid(conn, scid);
4143 case L2CAP_CONF_SUCCESS:
4144 l2cap_conf_rfc_get(chan, rsp->data, len);
4145 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4148 case L2CAP_CONF_PENDING:
4149 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4151 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4154 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4157 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR can answer immediately; AMP must first bring up the logical
 * link when EFS is in play. */
4161 if (!chan->hs_hcon) {
4162 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4165 if (l2cap_check_efs(chan)) {
4166 amp_create_logical_link(chan);
4167 chan->ident = cmd->ident;
4173 case L2CAP_CONF_UNACCEPT:
4174 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Refuse to build a follow-up request that cannot fit. */
4177 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4178 l2cap_send_disconn_req(chan, ECONNRESET);
4182 /* throw out any old stored conf requests */
4183 result = L2CAP_CONF_SUCCESS;
4184 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4187 l2cap_send_disconn_req(chan, ECONNRESET);
4191 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4192 L2CAP_CONF_REQ, len, req);
4193 chan->num_conf_req++;
4194 if (result != L2CAP_CONF_SUCCESS)
/* Default (reject/unknown result): fail the channel. */
4200 l2cap_chan_set_err(chan, ECONNRESET);
4202 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4203 l2cap_send_disconn_req(chan, ECONNRESET);
4207 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4210 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4212 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4213 set_default_fcs(chan);
4215 if (chan->mode == L2CAP_MODE_ERTM ||
4216 chan->mode == L2CAP_MODE_STREAMING)
4217 err = l2cap_ertm_init(chan);
4220 l2cap_send_disconn_req(chan, -err);
4222 l2cap_chan_ready(chan);
4226 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut down the socket, and delete the channel.
 * An extra channel reference is held across the unlocked ops->close()
 * call to keep the object alive.
 */
4230 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4231 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4234 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4235 struct l2cap_disconn_rsp rsp;
4237 struct l2cap_chan *chan;
4240 if (cmd_len != sizeof(*req))
4243 scid = __le16_to_cpu(req->scid);
4244 dcid = __le16_to_cpu(req->dcid);
4246 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4248 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our local scid. */
4250 chan = __l2cap_get_chan_by_scid(conn, dcid);
4252 mutex_unlock(&conn->chan_lock);
4256 l2cap_chan_lock(chan);
4260 rsp.dcid = cpu_to_le16(chan->scid);
4261 rsp.scid = cpu_to_le16(chan->dcid);
4262 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4265 sk->sk_shutdown = SHUTDOWN_MASK;
4268 l2cap_chan_hold(chan);
4269 l2cap_chan_del(chan, ECONNRESET);
4271 l2cap_chan_unlock(chan);
4273 chan->ops->close(chan);
4274 l2cap_chan_put(chan);
4276 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect, so delete the channel (err 0) and close it.  Mirrors the
 * hold/close/put pattern of l2cap_disconnect_req().
 */
4281 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4282 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4285 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4287 struct l2cap_chan *chan;
4289 if (cmd_len != sizeof(*rsp))
4292 scid = __le16_to_cpu(rsp->scid);
4293 dcid = __le16_to_cpu(rsp->dcid);
4295 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4297 mutex_lock(&conn->chan_lock);
4299 chan = __l2cap_get_chan_by_scid(conn, scid);
4301 mutex_unlock(&conn->chan_lock);
4305 l2cap_chan_lock(chan);
4307 l2cap_chan_hold(chan);
4308 l2cap_chan_del(chan, 0);
4310 l2cap_chan_unlock(chan);
4312 chan->ops->close(chan);
4313 l2cap_chan_put(chan);
4315 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request and answer it: the feature
 * mask (extended with ERTM/streaming and, when high speed is enabled,
 * flow/window bits), the fixed-channel map (with the A2MP bit toggled
 * by hs_enabled), or NOT_SUPPORTED for any other type.
 */
4320 static inline int l2cap_information_req(struct l2cap_conn *conn,
4321 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4324 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4327 if (cmd_len != sizeof(*req))
4330 type = __le16_to_cpu(req->type);
4332 BT_DBG("type 0x%4.4x", type);
4334 if (type == L2CAP_IT_FEAT_MASK) {
4336 u32 feat_mask = l2cap_feat_mask;
4337 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4338 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4339 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4341 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4343 if (conn->hs_enabled)
4344 feat_mask |= L2CAP_FEAT_EXT_FLOW
4345 | L2CAP_FEAT_EXT_WINDOW;
4347 put_unaligned_le32(feat_mask, rsp->data);
4348 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4350 } else if (type == L2CAP_IT_FIXED_CHAN) {
4352 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when high speed is on.
 * NOTE(review): this mutates the module-global l2cap_fixed_chan[],
 * shared across connections — racy if hs_enabled differs per conn. */
4354 if (conn->hs_enabled)
4355 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4357 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4359 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4360 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4361 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4362 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4365 struct l2cap_info_rsp rsp;
4366 rsp.type = cpu_to_le16(type);
4367 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4368 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response to our own request: record
 * the peer's feature mask (chaining a fixed-channel query when it is
 * supported) or fixed-channel map, then start pending connections.
 */
4375 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4376 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4379 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4382 if (cmd_len < sizeof(*rsp))
4385 type = __le16_to_cpu(rsp->type);
4386 result = __le16_to_cpu(rsp->result);
4388 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4390 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4391 if (cmd->ident != conn->info_ident ||
4392 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4395 cancel_delayed_work(&conn->info_timer);
/* A failed query still completes the exchange; proceed with defaults. */
4397 if (result != L2CAP_IR_SUCCESS) {
4398 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4399 conn->info_ident = 0;
4401 l2cap_conn_start(conn);
4407 case L2CAP_IT_FEAT_MASK:
4408 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer has fixed channels, ask for the map next; otherwise the
 * exchange is done. */
4410 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4411 struct l2cap_info_req req;
4412 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4414 conn->info_ident = l2cap_get_ident(conn);
4416 l2cap_send_cmd(conn, conn->info_ident,
4417 L2CAP_INFO_REQ, sizeof(req), &req);
4419 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4420 conn->info_ident = 0;
4422 l2cap_conn_start(conn);
4426 case L2CAP_IT_FIXED_CHAN:
4427 conn->fixed_chan_mask = rsp->data[0];
4428 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4429 conn->info_ident = 0;
4431 l2cap_conn_start(conn);
/* Handle an incoming Create Channel Request (AMP channel creation).
 * amp_id == AMP_ID_BREDR falls back to a normal BR/EDR connect; any
 * other amp_id is validated against a local AMP controller before the
 * channel is created and bound to the AMP manager. On a bad AMP id a
 * CREATE_CHAN_RSP with L2CAP_CR_BAD_AMP is returned.
 * NOTE(review): locals (psm/scid), error paths, hci_dev_put() calls and
 * closing braces are elided in this view of the file.
 */
4438 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4439 struct l2cap_cmd_hdr *cmd,
4440 u16 cmd_len, void *data)
4442 struct l2cap_create_chan_req *req = data;
4443 struct l2cap_create_chan_rsp rsp;
4444 struct l2cap_chan *chan;
4445 struct hci_dev *hdev;
4448 if (cmd_len != sizeof(*req))
/* AMP channels are only legal when high speed is enabled */
4451 if (!conn->hs_enabled)
4454 psm = le16_to_cpu(req->psm);
4455 scid = le16_to_cpu(req->scid);
4457 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4459 /* For controller id 0 make BR/EDR connection */
4460 if (req->amp_id == AMP_ID_BREDR) {
4461 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4466 /* Validate AMP controller id */
4467 hdev = hci_dev_get(req->amp_id);
/* The target controller must exist, be an AMP controller, and be up */
4471 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4476 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4479 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4480 struct hci_conn *hs_hcon;
4482 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4488 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
/* Bind the new channel to the AMP manager / high-speed link.
 * FCS is disabled on AMP and the connection MTU follows the
 * controller's block MTU.
 */
4490 mgr->bredr_chan = chan;
4491 chan->hs_hcon = hs_hcon;
4492 chan->fcs = L2CAP_FCS_NONE;
4493 conn->mtu = hdev->block_mtu;
/* Error path: reject the request with BAD_AMP */
4502 rsp.scid = cpu_to_le16(scid);
4503 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4504 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4506 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for @chan towards @dest_amp_id.
 * Allocates a fresh signaling ident (remembered in chan->ident so the
 * response can be matched) and arms the move-channel timeout.
 */
4512 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4514 struct l2cap_move_chan_req req;
4517 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4519 ident = l2cap_get_ident(chan->conn);
4520 chan->ident = ident;
/* The initiator identifies the channel by its own source CID (icid) */
4522 req.icid = cpu_to_le16(chan->scid);
4523 req.dest_amp_id = dest_amp_id;
4525 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4528 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with @result, reusing the ident saved
 * from the peer's request (chan->ident). The responder identifies the
 * channel by its destination CID.
 */
4531 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4533 struct l2cap_move_chan_rsp rsp;
4535 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4537 rsp.icid = cpu_to_le16(chan->dcid);
4538 rsp.result = cpu_to_le16(result);
4540 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm with @result (confirmed/unconfirmed).
 * Uses a fresh signaling ident and re-arms the move timeout while we
 * wait for the confirm response.
 */
4544 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4546 struct l2cap_move_chan_cfm cfm;
4548 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4550 chan->ident = l2cap_get_ident(chan->conn);
4552 cfm.icid = cpu_to_le16(chan->scid);
4553 cfm.result = cpu_to_le16(result);
4555 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4558 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a raw @icid when no
 * matching channel could be located (spec still requires a confirm).
 */
4561 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4563 struct l2cap_move_chan_cfm cfm;
4565 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4567 cfm.icid = cpu_to_le16(icid);
4568 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4570 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Acknowledge a received Move Channel Confirm by echoing its icid back
 * in a Move Channel Confirm Response using the peer's @ident.
 */
4574 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4577 struct l2cap_move_chan_cfm_rsp rsp;
4579 BT_DBG("icid 0x%4.4x", icid);
4581 rsp.icid = cpu_to_le16(icid);
4582 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its high-speed logical link state.
 * Only clears the pointers; the actual link release is a placeholder.
 */
4585 static void __release_logical_link(struct l2cap_chan *chan)
4587 chan->hs_hchan = NULL;
4588 chan->hs_hcon = NULL;
4590 /* Placeholder - release the logical link */
/* Handle failure to bring up an AMP logical link.
 * If the channel was still being created, it is torn down; if it was an
 * established channel being moved, the move is unwound according to our
 * move role (responder rejects, initiator cleans up and un-confirms).
 * NOTE(review): returns/breaks and closing braces are elided in this
 * view of the file.
 */
4593 static void l2cap_logical_fail(struct l2cap_chan *chan)
4595 /* Logical link setup failed */
4596 if (chan->state != BT_CONNECTED) {
4597 /* Create channel failure, disconnect */
4598 l2cap_send_disconn_req(chan, ECONNRESET);
4602 switch (chan->move_role) {
4603 case L2CAP_MOVE_ROLE_RESPONDER:
4604 l2cap_move_done(chan);
4605 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4607 case L2CAP_MOVE_ROLE_INITIATOR:
4608 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4609 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4610 /* Remote has only sent pending or
4611 * success responses, clean up
4613 l2cap_move_done(chan);
4616 /* Other amp move states imply that the move
4617 * has already aborted
4619 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach the
 * hci_chan, send the EFS configure response, and if configuration is
 * already done on the input side, initialize ERTM and mark the channel
 * ready (or disconnect on ERTM init failure).
 * NOTE(review): the err declaration and some braces are elided here.
 */
4624 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4625 struct hci_chan *hchan)
4627 struct l2cap_conf_rsp rsp;
4629 chan->hs_hchan = hchan;
4630 chan->hs_hcon->l2cap_data = chan->conn;
4632 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4634 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4637 set_default_fcs(chan);
4639 err = l2cap_ertm_init(chan);
4641 l2cap_send_disconn_req(chan, -err);
4643 l2cap_chan_ready(chan);
/* Advance the channel-move state machine after the AMP logical link for
 * a move came up, based on chan->move_state and our move role.
 * An unexpected state releases the link and returns to MOVE_STABLE.
 * NOTE(review): break statements and closing braces are elided in this
 * view of the file.
 */
4647 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4648 struct hci_chan *hchan)
4650 chan->hs_hcon = hchan->conn;
4651 chan->hs_hcon->l2cap_data = chan->conn;
4653 BT_DBG("move_state %d", chan->move_state);
4655 switch (chan->move_state) {
4656 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4657 /* Move confirm will be sent after a success
4658 * response is received
4660 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4662 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local busy delays the confirm; otherwise initiator confirms,
 * responder responds with success.
 */
4663 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4664 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4665 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4666 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4667 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4668 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4669 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4670 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4674 /* Move was not in expected state, free the channel */
4675 __release_logical_link(chan);
4677 chan->move_state = L2CAP_MOVE_STABLE;
/* Call with chan locked */
/* Logical link confirmation callback: on failure, unwind via
 * l2cap_logical_fail() and drop the link; on success, finish either
 * channel creation (state != BT_CONNECTED, and only for non-BR/EDR
 * channels) or a channel move.
 * NOTE(review): the status check and returns are elided in this view.
 */
4682 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4685 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4688 l2cap_logical_fail(chan);
4689 __release_logical_link(chan);
4693 if (chan->state != BT_CONNECTED) {
4694 /* Ignore logical link if channel is on BR/EDR */
4695 if (chan->local_amp_id != AMP_ID_BREDR)
4696 l2cap_logical_finish_create(chan, hchan);
4698 l2cap_logical_finish_move(chan, hchan);
/* Begin moving @chan to the preferred controller as move initiator.
 * From BR/EDR: only proceed when policy prefers AMP, then wait for
 * physical link setup (placeholder). From AMP: set up the move and send
 * a Move Channel Request back to BR/EDR (dest_amp_id 0).
 */
4702 void l2cap_move_start(struct l2cap_chan *chan)
4704 BT_DBG("chan %p", chan);
4706 if (chan->local_amp_id == AMP_ID_BREDR) {
4707 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4709 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4710 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4711 /* Placeholder - start physical link setup */
4713 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4714 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4716 l2cap_move_setup(chan);
4717 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical link setup.
 * Outgoing (BT_CONNECT): on success send Create Channel Request on the
 * AMP controller, otherwise fall back to a BR/EDR Connect Request.
 * Incoming: send the (positive or negative) Create Channel Response,
 * and on success move to BT_CONFIG and kick off configuration.
 * FCS is disabled for AMP channels.
 * NOTE(review): a buf declaration, returns and closing braces are
 * elided in this view of the file.
 */
4721 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4722 u8 local_amp_id, u8 remote_amp_id)
4724 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4725 local_amp_id, remote_amp_id);
4727 chan->fcs = L2CAP_FCS_NONE;
4729 /* Outgoing channel on AMP */
4730 if (chan->state == BT_CONNECT) {
4731 if (result == L2CAP_CR_SUCCESS) {
4732 chan->local_amp_id = local_amp_id;
4733 l2cap_send_create_chan_req(chan, remote_amp_id);
4735 /* Revert to BR/EDR connect */
4736 l2cap_send_conn_req(chan);
4742 /* Incoming channel on AMP */
4743 if (__l2cap_no_conn_pending(chan)) {
4744 struct l2cap_conn_rsp rsp;
4746 rsp.scid = cpu_to_le16(chan->dcid);
4747 rsp.dcid = cpu_to_le16(chan->scid);
4749 if (result == L2CAP_CR_SUCCESS) {
4750 /* Send successful response */
4751 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4752 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4754 /* Send negative response */
4755 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4756 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4759 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4762 if (result == L2CAP_CR_SUCCESS) {
/* Channel accepted: enter configuration and send our conf request */
4763 __l2cap_state_change(chan, BT_CONFIG);
4764 set_bit(CONF_REQ_SENT, &chan->conf_state);
4765 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4767 l2cap_build_conf_req(chan, buf), buf);
4768 chan->num_conf_req++;
/* As move initiator, set up the move state and send the Move Channel
 * Request; we then wait for the peer's response (MOVE_WAIT_RSP).
 */
4773 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4776 l2cap_move_setup(chan);
4777 chan->move_id = local_amp_id;
4778 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4780 l2cap_send_move_chan_req(chan, remote_amp_id);
/* As move responder, react to physical link setup: if the logical link
 * (hchan) is already connected, attach it and answer with SUCCESS;
 * otherwise wait for logical link confirmation. With no logical link
 * available, reject the move with NOT_ALLOWED.
 * NOTE(review): hchan is only a placeholder-initialized NULL here; the
 * code that obtains it, plus some braces, are elided in this view.
 */
4783 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4785 struct hci_chan *hchan = NULL;
4787 /* Placeholder - get hci_chan for logical link */
4790 if (hchan->state == BT_CONNECTED) {
4791 /* Logical link is ready to go */
4792 chan->hs_hcon = hchan->conn;
4793 chan->hs_hcon->l2cap_data = chan->conn;
4794 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4795 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4797 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4799 /* Wait for logical link to be ready */
4800 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4803 /* Logical link not available */
4804 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move. A responder still owes the peer a Move
 * Channel Response (BAD_ID for -EINVAL, otherwise NOT_ALLOWED). The
 * channel returns to the stable state and pending ERTM traffic is
 * restarted.
 * NOTE(review): the rsp_result declaration and some braces are elided.
 */
4808 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4810 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4812 if (result == -EINVAL)
4813 rsp_result = L2CAP_MR_BAD_ID;
4815 rsp_result = L2CAP_MR_NOT_ALLOWED;
4817 l2cap_send_move_chan_rsp(chan, rsp_result);
4820 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4821 chan->move_state = L2CAP_MOVE_STABLE;
4823 /* Restart data transmission */
4824 l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* Physical (AMP) link confirmation. Dispatches on channel state:
 * creation continues via l2cap_do_create(); a failed result cancels the
 * move; otherwise the move proceeds per our role (initiator/responder).
 * A disconnecting/closed channel is ignored.
 * NOTE(review): breaks, returns and closing braces are elided; the
 * early-exit path unlocks the chan, suggesting lock handling continues
 * outside this view.
 */
4828 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4830 u8 local_amp_id = chan->local_amp_id;
4831 u8 remote_amp_id = chan->remote_amp_id;
4833 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4834 chan, result, local_amp_id, remote_amp_id);
4836 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4837 l2cap_chan_unlock(chan);
4841 if (chan->state != BT_CONNECTED) {
4842 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4843 } else if (result != L2CAP_MR_SUCCESS) {
4844 l2cap_do_move_cancel(chan, result);
4846 switch (chan->move_role) {
4847 case L2CAP_MOVE_ROLE_INITIATOR:
4848 l2cap_do_move_initiate(chan, local_amp_id,
4851 case L2CAP_MOVE_ROLE_RESPONDER:
4852 l2cap_do_move_respond(chan, result);
4855 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.
 * Validates the request (HS enabled, known dcid, dynamic CID, policy
 * and mode allow moving, destination AMP id valid and different), then
 * becomes move responder. Moving to BR/EDR can be answered immediately
 * (or PEND while locally busy); moving to an AMP answers PEND while the
 * physical link is accepted. Also resolves move collisions in favor of
 * the larger bd_addr. Always replies with a Move Channel Response.
 * NOTE(review): locals (icid), returns, hci_dev_put() and closing
 * braces are elided in this view of the file.
 */
4861 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4862 struct l2cap_cmd_hdr *cmd,
4863 u16 cmd_len, void *data)
4865 struct l2cap_move_chan_req *req = data;
4866 struct l2cap_move_chan_rsp rsp;
4867 struct l2cap_chan *chan;
4869 u16 result = L2CAP_MR_NOT_ALLOWED;
4871 if (cmd_len != sizeof(*req))
4874 icid = le16_to_cpu(req->icid);
4876 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4878 if (!conn->hs_enabled)
4881 chan = l2cap_get_chan_by_dcid(conn, icid);
/* No such channel: reply NOT_ALLOWED directly on the raw icid */
4883 rsp.icid = cpu_to_le16(icid);
4884 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4885 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Remember the peer's ident for the eventual response */
4890 chan->ident = cmd->ident;
/* Only dynamic-CID ERTM/streaming channels without a BR/EDR-only
 * policy may be moved.
 */
4892 if (chan->scid < L2CAP_CID_DYN_START ||
4893 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4894 (chan->mode != L2CAP_MODE_ERTM &&
4895 chan->mode != L2CAP_MODE_STREAMING)) {
4896 result = L2CAP_MR_NOT_ALLOWED;
4897 goto send_move_response;
4900 if (chan->local_amp_id == req->dest_amp_id) {
4901 result = L2CAP_MR_SAME_ID;
4902 goto send_move_response;
4905 if (req->dest_amp_id != AMP_ID_BREDR) {
4906 struct hci_dev *hdev;
4907 hdev = hci_dev_get(req->dest_amp_id);
4908 if (!hdev || hdev->dev_type != HCI_AMP ||
4909 !test_bit(HCI_UP, &hdev->flags)) {
4913 result = L2CAP_MR_BAD_ID;
4914 goto send_move_response;
4919 /* Detect a move collision. Only send a collision response
4920 * if this side has "lost", otherwise proceed with the move.
4921 * The winner has the larger bd_addr.
4923 if ((__chan_is_moving(chan) ||
4924 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4925 bacmp(conn->src, conn->dst) > 0) {
4926 result = L2CAP_MR_COLLISION;
4927 goto send_move_response;
4930 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4931 l2cap_move_setup(chan);
4932 chan->move_id = req->dest_amp_id;
4935 if (req->dest_amp_id == AMP_ID_BREDR) {
4936 /* Moving to BR/EDR */
4937 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4938 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4939 result = L2CAP_MR_PEND;
4941 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4942 result = L2CAP_MR_SUCCESS;
4945 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4946 /* Placeholder - uncomment when amp functions are available */
4947 /*amp_accept_physical(chan, req->dest_amp_id);*/
4948 result = L2CAP_MR_PEND;
4952 l2cap_send_move_chan_rsp(chan, result);
4954 l2cap_chan_unlock(chan);
/* Continue a move as initiator after a SUCCESS or PEND Move Channel
 * Response. Looks up the channel by scid (replying UNCONFIRMED on the
 * raw icid when not found), adjusts the channel timer, and advances the
 * move state machine; when the logical link is already connected it is
 * attached and the Move Channel Confirm is sent.
 * NOTE(review): breaks, labels and closing braces are elided in this
 * view of the file; hchan acquisition is a placeholder.
 */
4959 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4961 struct l2cap_chan *chan;
4962 struct hci_chan *hchan = NULL;
4964 chan = l2cap_get_chan_by_scid(conn, icid);
4966 l2cap_send_move_chan_cfm_icid(conn, icid);
/* A pending response extends the move timeout (ERTX) */
4970 __clear_chan_timer(chan);
4971 if (result == L2CAP_MR_PEND)
4972 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4974 switch (chan->move_state) {
4975 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4976 /* Move confirm will be sent when logical link
4979 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4981 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4982 if (result == L2CAP_MR_PEND) {
4984 } else if (test_bit(CONN_LOCAL_BUSY,
4985 &chan->conn_state)) {
4986 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4988 /* Logical link is up or moving to BR/EDR,
4991 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4992 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4995 case L2CAP_MOVE_WAIT_RSP:
4997 if (result == L2CAP_MR_SUCCESS) {
4998 /* Remote is ready, send confirm immediately
4999 * after logical link is ready
5001 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5003 /* Both logical link and move success
5004 * are required to confirm
5006 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5009 /* Placeholder - get hci_chan for logical link */
5011 /* Logical link not available */
5012 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5016 /* If the logical link is not yet connected, do not
5017 * send confirmation.
5019 if (hchan->state != BT_CONNECTED)
5022 /* Logical link is already ready to go */
5024 chan->hs_hcon = hchan->conn;
5025 chan->hs_hcon->l2cap_data = chan->conn;
5027 if (result == L2CAP_MR_SUCCESS) {
5028 /* Can confirm now */
5029 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5031 /* Now only need move success
5034 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5037 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5040 /* Any other amp move state means the move failed. */
5041 chan->move_id = chan->local_amp_id;
5042 l2cap_move_done(chan);
5043 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5046 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response. Looks the channel up by the
 * signaling ident (falling back to an UNCONFIRMED confirm on the raw
 * icid). On a COLLISION result the initiator switches to responder and
 * lets the peer drive the move; otherwise the move is unwound. An
 * UNCONFIRMED confirm is always sent for the located channel.
 * NOTE(review): returns and closing braces are elided in this view.
 */
5049 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5052 struct l2cap_chan *chan;
5054 chan = l2cap_get_chan_by_ident(conn, ident);
5056 /* Could not locate channel, icid is best guess */
5057 l2cap_send_move_chan_cfm_icid(conn, icid);
5061 __clear_chan_timer(chan);
5063 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5064 if (result == L2CAP_MR_COLLISION) {
5065 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5067 /* Cleanup - cancel move */
5068 chan->move_id = chan->local_amp_id;
5069 l2cap_move_done(chan);
5073 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5075 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Response: SUCCESS/PEND continues the
 * move, any other result fails it.
 * NOTE(review): locals (icid/result) and the return are elided in this
 * view of the file.
 */
5078 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5079 struct l2cap_cmd_hdr *cmd,
5080 u16 cmd_len, void *data)
5082 struct l2cap_move_chan_rsp *rsp = data;
5085 if (cmd_len != sizeof(*rsp))
5088 icid = le16_to_cpu(rsp->icid);
5089 result = le16_to_cpu(rsp->result);
5091 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5093 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5094 l2cap_move_continue(conn, icid, result);
5096 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm (responder side).
 * A CONFIRMED result commits the move (adopting move_id, releasing the
 * logical link if we moved back to BR/EDR); UNCONFIRMED rolls back.
 * A confirm response is sent even when the icid is unknown, per spec.
 * NOTE(review): locals (icid/result), else branch and returns are
 * elided in this view of the file.
 */
5101 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5102 struct l2cap_cmd_hdr *cmd,
5103 u16 cmd_len, void *data)
5105 struct l2cap_move_chan_cfm *cfm = data;
5106 struct l2cap_chan *chan;
5109 if (cmd_len != sizeof(*cfm))
5112 icid = le16_to_cpu(cfm->icid);
5113 result = le16_to_cpu(cfm->result);
5115 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5117 chan = l2cap_get_chan_by_dcid(conn, icid);
5119 /* Spec requires a response even if the icid was not found */
5120 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5124 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5125 if (result == L2CAP_MC_CONFIRMED) {
5126 chan->local_amp_id = chan->move_id;
5127 if (chan->local_amp_id == AMP_ID_BREDR)
5128 __release_logical_link(chan);
5130 chan->move_id = chan->local_amp_id;
5133 l2cap_move_done(chan);
5136 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5138 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirm Response (initiator side).
 * Completes the move when we were waiting for this response: the
 * channel adopts move_id as its controller, releases the logical link
 * when back on BR/EDR, and finishes via l2cap_move_done().
 * NOTE(review): the icid declaration and returns are elided here.
 */
5143 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5144 struct l2cap_cmd_hdr *cmd,
5145 u16 cmd_len, void *data)
5147 struct l2cap_move_chan_cfm_rsp *rsp = data;
5148 struct l2cap_chan *chan;
5151 if (cmd_len != sizeof(*rsp))
5154 icid = le16_to_cpu(rsp->icid);
5156 BT_DBG("icid 0x%4.4x", icid);
5158 chan = l2cap_get_chan_by_scid(conn, icid);
5162 __clear_chan_timer(chan);
5164 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5165 chan->local_amp_id = chan->move_id;
5167 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5168 __release_logical_link(chan);
5170 l2cap_move_done(chan);
5173 l2cap_chan_unlock(chan);
/* Validate LE connection parameters (units per the LE Connection
 * Parameter Update Request): interval min/max, supervision timeout
 * multiplier and slave latency must each be in range and mutually
 * consistent (timeout must exceed the effective interval).
 * NOTE(review): return statements and the max_latency declaration are
 * elided in this view of the file.
 */
5178 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5183 if (min > max || min < 6 || max > 3200)
5186 if (to_multiplier < 10 || to_multiplier > 3200)
5189 if (max >= to_multiplier * 8)
/* Latency must leave room for at least one event before timeout */
5192 max_latency = (to_multiplier * 8 / max) - 1;
5193 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request.
 * Only valid when we are master; parameters are validated with
 * l2cap_check_conn_param(), a response (accepted/rejected) is always
 * sent, and on acceptance the controller connection is updated via
 * hci_le_conn_update().
 * NOTE(review): the err declaration, returns and closing braces are
 * elided in this view of the file.
 */
5199 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5200 struct l2cap_cmd_hdr *cmd,
5203 struct hci_conn *hcon = conn->hcon;
5204 struct l2cap_conn_param_update_req *req;
5205 struct l2cap_conn_param_update_rsp rsp;
5206 u16 min, max, latency, to_multiplier, cmd_len;
/* Slaves may not be asked to update parameters by this command */
5209 if (!(hcon->link_mode & HCI_LM_MASTER))
5212 cmd_len = __le16_to_cpu(cmd->len);
5213 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5216 req = (struct l2cap_conn_param_update_req *) data;
5217 min = __le16_to_cpu(req->min);
5218 max = __le16_to_cpu(req->max);
5219 latency = __le16_to_cpu(req->latency);
5220 to_multiplier = __le16_to_cpu(req->to_multiplier);
5222 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5223 min, max, latency, to_multiplier);
5225 memset(&rsp, 0, sizeof(rsp));
5227 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5229 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5231 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5233 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5237 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler based on the
 * command code. Echo requests are answered inline by echoing the
 * payload back. Unknown codes log an error (and, per the callers'
 * convention, are rejected).
 * NOTE(review): the err declaration, break statements and the final
 * return are elided in this view of the file.
 */
5242 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5243 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5248 switch (cmd->code) {
5249 case L2CAP_COMMAND_REJ:
5250 l2cap_command_rej(conn, cmd, cmd_len, data);
5253 case L2CAP_CONN_REQ:
5254 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5257 case L2CAP_CONN_RSP:
5258 case L2CAP_CREATE_CHAN_RSP:
5259 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5262 case L2CAP_CONF_REQ:
5263 err = l2cap_config_req(conn, cmd, cmd_len, data);
5266 case L2CAP_CONF_RSP:
5267 l2cap_config_rsp(conn, cmd, cmd_len, data);
5270 case L2CAP_DISCONN_REQ:
5271 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5274 case L2CAP_DISCONN_RSP:
5275 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5278 case L2CAP_ECHO_REQ:
/* Echo: reflect the request payload straight back */
5279 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5282 case L2CAP_ECHO_RSP:
5285 case L2CAP_INFO_REQ:
5286 err = l2cap_information_req(conn, cmd, cmd_len, data);
5289 case L2CAP_INFO_RSP:
5290 l2cap_information_rsp(conn, cmd, cmd_len, data);
5293 case L2CAP_CREATE_CHAN_REQ:
5294 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5297 case L2CAP_MOVE_CHAN_REQ:
5298 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5301 case L2CAP_MOVE_CHAN_RSP:
5302 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5305 case L2CAP_MOVE_CHAN_CFM:
5306 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5309 case L2CAP_MOVE_CHAN_CFM_RSP:
5310 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5314 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command. Only the connection parameter
 * update request is actively handled; command reject and parameter
 * update responses are accepted silently, everything else is an error.
 * NOTE(review): return statements for the silent cases are elided in
 * this view of the file.
 */
5322 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5323 struct l2cap_cmd_hdr *cmd, u8 *data)
5325 switch (cmd->code) {
5326 case L2CAP_COMMAND_REJ:
5329 case L2CAP_CONN_PARAM_UPDATE_REQ:
5330 return l2cap_conn_param_update_req(conn, cmd, data);
5332 case L2CAP_CONN_PARAM_UPDATE_RSP:
5336 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Map a negative errno from a signaling handler to the little-endian
 * L2CAP Command Reject reason code; unknown errors map to
 * "command not understood".
 * NOTE(review): the switch header / case labels are elided here.
 */
5341 static __le16 l2cap_err_to_reason(int err)
5345 return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
5347 return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED);
5351 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
/* Process one inbound skb on the LE signaling channel. LE carries a
 * single command per PDU: validate link type, header size, length and
 * ident, dispatch via l2cap_le_sig_cmd(), and send a Command Reject on
 * handler failure.
 * NOTE(review): locals (len/err), drop paths and kfree_skb are elided
 * in this view of the file. The "Wrong link type" BT_ERR wording looks
 * copy-pasted from elsewhere — it fires on any handler error, not just
 * link-type mismatch; worth confirming against upstream.
 */
5355 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5356 struct sk_buff *skb)
5358 struct hci_conn *hcon = conn->hcon;
5359 struct l2cap_cmd_hdr *cmd;
5363 if (hcon->type != LE_LINK)
5366 if (skb->len < L2CAP_CMD_HDR_SIZE)
5369 cmd = (void *) skb->data;
5370 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5372 len = le16_to_cpu(cmd->len);
5374 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5376 if (len != skb->len || !cmd->ident) {
5377 BT_DBG("corrupted command");
5381 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5383 struct l2cap_cmd_rej_unk rej;
5385 BT_ERR("Wrong link type (%d)", err);
5387 rej.reason = l2cap_err_to_reason(err);
5388 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an inbound skb on the BR/EDR signaling channel, which may
 * carry multiple commands: copy each command header, validate the
 * declared length and ident, dispatch via l2cap_bredr_sig_cmd(), and
 * send a Command Reject when a handler fails. Raw sockets get a copy
 * of the whole PDU first via l2cap_raw_recv().
 * NOTE(review): locals (len/cmd_len/err), the loop-advance lines, drop
 * paths and kfree_skb are elided in this view of the file. Same
 * "Wrong link type" BT_ERR wording caveat as the LE variant.
 */
5396 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5397 struct sk_buff *skb)
5399 struct hci_conn *hcon = conn->hcon;
5400 u8 *data = skb->data;
5402 struct l2cap_cmd_hdr cmd;
5405 l2cap_raw_recv(conn, skb);
5407 if (hcon->type != ACL_LINK)
5410 while (len >= L2CAP_CMD_HDR_SIZE) {
5412 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5413 data += L2CAP_CMD_HDR_SIZE;
5414 len -= L2CAP_CMD_HDR_SIZE;
5416 cmd_len = le16_to_cpu(cmd.len);
5418 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* A command may not claim more payload than remains in the PDU */
5421 if (cmd_len > len || !cmd.ident) {
5422 BT_DBG("corrupted command");
5426 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5428 struct l2cap_cmd_rej_unk rej;
5430 BT_ERR("Wrong link type (%d)", err);
5432 rej.reason = l2cap_err_to_reason(err);
5433 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the trailing CRC16 FCS on a received ERTM/
 * streaming frame. The CRC covers the L2CAP header plus payload, so
 * the header size (enhanced vs extended control field) is added back
 * when recomputing. After skb_trim(), skb->data + skb->len points at
 * the 2 FCS bytes that were just trimmed off.
 * NOTE(review): return statements are elided in this view of the file.
 */
5445 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5447 u16 our_fcs, rcv_fcs;
5450 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5451 hdr_size = L2CAP_EXT_HDR_SIZE;
5453 hdr_size = L2CAP_ENH_HDR_SIZE;
5455 if (chan->fcs == L2CAP_FCS_CRC16) {
5456 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5457 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5458 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5460 if (our_fcs != rcv_fcs)
/* Report local receiver state with the F-bit set: send RNR while
 * locally busy, otherwise flush pending I-frames (which may carry the
 * F-bit) and fall back to an RR if no frame carried it.
 * NOTE(review): control.final assignment and some braces are elided in
 * this view of the file.
 */
5466 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5468 struct l2cap_ctrl control;
5470 BT_DBG("chan %p", chan);
5472 memset(&control, 0, sizeof(control));
5475 control.reqseq = chan->buffer_seq;
5476 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5478 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5479 control.super = L2CAP_SUPER_RNR;
5480 l2cap_send_sframe(chan, &control);
/* Remote just cleared busy: restart retransmission timing if
 * frames are still unacknowledged.
 */
5483 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5484 chan->unacked_frames > 0)
5485 __set_retrans_timer(chan);
5487 /* Send pending iframes */
5488 l2cap_ertm_send(chan);
5490 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5491 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5492 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5495 control.super = L2CAP_SUPER_RR;
5496 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list, tracking the list tail through
 * @last_frag to keep the append O(1), and account its length and
 * truesize into the head skb.
 */
5500 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5501 struct sk_buff **last_frag)
5503 /* skb->len reflects data in skb as well as all fragments
5504 * skb->data_len reflects only data in fragments
5506 if (!skb_has_frag_list(skb))
5507 skb_shinfo(skb)->frag_list = new_frag;
5509 new_frag->next = NULL;
5511 (*last_frag)->next = new_frag;
5512 *last_frag = new_frag;
5514 skb->len += new_frag->len;
5515 skb->data_len += new_frag->len;
5516 skb->truesize += new_frag->truesize;
/* Reassemble a segmented SDU from ERTM/streaming I-frames according to
 * the SAR field: unsegmented frames are delivered directly; a START
 * frame pulls the 16-bit SDU length and begins accumulation; CONTINUE
 * and END frames are appended via append_skb_frag(), with the complete
 * SDU delivered to chan->ops->recv() and chan->sdu state reset (or the
 * partial SDU freed on error/overflow).
 * NOTE(review): many guard lines (err init, sdu-state checks, break
 * statements, the END case label and cleanup labels) are elided in this
 * view of the file.
 */
5519 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5520 struct l2cap_ctrl *control)
5524 switch (control->sar) {
5525 case L2CAP_SAR_UNSEGMENTED:
5529 err = chan->ops->recv(chan, skb);
5532 case L2CAP_SAR_START:
5536 chan->sdu_len = get_unaligned_le16(skb->data);
5537 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Reject SDUs larger than the channel's incoming MTU */
5539 if (chan->sdu_len > chan->imtu) {
5544 if (skb->len >= chan->sdu_len)
5548 chan->sdu_last_frag = skb;
5554 case L2CAP_SAR_CONTINUE:
5558 append_skb_frag(chan->sdu, skb,
5559 &chan->sdu_last_frag);
5562 if (chan->sdu->len >= chan->sdu_len)
5572 append_skb_frag(chan->sdu, skb,
5573 &chan->sdu_last_frag);
/* The END frame must complete the SDU to exactly sdu_len */
5576 if (chan->sdu->len != chan->sdu_len)
5579 err = chan->ops->recv(chan, chan->sdu);
5582 /* Reassembly complete */
5584 chan->sdu_last_frag = NULL;
/* Error path: discard the partially reassembled SDU */
5592 kfree_skb(chan->sdu);
5594 chan->sdu_last_frag = NULL;
/* Re-segment queued frames for a new MTU after a channel move.
 * NOTE(review): the body is elided in this view of the file.
 */
5601 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition into the ERTM tx state machine.
 * No-op for non-ERTM channels.
 */
5607 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5611 if (chan->mode != L2CAP_MODE_ERTM)
5614 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5615 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver consecutively sequenced frames starting
 * at buffer_seq to the reassembler until a gap (or local busy) stops
 * progress. When the queue empties, return to the RECV state and ack.
 * NOTE(review): the err declaration, loop-break on a missing seq, and
 * the return are elided in this view of the file.
 */
5618 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5621 /* Pass sequential frames to l2cap_reassemble_sdu()
5622 * until a gap is encountered.
5625 BT_DBG("chan %p", chan);
5627 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5628 struct sk_buff *skb;
5629 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5630 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5632 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5637 skb_unlink(skb, &chan->srej_q);
5638 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5639 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5644 if (skb_queue_empty(&chan->srej_q)) {
5645 chan->rx_state = L2CAP_RX_STATE_RECV;
5646 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, enforce the per-
 * frame retry limit, then retransmit the requested frame. Poll frames
 * additionally trigger an F-bit reply and a send pass; final frames
 * are de-duplicated against srej_save_reqseq so an already-answered
 * SREJ is not retransmitted twice.
 * NOTE(review): returns and closing braces are elided in this view of
 * the file.
 */
5652 static void l2cap_handle_srej(struct l2cap_chan *chan,
5653 struct l2cap_ctrl *control)
5655 struct sk_buff *skb;
5657 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq requests a frame never sent — protocol
 * violation, disconnect.
 */
5659 if (control->reqseq == chan->next_tx_seq) {
5660 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5661 l2cap_send_disconn_req(chan, ECONNRESET);
5665 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5668 BT_DBG("Seq %d not available for retransmission",
5673 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5674 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5675 l2cap_send_disconn_req(chan, ECONNRESET);
5679 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5681 if (control->poll) {
5682 l2cap_pass_to_tx(chan, control);
5684 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5685 l2cap_retransmit(chan, control);
5686 l2cap_ertm_send(chan);
5688 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5689 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5690 chan->srej_save_reqseq = control->reqseq;
5693 l2cap_pass_to_tx_fbit(chan, control);
5695 if (control->final) {
/* Only retransmit if this final SREJ isn't the one we already
 * acted on (srej_save_reqseq / CONN_SREJ_ACT).
 */
5696 if (chan->srej_save_reqseq != control->reqseq ||
5697 !test_and_clear_bit(CONN_SREJ_ACT,
5699 l2cap_retransmit(chan, control);
5701 l2cap_retransmit(chan, control);
5702 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5703 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5704 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq, enforce the retry
 * limit on the first rejected frame, then retransmit all unacked frames
 * from reqseq (final frames de-duplicated via CONN_REJ_ACT).
 * NOTE(review): returns and closing braces are elided in this view of
 * the file.
 */
5710 static void l2cap_handle_rej(struct l2cap_chan *chan,
5711 struct l2cap_ctrl *control)
5713 struct sk_buff *skb;
5715 BT_DBG("chan %p, control %p", chan, control);
5717 if (control->reqseq == chan->next_tx_seq) {
5718 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5719 l2cap_send_disconn_req(chan, ECONNRESET);
5723 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5725 if (chan->max_tx && skb &&
5726 bt_cb(skb)->control.retries >= chan->max_tx) {
5727 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5728 l2cap_send_disconn_req(chan, ECONNRESET);
5732 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5734 l2cap_pass_to_tx(chan, control);
5736 if (control->final) {
/* Skip the retransmit if we already answered this REJ */
5737 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5738 l2cap_retransmit_all(chan, control);
5740 l2cap_retransmit_all(chan, control);
5741 l2cap_ertm_send(chan);
5742 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5743 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's txseq relative to the rx window:
 * expected, expected-under-SREJ, duplicate, unexpected (gap → SREJ),
 * or invalid. "Invalid but ignorable" covers the double-poll artifact
 * described inline, which is only safe to ignore when the tx window is
 * at most half the sequence space.
 * NOTE(review): several if/else scaffolding lines and closing braces
 * are elided in this view of the file.
 */
5747 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5749 BT_DBG("chan %p, txseq %d", chan, txseq);
5751 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5752 chan->expected_tx_seq);
5754 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5755 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5757 /* See notes below regarding "double poll" and
5760 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5761 BT_DBG("Invalid/Ignore - after SREJ");
5762 return L2CAP_TXSEQ_INVALID_IGNORE;
5764 BT_DBG("Invalid - in window after SREJ sent");
5765 return L2CAP_TXSEQ_INVALID;
5769 if (chan->srej_list.head == txseq) {
5770 BT_DBG("Expected SREJ");
5771 return L2CAP_TXSEQ_EXPECTED_SREJ;
5774 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5775 BT_DBG("Duplicate SREJ - txseq already stored");
5776 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5779 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5780 BT_DBG("Unexpected SREJ - not requested");
5781 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5785 if (chan->expected_tx_seq == txseq) {
5786 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5788 BT_DBG("Invalid - txseq outside tx window");
5789 return L2CAP_TXSEQ_INVALID;
5792 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (mod sequence space) is a retransmitted
 * duplicate.
 */
5796 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5797 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5798 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5799 return L2CAP_TXSEQ_DUPLICATE;
5802 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5803 /* A source of invalid packets is a "double poll" condition,
5804 * where delays cause us to send multiple poll packets. If
5805 * the remote stack receives and processes both polls,
5806 * sequence numbers can wrap around in such a way that a
5807 * resent frame has a sequence number that looks like new data
5808 * with a sequence gap. This would trigger an erroneous SREJ
5811 * Fortunately, this is impossible with a tx window that's
5812 * less than half of the maximum sequence number, which allows
5813 * invalid frames to be safely ignored.
5815 * With tx window sizes greater than half of the tx window
5816 * maximum, the frame is invalid and cannot be ignored. This
5817 * causes a disconnect.
5820 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5821 BT_DBG("Invalid/Ignore - txseq outside tx window");
5822 return L2CAP_TXSEQ_INVALID_IGNORE;
5824 BT_DBG("Invalid - txseq outside tx window");
5825 return L2CAP_TXSEQ_INVALID;
5828 BT_DBG("Unexpected - txseq indicates missing frames");
5829 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine: handler for the default RECV state.
 *
 * Processes one event (incoming I-frame, or RR/REJ/RNR/SREJ S-frame)
 * while no SREJ recovery is in progress.  Expected I-frames are
 * reassembled into SDUs; a sequence gap queues the frame on srej_q,
 * sends SREJs for the missing frames and moves the channel to the
 * SREJ_SENT state; an invalid sequence number tears the link down.
 *
 * NOTE(review): this extract appears to have lost lines (braces,
 * break statements, some arguments); the comments below describe only
 * the logic that is visible here.
 */
5833 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5834 struct l2cap_ctrl *control,
5835 struct sk_buff *skb, u8 event)
/* True once skb has been queued somewhere; unqueued skbs are freed at exit. */
5838 bool skb_in_use = false;
5840 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5844 case L2CAP_EV_RECV_IFRAME:
5845 switch (l2cap_classify_txseq(chan, control->txseq)) {
5846 case L2CAP_TXSEQ_EXPECTED:
/* Let the TX side consume the piggybacked reqseq/final bits first. */
5847 l2cap_pass_to_tx(chan, control);
/* In local-busy state the expected frame is dropped, not stored. */
5849 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5850 BT_DBG("Busy, discarding expected seq %d",
5855 chan->expected_tx_seq = __next_seq(chan,
5858 chan->buffer_seq = chan->expected_tx_seq;
5861 err = l2cap_reassemble_sdu(chan, skb, control);
/* F-bit set: a pending REJ was answered, retransmit outstanding frames. */
5865 if (control->final) {
5866 if (!test_and_clear_bit(CONN_REJ_ACT,
5867 &chan->conn_state)) {
5869 l2cap_retransmit_all(chan, control);
5870 l2cap_ertm_send(chan);
5874 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5875 l2cap_send_ack(chan);
5877 case L2CAP_TXSEQ_UNEXPECTED:
5878 l2cap_pass_to_tx(chan, control);
5880 /* Can't issue SREJ frames in the local busy state.
5881 * Drop this frame, it will be seen as missing
5882 * when local busy is exited.
5884 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5885 BT_DBG("Busy, discarding unexpected seq %d",
5890 /* There was a gap in the sequence, so an SREJ
5891 * must be sent for each missing frame. The
5892 * current frame is stored for later use.
5894 skb_queue_tail(&chan->srej_q, skb);
5896 BT_DBG("Queued %p (queue len %d)", skb,
5897 skb_queue_len(&chan->srej_q));
/* Restart SREJ bookkeeping from scratch for this new gap. */
5899 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5900 l2cap_seq_list_clear(&chan->srej_list);
5901 l2cap_send_srej(chan, control->txseq);
5903 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5905 case L2CAP_TXSEQ_DUPLICATE:
5906 l2cap_pass_to_tx(chan, control);
5908 case L2CAP_TXSEQ_INVALID_IGNORE:
/* Unignorable protocol violation: drop the whole connection. */
5910 case L2CAP_TXSEQ_INVALID:
5912 l2cap_send_disconn_req(chan, ECONNRESET);
5916 case L2CAP_EV_RECV_RR:
5917 l2cap_pass_to_tx(chan, control);
5918 if (control->final) {
/* F-bit on an RR clears the remote-busy condition. */
5919 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5921 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5922 !__chan_is_moving(chan)) {
5924 l2cap_retransmit_all(chan, control);
5927 l2cap_ertm_send(chan);
5928 } else if (control->poll) {
/* P-bit: the peer is polling us; respond with the F-bit set. */
5929 l2cap_send_i_or_rr_or_rnr(chan);
5931 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5932 &chan->conn_state) &&
5933 chan->unacked_frames)
5934 __set_retrans_timer(chan);
5936 l2cap_ertm_send(chan);
5939 case L2CAP_EV_RECV_RNR:
/* Peer is receiver-not-ready: stop retransmissions until it recovers. */
5940 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5941 l2cap_pass_to_tx(chan, control);
5942 if (control && control->poll) {
5943 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5944 l2cap_send_rr_or_rnr(chan, 0);
5946 __clear_retrans_timer(chan);
5947 l2cap_seq_list_clear(&chan->retrans_list);
5949 case L2CAP_EV_RECV_REJ:
5950 l2cap_handle_rej(chan, control);
5952 case L2CAP_EV_RECV_SREJ:
5953 l2cap_handle_srej(chan, control);
/* Any frame that was not queued above is no longer needed. */
5959 if (skb && !skb_in_use) {
5960 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine: handler for the SREJ_SENT state.
 *
 * Entered after a sequence gap was detected and SREJ frames were sent.
 * Incoming I-frames are buffered on srej_q until the requested
 * retransmissions arrive; once the head of the SREJ list is satisfied,
 * queued frames are fed back through reassembly.
 *
 * NOTE(review): this extract appears to have lost lines (braces,
 * breaks, skb_in_use assignments); comments describe visible logic only.
 */
5967 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5968 struct l2cap_ctrl *control,
5969 struct sk_buff *skb, u8 event)
5972 u16 txseq = control->txseq;
/* True once skb has been queued; unqueued skbs are freed at exit. */
5973 bool skb_in_use = false;
5975 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5979 case L2CAP_EV_RECV_IFRAME:
5980 switch (l2cap_classify_txseq(chan, txseq)) {
5981 case L2CAP_TXSEQ_EXPECTED:
5982 /* Keep frame for reassembly later */
5983 l2cap_pass_to_tx(chan, control);
5984 skb_queue_tail(&chan->srej_q, skb);
5986 BT_DBG("Queued %p (queue len %d)", skb,
5987 skb_queue_len(&chan->srej_q));
5989 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Head of the SREJ list arrived: drain what can now be reassembled. */
5991 case L2CAP_TXSEQ_EXPECTED_SREJ:
5992 l2cap_seq_list_pop(&chan->srej_list);
5994 l2cap_pass_to_tx(chan, control);
5995 skb_queue_tail(&chan->srej_q, skb);
5997 BT_DBG("Queued %p (queue len %d)", skb,
5998 skb_queue_len(&chan->srej_q));
6000 err = l2cap_rx_queued_iframes(chan);
6005 case L2CAP_TXSEQ_UNEXPECTED:
6006 /* Got a frame that can't be reassembled yet.
6007 * Save it for later, and send SREJs to cover
6008 * the missing frames.
6010 skb_queue_tail(&chan->srej_q, skb);
6012 BT_DBG("Queued %p (queue len %d)", skb,
6013 skb_queue_len(&chan->srej_q));
6015 l2cap_pass_to_tx(chan, control);
6016 l2cap_send_srej(chan, control->txseq);
6018 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6019 /* This frame was requested with an SREJ, but
6020 * some expected retransmitted frames are
6021 * missing. Request retransmission of missing
6024 skb_queue_tail(&chan->srej_q, skb);
6026 BT_DBG("Queued %p (queue len %d)", skb,
6027 skb_queue_len(&chan->srej_q));
6029 l2cap_pass_to_tx(chan, control);
6030 l2cap_send_srej_list(chan, control->txseq);
6032 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6033 /* We've already queued this frame. Drop this copy. */
6034 l2cap_pass_to_tx(chan, control);
6036 case L2CAP_TXSEQ_DUPLICATE:
6037 /* Expecting a later sequence number, so this frame
6038 * was already received. Ignore it completely.
6041 case L2CAP_TXSEQ_INVALID_IGNORE:
/* Unignorable protocol violation: drop the whole connection. */
6043 case L2CAP_TXSEQ_INVALID:
6045 l2cap_send_disconn_req(chan, ECONNRESET);
6049 case L2CAP_EV_RECV_RR:
6050 l2cap_pass_to_tx(chan, control);
6051 if (control->final) {
/* F-bit clears remote-busy and may trigger retransmission. */
6052 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6054 if (!test_and_clear_bit(CONN_REJ_ACT,
6055 &chan->conn_state)) {
6057 l2cap_retransmit_all(chan, control);
6060 l2cap_ertm_send(chan);
6061 } else if (control->poll) {
6062 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6063 &chan->conn_state) &&
6064 chan->unacked_frames) {
6065 __set_retrans_timer(chan);
/* Answer the poll, still asking for the outstanding SREJ tail. */
6068 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6069 l2cap_send_srej_tail(chan);
6071 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6072 &chan->conn_state) &&
6073 chan->unacked_frames)
6074 __set_retrans_timer(chan);
6076 l2cap_send_ack(chan);
6079 case L2CAP_EV_RECV_RNR:
6080 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6081 l2cap_pass_to_tx(chan, control);
6082 if (control->poll) {
6083 l2cap_send_srej_tail(chan);
/* Unpolled RNR: acknowledge with a plain RR S-frame. */
6085 struct l2cap_ctrl rr_control;
6086 memset(&rr_control, 0, sizeof(rr_control));
6087 rr_control.sframe = 1;
6088 rr_control.super = L2CAP_SUPER_RR;
6089 rr_control.reqseq = chan->buffer_seq;
6090 l2cap_send_sframe(chan, &rr_control);
6094 case L2CAP_EV_RECV_REJ:
6095 l2cap_handle_rej(chan, control);
6097 case L2CAP_EV_RECV_SREJ:
6098 l2cap_handle_srej(chan, control);
/* Any frame that was not queued above is no longer needed. */
6102 if (skb && !skb_in_use) {
6103 BT_DBG("Freeing %p", skb);
6110 static int l2cap_finish_move(struct l2cap_chan *chan)
6112 BT_DBG("chan %p", chan);
6114 chan->rx_state = L2CAP_RX_STATE_RECV;
6117 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6119 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6121 return l2cap_resegment(chan);
/* ERTM RX state machine: WAIT_P handler used during a channel move.
 *
 * Waits for a poll (P=1) from the peer; on receipt it resynchronizes
 * the TX side to the peer's reqseq, finishes the move, and answers
 * with the F-bit set.  Other events fall through to the normal RECV
 * handler.
 *
 * NOTE(review): the guard condition selecting the poll path is missing
 * from this extract — presumably it tests control->poll; confirm
 * against the full source.
 */
6124 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6125 struct l2cap_ctrl *control,
6126 struct sk_buff *skb, u8 event)
6130 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
/* Release everything the peer has acknowledged. */
6136 l2cap_process_reqseq(chan, control->reqseq);
6138 if (!skb_queue_empty(&chan->tx_q))
6139 chan->tx_send_head = skb_peek(&chan->tx_q);
6141 chan->tx_send_head = NULL;
6143 /* Rewind next_tx_seq to the point expected
6146 chan->next_tx_seq = control->reqseq;
6147 chan->unacked_frames = 0;
/* Adopt the new link's MTU and re-segment pending data. */
6149 err = l2cap_finish_move(chan);
6153 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6154 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frames are not acceptable while waiting for the poll. */
6156 if (event == L2CAP_EV_RECV_IFRAME)
6159 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM RX state machine: WAIT_F handler used during a channel move.
 *
 * Ignores everything until a frame with the F-bit arrives, then
 * resynchronizes the TX side to the peer's reqseq, adopts the new
 * controller's MTU, re-segments pending data, and finally processes
 * this frame through the normal RECV handler.
 */
6162 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6163 struct l2cap_ctrl *control,
6164 struct sk_buff *skb, u8 event)
/* Only a frame carrying the F-bit ends the wait. */
6168 if (!control->final)
6171 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6173 chan->rx_state = L2CAP_RX_STATE_RECV;
/* Release everything the peer has acknowledged. */
6174 l2cap_process_reqseq(chan, control->reqseq);
6176 if (!skb_queue_empty(&chan->tx_q))
6177 chan->tx_send_head = skb_peek(&chan->tx_q);
6179 chan->tx_send_head = NULL;
6181 /* Rewind next_tx_seq to the point expected
6184 chan->next_tx_seq = control->reqseq;
6185 chan->unacked_frames = 0;
/* Pick the MTU of whichever controller now carries the channel. */
6188 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6190 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6192 err = l2cap_resegment(chan);
/* Now handle the triggering frame itself in the RECV state. */
6195 err = l2cap_rx_state_recv(chan, control, skb, event);
6200 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6202 /* Make sure reqseq is for a packet that has been sent but not acked */
6205 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6206 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6209 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6210 struct sk_buff *skb, u8 event)
6214 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6215 control, skb, event, chan->rx_state);
6217 if (__valid_reqseq(chan, control->reqseq)) {
6218 switch (chan->rx_state) {
6219 case L2CAP_RX_STATE_RECV:
6220 err = l2cap_rx_state_recv(chan, control, skb, event);
6222 case L2CAP_RX_STATE_SREJ_SENT:
6223 err = l2cap_rx_state_srej_sent(chan, control, skb,
6226 case L2CAP_RX_STATE_WAIT_P:
6227 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6229 case L2CAP_RX_STATE_WAIT_F:
6230 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6237 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6238 control->reqseq, chan->next_tx_seq,
6239 chan->expected_ack_seq);
6240 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive path.
 *
 * Streaming mode has no retransmission: an in-order frame is passed to
 * SDU reassembly; anything else discards the partially assembled SDU
 * and the frame itself.  Sequence state is advanced unconditionally so
 * reception resynchronizes on the next frame.
 *
 * NOTE(review): lines freeing the partial SDU / skb in the out-of-order
 * branch appear truncated in this extract.
 */
6246 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6247 struct sk_buff *skb)
6251 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6254 if (l2cap_classify_txseq(chan, control->txseq) ==
6255 L2CAP_TXSEQ_EXPECTED) {
6256 l2cap_pass_to_tx(chan, control);
6258 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6259 __next_seq(chan, chan->buffer_seq));
6261 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6263 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-order frame: throw away any partially reassembled SDU. */
6266 kfree_skb(chan->sdu);
6269 chan->sdu_last_frag = NULL;
6273 BT_DBG("Freeing %p", skb);
/* Always resynchronize on this frame's sequence number. */
6278 chan->last_acked_seq = control->txseq;
6279 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM / streaming mode data frames on a channel.
 *
 * Unpacks the extended/enhanced control field, verifies the FCS,
 * validates payload length against the negotiated MPS, then routes
 * I-frames and S-frames into the appropriate state machine.  Protocol
 * violations disconnect the channel with ECONNRESET.
 */
6284 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6286 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6290 __unpack_control(chan, skb);
6295 * We can just drop the corrupted I-frame here.
6296 * Receiver will miss it and start proper recovery
6297 * procedures and ask for retransmission.
6299 if (l2cap_check_fcs(chan, skb))
/* A start-of-SDU I-frame carries an extra SDU-length field. */
6302 if (!control->sframe && control->sar == L2CAP_SAR_START)
6303 len -= L2CAP_SDULEN_SIZE;
6305 if (chan->fcs == L2CAP_FCS_CRC16)
6306 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS: protocol violation. */
6308 if (len > chan->mps) {
6309 l2cap_send_disconn_req(chan, ECONNRESET);
6313 if (!control->sframe) {
6316 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6317 control->sar, control->reqseq, control->final,
6320 /* Validate F-bit - F=0 always valid, F=1 only
6321 * valid in TX WAIT_F
6323 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6326 if (chan->mode != L2CAP_MODE_STREAMING) {
6327 event = L2CAP_EV_RECV_IFRAME;
6328 err = l2cap_rx(chan, control, skb, event);
6330 err = l2cap_stream_rx(chan, control, skb);
6334 l2cap_send_disconn_req(chan, ECONNRESET);
/* Maps the 2-bit S-frame function field to an RX event. */
6336 const u8 rx_func_to_event[4] = {
6337 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6338 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6341 /* Only I-frames are expected in streaming mode */
6342 if (chan->mode == L2CAP_MODE_STREAMING)
6345 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6346 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation. */
6350 BT_ERR("Trailing bytes: %d in sframe", len);
6351 l2cap_send_disconn_req(chan, ECONNRESET);
6355 /* Validate F and P bits */
6356 if (control->final && (control->poll ||
6357 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6360 event = rx_func_to_event[control->super];
6361 if (l2cap_rx(chan, control, skb, event))
6362 l2cap_send_disconn_req(chan, ECONNRESET);
/* Deliver a frame addressed to a dynamic (connection-oriented) CID.
 *
 * Looks the channel up by source CID; an unknown CID either creates an
 * A2MP channel (for L2CAP_CID_A2MP) or drops the packet.  Basic-mode
 * data goes straight to the channel's recv op; ERTM/streaming frames
 * go through l2cap_data_rcv().  The channel is unlocked on exit.
 *
 * NOTE(review): the NULL-check around the lookup result and the drop/
 * done labels appear truncated in this extract.
 */
6372 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6373 struct sk_buff *skb)
6375 struct l2cap_chan *chan;
6377 chan = l2cap_get_chan_by_scid(conn, cid);
6379 if (cid == L2CAP_CID_A2MP) {
6380 chan = a2mp_channel_create(conn, skb);
6386 l2cap_chan_lock(chan);
6388 BT_DBG("unknown cid 0x%4.4x", cid);
6389 /* Drop packet and return */
6395 BT_DBG("chan %p, len %d", chan, skb->len);
/* Data is only accepted on fully connected channels. */
6397 if (chan->state != BT_CONNECTED)
6400 switch (chan->mode) {
6401 case L2CAP_MODE_BASIC:
6402 /* If socket recv buffers overflows we drop data here
6403 * which is *bad* because L2CAP has to be reliable.
6404 * But we don't have any other choice. L2CAP doesn't
6405 * provide flow control mechanism. */
6407 if (chan->imtu < skb->len)
6410 if (!chan->ops->recv(chan, skb))
6414 case L2CAP_MODE_ERTM:
6415 case L2CAP_MODE_STREAMING:
6416 l2cap_data_rcv(chan, skb);
6420 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6428 l2cap_chan_unlock(chan);
6431 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6432 struct sk_buff *skb)
6434 struct hci_conn *hcon = conn->hcon;
6435 struct l2cap_chan *chan;
6437 if (hcon->type != ACL_LINK)
6440 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6444 BT_DBG("chan %p, len %d", chan, skb->len);
6446 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6449 if (chan->imtu < skb->len)
6452 if (!chan->ops->recv(chan, skb))
6459 static void l2cap_att_channel(struct l2cap_conn *conn,
6460 struct sk_buff *skb)
6462 struct hci_conn *hcon = conn->hcon;
6463 struct l2cap_chan *chan;
6465 if (hcon->type != LE_LINK)
6468 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6469 conn->src, conn->dst);
6473 BT_DBG("chan %p, len %d", chan, skb->len);
6475 if (chan->imtu < skb->len)
6478 if (!chan->ops->recv(chan, skb))
6485 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6487 struct l2cap_hdr *lh = (void *) skb->data;
6491 skb_pull(skb, L2CAP_HDR_SIZE);
6492 cid = __le16_to_cpu(lh->cid);
6493 len = __le16_to_cpu(lh->len);
6495 if (len != skb->len) {
6500 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6503 case L2CAP_CID_SIGNALING:
6504 l2cap_sig_channel(conn, skb);
6507 case L2CAP_CID_CONN_LESS:
6508 psm = get_unaligned((__le16 *) skb->data);
6509 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6510 l2cap_conless_channel(conn, psm, skb);
6514 l2cap_att_channel(conn, skb);
6517 case L2CAP_CID_LE_SIGNALING:
6518 l2cap_le_sig_channel(conn, skb);
6522 if (smp_sig_channel(conn, skb))
6523 l2cap_conn_del(conn->hcon, EACCES);
6527 l2cap_data_channel(conn, cid, skb);
6532 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should an incoming ACL connection be accepted?
 *
 * Scans all listening channels; ones bound to this adapter's address
 * are exact matches (lm1), wildcard (BDADDR_ANY) listeners collect
 * into lm2.  Exact matches take precedence in the returned link-mode
 * mask (HCI_LM_ACCEPT, optionally HCI_LM_MASTER).
 *
 * NOTE(review): the statement marking an exact match (presumably
 * exact++) is missing from this extract — confirm against full source.
 */
6534 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6536 int exact = 0, lm1 = 0, lm2 = 0;
6537 struct l2cap_chan *c;
6539 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6541 /* Find listening sockets and check their link_mode */
6542 read_lock(&chan_list_lock);
6543 list_for_each_entry(c, &chan_list, global_l) {
6544 struct sock *sk = c->sk;
6546 if (c->state != BT_LISTEN)
/* Listener bound to this adapter's own address: exact match. */
6549 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6550 lm1 |= HCI_LM_ACCEPT;
6551 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6552 lm1 |= HCI_LM_MASTER;
/* Wildcard listener: used only if no exact match exists. */
6554 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6555 lm2 |= HCI_LM_ACCEPT;
6556 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6557 lm2 |= HCI_LM_MASTER;
6560 read_unlock(&chan_list_lock);
6562 return exact ? lm1 : lm2;
6565 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6567 struct l2cap_conn *conn;
6569 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6572 conn = l2cap_conn_add(hcon);
6574 l2cap_conn_ready(conn);
6576 l2cap_conn_del(hcon, bt_to_errno(status));
6580 int l2cap_disconn_ind(struct hci_conn *hcon)
6582 struct l2cap_conn *conn = hcon->l2cap_data;
6584 BT_DBG("hcon %p", hcon);
6587 return HCI_ERROR_REMOTE_USER_TERM;
6588 return conn->disc_reason;
6591 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6593 BT_DBG("hcon %p reason %d", hcon, reason);
6595 l2cap_conn_del(hcon, bt_to_errno(reason));
6598 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6600 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6603 if (encrypt == 0x00) {
6604 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6605 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6606 } else if (chan->sec_level == BT_SECURITY_HIGH)
6607 l2cap_chan_close(chan, ECONNREFUSED);
6609 if (chan->sec_level == BT_SECURITY_MEDIUM)
6610 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption state changed on a link.
 *
 * LE links are handled through SMP; BR/EDR links walk every channel on
 * the connection and advance each one's state machine: ready ATT
 * channels, channels mid-connect (BT_CONNECT), and channels awaiting
 * security before answering a connect request (BT_CONNECT2).
 *
 * NOTE(review): several lines (early returns, braces, some BT_DBG
 * arguments) appear truncated in this extract.
 */
6614 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6616 struct l2cap_conn *conn = hcon->l2cap_data;
6617 struct l2cap_chan *chan;
6622 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
/* LE security is driven by SMP, not by the channel walk below. */
6624 if (hcon->type == LE_LINK) {
6625 if (!status && encrypt)
6626 smp_distribute_keys(conn, 0);
6627 cancel_delayed_work(&conn->security_timer);
6630 mutex_lock(&conn->chan_lock);
6632 list_for_each_entry(chan, &conn->chan_l, list) {
6633 l2cap_chan_lock(chan);
6635 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6636 state_to_string(chan->state));
/* A2MP fixed channels don't participate in link security. */
6638 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6639 l2cap_chan_unlock(chan);
6643 if (chan->scid == L2CAP_CID_ATT) {
6644 if (!status && encrypt) {
6645 chan->sec_level = hcon->sec_level;
6646 l2cap_chan_ready(chan);
6649 l2cap_chan_unlock(chan);
/* Skip channels with no security procedure outstanding. */
6653 if (!__l2cap_no_conn_pending(chan)) {
6654 l2cap_chan_unlock(chan);
6658 if (!status && (chan->state == BT_CONNECTED ||
6659 chan->state == BT_CONFIG)) {
6660 struct sock *sk = chan->sk;
6662 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6663 sk->sk_state_change(sk);
6665 l2cap_check_encryption(chan, encrypt);
6666 l2cap_chan_unlock(chan);
/* Security completed while we were initiating: send connect req. */
6670 if (chan->state == BT_CONNECT) {
6672 l2cap_start_connection(chan);
6674 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Remote-initiated connect was waiting on security: answer it now. */
6676 } else if (chan->state == BT_CONNECT2) {
6677 struct sock *sk = chan->sk;
6678 struct l2cap_conn_rsp rsp;
6684 if (test_bit(BT_SK_DEFER_SETUP,
6685 &bt_sk(sk)->flags)) {
6686 res = L2CAP_CR_PEND;
6687 stat = L2CAP_CS_AUTHOR_PEND;
6688 chan->ops->defer(chan);
6690 __l2cap_state_change(chan, BT_CONFIG);
6691 res = L2CAP_CR_SUCCESS;
6692 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection. */
6695 __l2cap_state_change(chan, BT_DISCONN);
6696 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6697 res = L2CAP_CR_SEC_BLOCK;
6698 stat = L2CAP_CS_NO_INFO;
6703 rsp.scid = cpu_to_le16(chan->dcid);
6704 rsp.dcid = cpu_to_le16(chan->scid);
6705 rsp.result = cpu_to_le16(res);
6706 rsp.status = cpu_to_le16(stat);
6707 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Successful accept: immediately start configuration. */
6710 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6711 res == L2CAP_CR_SUCCESS) {
6713 set_bit(CONF_REQ_SENT, &chan->conf_state);
6714 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6716 l2cap_build_conf_req(chan, buf),
6718 chan->num_conf_req++;
6722 l2cap_chan_unlock(chan);
6725 mutex_unlock(&conn->chan_lock);
/* HCI callback: an ACL data fragment arrived for this link.
 *
 * Reassembles L2CAP frames from ACL start/continuation fragments using
 * conn->rx_skb / conn->rx_len, and feeds complete frames into
 * l2cap_recv_frame().  Malformed fragmentation marks the connection
 * unreliable (ECOMM).
 *
 * NOTE(review): several lines (case ACL_CONT label, early returns,
 * allocation-failure handling) appear truncated in this extract.
 */
6730 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6732 struct l2cap_conn *conn = hcon->l2cap_data;
6733 struct l2cap_hdr *hdr;
6736 /* For AMP controller do not create l2cap conn */
6737 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6741 conn = l2cap_conn_add(hcon);
6746 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6750 case ACL_START_NO_FLUSH:
/* A new start fragment while one is pending means we lost data. */
6753 BT_ERR("Unexpected start frame (len %d)", skb->len);
6754 kfree_skb(conn->rx_skb);
6755 conn->rx_skb = NULL;
6757 l2cap_conn_unreliable(conn, ECOMM);
6760 /* Start fragment always begin with Basic L2CAP header */
6761 if (skb->len < L2CAP_HDR_SIZE) {
6762 BT_ERR("Frame is too short (len %d)", skb->len);
6763 l2cap_conn_unreliable(conn, ECOMM);
6767 hdr = (struct l2cap_hdr *) skb->data;
6768 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6770 if (len == skb->len) {
6771 /* Complete frame received */
6772 l2cap_recv_frame(conn, skb);
6776 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6778 if (skb->len > len) {
6779 BT_ERR("Frame is too long (len %d, expected len %d)",
6781 l2cap_conn_unreliable(conn, ECOMM);
6785 /* Allocate skb for the complete frame (with header) */
6786 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6790 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still missing. */
6792 conn->rx_len = len - skb->len;
6796 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6798 if (!conn->rx_len) {
6799 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6800 l2cap_conn_unreliable(conn, ECOMM);
6804 if (skb->len > conn->rx_len) {
6805 BT_ERR("Fragment is too long (len %d, expected %d)",
6806 skb->len, conn->rx_len);
6807 kfree_skb(conn->rx_skb);
6808 conn->rx_skb = NULL;
6810 l2cap_conn_unreliable(conn, ECOMM);
6814 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6816 conn->rx_len -= skb->len;
6818 if (!conn->rx_len) {
6819 /* Complete frame received. l2cap_recv_frame
6820 * takes ownership of the skb so set the global
6821 * rx_skb pointer to NULL first.
6823 struct sk_buff *rx_skb = conn->rx_skb;
6824 conn->rx_skb = NULL;
6825 l2cap_recv_frame(conn, rx_skb);
/* debugfs: dump one line per registered L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode).
 */
6835 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6837 struct l2cap_chan *c;
/* chan_list is protected by chan_list_lock for readers. */
6839 read_lock(&chan_list_lock);
6841 list_for_each_entry(c, &chan_list, global_l) {
6842 struct sock *sk = c->sk;
6844 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6845 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6846 c->state, __le16_to_cpu(c->psm),
6847 c->scid, c->dcid, c->imtu, c->omtu,
6848 c->sec_level, c->mode);
6851 read_unlock(&chan_list_lock);
/* debugfs open hook: bind the seq_file single-shot show callback. */
6856 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6858 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
6861 static const struct file_operations l2cap_debugfs_fops = {
6862 .open = l2cap_debugfs_open,
6864 .llseek = seq_lseek,
6865 .release = single_release,
/* Dentry of the created debugfs file, removed in l2cap_exit(). */
6868 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and, best-effort,
 * create the debugfs entry (a debugfs failure is only logged).
 */
6870 int __init l2cap_init(void)
6874 err = l2cap_init_sockets();
6879 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6880 NULL, &l2cap_debugfs_fops);
6882 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs entry and unregister the sockets. */
6888 void l2cap_exit(void)
6890 debugfs_remove(l2cap_debugfs);
6891 l2cap_cleanup_sockets();
/* Runtime knob: setting disable_ertm=1 forces basic mode everywhere. */
6894 module_param(disable_ertm, bool, 0644);
6895 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");