2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/* Locally supported L2CAP feature mask (consulted by l2cap_mode_supported())
 * and the fixed-channel bitmap for this host.
 */
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global list of every l2cap_chan in the system, guarded by chan_list_lock. */
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in the file.
 * NOTE(review): some declaration lines look elided in this extraction
 * (e.g. the trailing "void *data" parameter of l2cap_send_cmd) -- confirm
 * against the original source before editing.
 */
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
/* Map an HCI address type to a BDADDR_* type for an LE link: public
 * addresses map to BDADDR_LE_PUBLIC, anything else to BDADDR_LE_RANDOM.
 * NOTE(review): the non-LE branch is not visible in this extraction --
 * presumably it returns a BR/EDR address type; confirm against the original.
 */
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l looking for the channel with the given destination CID.
 * Caller must hold conn->chan_lock. (Match/return lines elided here.)
 */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
/* As above, but matches on the source CID. Caller holds conn->chan_lock. */
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
/* Lookup by the signalling-command identifier instead of a CID.
 * Caller must hold conn->chan_lock.
 */
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
/* Find a channel on the global list bound to the same PSM and source
 * address. Caller must hold chan_list_lock.
 */
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM. A non-zero psm that is already in use is
 * rejected; with psm == 0 an odd dynamic PSM in [0x1001, 0x1100) is
 * auto-allocated (stepping by 2 keeps the mandatory-odd low octet).
 */
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
/* Record a fixed source CID on the channel (assignment line elided here). */
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic CID on this connection; caller holds
 * conn->chan_lock so the scan cannot race channel insertion.
 */
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move a channel to a new BT_* state via the ops->state_change callback;
 * the "__" variant presumably expects the socket already locked -- confirm.
 */
226 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state);
/* Locked wrapper around __l2cap_state_change() (lock/unlock lines elided). */
235 static void l2cap_state_change(struct l2cap_chan *chan, int state)
237 struct sock *sk = chan->sk;
240 __l2cap_state_change(chan, state);
/* Set sk->sk_err on the backing socket (assignment elided in extraction). */
244 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
246 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(). */
251 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
253 struct sock *sk = chan->sk;
256 __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only when the monitor timer is
 * not already pending and a retransmit timeout has been negotiated.
 */
260 static void __set_retrans_timer(struct l2cap_chan *chan)
262 if (!delayed_work_pending(&chan->monitor_timer) &&
263 chan->retrans_timeout) {
264 l2cap_set_timer(chan, &chan->retrans_timer,
265 msecs_to_jiffies(chan->retrans_timeout));
/* Switch from the retransmission timer to the monitor timer. */
269 static void __set_monitor_timer(struct l2cap_chan *chan)
271 __clear_retrans_timer(chan);
272 if (chan->monitor_timeout) {
273 l2cap_set_timer(chan, &chan->monitor_timer,
274 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of a queue for the skb carrying ERTM tx sequence number seq. */
278 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
283 skb_queue_walk(head, skb) {
284 if (bt_cb(skb)->control.txseq == seq)
291 /* ---- L2CAP sequence number lists ---- */
293 /* For ERTM, ordered lists of sequence numbers must be tracked for
294 * SREJ requests that are received and for frames that are to be
295 * retransmitted. These seq_list functions implement a singly-linked
296 * list in an array, where membership in the list can also be checked
297 * in constant time. Items can also be added to the tail of the list
298 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array; size is rounded up to a power of
 * two so (seq & mask) indexes it. Returns 0 on success (error path for a
 * failed kmalloc is elided in this extraction).
 */
302 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
304 size_t alloc_size, i;
306 /* Allocated size is a power of 2 to map sequence numbers
307 * (which may be up to 14 bits) in to a smaller array that is
308 * sized for the negotiated ERTM transmit windows.
310 alloc_size = roundup_pow_of_two(size);
312 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
316 seq_list->mask = alloc_size - 1;
317 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
318 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
319 for (i = 0; i < alloc_size; i++)
320 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a no-op, so safe when unset). */
325 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
327 kfree(seq_list->list);
330 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
333 /* Constant-time check for list membership */
334 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove seq from the list: O(1) for the head, otherwise a walk to find
 * the predecessor so the entry can be unlinked. Returns the removed seq,
 * or L2CAP_SEQ_LIST_CLEAR when the list is empty / seq is absent.
 */
337 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
339 u16 mask = seq_list->mask;
341 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
342 /* In case someone tries to pop the head of an empty list */
343 return L2CAP_SEQ_LIST_CLEAR;
344 } else if (seq_list->head == seq) {
345 /* Head can be removed in constant time */
346 seq_list->head = seq_list->list[seq & mask];
347 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
349 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
350 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
351 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
354 /* Walk the list to find the sequence number */
355 u16 prev = seq_list->head;
356 while (seq_list->list[prev & mask] != seq) {
357 prev = seq_list->list[prev & mask];
358 if (prev == L2CAP_SEQ_LIST_TAIL)
359 return L2CAP_SEQ_LIST_CLEAR;
362 /* Unlink the number from the list and clear it */
363 seq_list->list[prev & mask] = seq_list->list[seq & mask];
364 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
365 if (seq_list->tail == seq)
366 seq_list->tail = prev;
371 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
373 /* Remove the head in constant time */
374 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset the whole structure to the empty state; no-op when already empty. */
377 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
381 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
384 for (i = 0; i <= seq_list->mask; i++)
385 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
387 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
388 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append seq at the tail; duplicates are ignored (entry already set). */
391 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
393 u16 mask = seq_list->mask;
395 /* All appends happen in constant time */
397 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
400 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
401 seq_list->head = seq;
403 seq_list->list[seq_list->tail & mask] = seq;
405 seq_list->tail = seq;
406 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan->chan_timer: picks an errno based on the
 * channel state, closes the channel under conn->chan_lock + channel lock,
 * then drops the reference taken when the timer was armed.
 */
409 static void l2cap_chan_timeout(struct work_struct *work)
411 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
413 struct l2cap_conn *conn = chan->conn;
416 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
418 mutex_lock(&conn->chan_lock);
419 l2cap_chan_lock(chan);
421 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
422 reason = ECONNREFUSED;
423 else if (chan->state == BT_CONNECT &&
424 chan->sec_level != BT_SECURITY_SDP)
425 reason = ECONNREFUSED;
429 l2cap_chan_close(chan, reason);
431 l2cap_chan_unlock(chan);
433 chan->ops->close(chan);
434 mutex_unlock(&conn->chan_lock);
436 l2cap_chan_put(chan);
/* Allocate a new channel, link it onto the global chan_list, initialise
 * its timer/lock/refcount and mark configuration as not yet complete.
 * Returns the channel (NULL-check path elided in this extraction).
 */
439 struct l2cap_chan *l2cap_chan_create(void)
441 struct l2cap_chan *chan;
443 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
447 mutex_init(&chan->lock);
449 write_lock(&chan_list_lock);
450 list_add(&chan->global_l, &chan_list);
451 write_unlock(&chan_list_lock);
453 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
455 chan->state = BT_OPEN;
457 kref_init(&chan->kref);
459 /* This flag is cleared in l2cap_chan_ready() */
460 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
462 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list and free (kfree
 * elided in this extraction).
 */
467 static void l2cap_chan_destroy(struct kref *kref)
469 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
471 BT_DBG("chan %p", chan);
473 write_lock(&chan_list_lock);
474 list_del(&chan->global_l);
475 write_unlock(&chan_list_lock);
/* Take a reference on the channel (kref_get elided in extraction). */
480 void l2cap_chan_hold(struct l2cap_chan *c)
482 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference; frees via l2cap_chan_destroy() on the last put. */
487 void l2cap_chan_put(struct l2cap_chan *c)
489 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
491 kref_put(&c->kref, l2cap_chan_destroy);
/* Install the spec-default ERTM/security parameters on a fresh channel. */
494 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
496 chan->fcs = L2CAP_FCS_CRC16;
497 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
498 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
499 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
500 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
501 chan->sec_level = BT_SECURITY_LOW;
503 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection: pick SCID/DCID and MTU according to
 * the channel type (connection-oriented, connectionless, A2MP fixed, or
 * raw/signalling), seed default EFS parameters, then take references on
 * both the channel and the ACL link and add it to conn->chan_l.
 * Caller must hold conn->chan_lock.
 */
506 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
508 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
509 __le16_to_cpu(chan->psm), chan->dcid);
511 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
515 switch (chan->chan_type) {
516 case L2CAP_CHAN_CONN_ORIENTED:
517 if (conn->hcon->type == LE_LINK) {
519 chan->omtu = L2CAP_DEFAULT_MTU;
520 if (chan->dcid == L2CAP_CID_ATT)
521 chan->scid = L2CAP_CID_ATT;
523 chan->scid = l2cap_alloc_cid(conn);
525 /* Alloc CID for connection-oriented socket */
526 chan->scid = l2cap_alloc_cid(conn);
527 chan->omtu = L2CAP_DEFAULT_MTU;
531 case L2CAP_CHAN_CONN_LESS:
532 /* Connectionless socket */
533 chan->scid = L2CAP_CID_CONN_LESS;
534 chan->dcid = L2CAP_CID_CONN_LESS;
535 chan->omtu = L2CAP_DEFAULT_MTU;
538 case L2CAP_CHAN_CONN_FIX_A2MP:
539 chan->scid = L2CAP_CID_A2MP;
540 chan->dcid = L2CAP_CID_A2MP;
541 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
542 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
546 /* Raw socket can send/recv signalling messages only */
547 chan->scid = L2CAP_CID_SIGNALING;
548 chan->dcid = L2CAP_CID_SIGNALING;
549 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort extended flow spec for this channel. */
552 chan->local_id = L2CAP_BESTEFFORT_ID;
553 chan->local_stype = L2CAP_SERV_BESTEFFORT;
554 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
555 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
556 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
557 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
559 l2cap_chan_hold(chan);
561 hci_conn_hold(conn->hcon);
563 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
566 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
568 mutex_lock(&conn->chan_lock);
569 __l2cap_chan_add(conn, chan);
570 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop its timer, unlink it, drop
 * the references taken in __l2cap_chan_add(), disconnect any AMP logical
 * link, notify the owner via ops->teardown(), and purge ERTM/streaming
 * queues and sequence lists as appropriate for the mode.
 */
573 void l2cap_chan_del(struct l2cap_chan *chan, int err)
575 struct l2cap_conn *conn = chan->conn;
577 __clear_chan_timer(chan);
579 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
582 struct amp_mgr *mgr = conn->hcon->amp_mgr;
583 /* Delete from channel list */
584 list_del(&chan->list);
586 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an extra hci_conn reference. */
590 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
591 hci_conn_drop(conn->hcon);
593 if (mgr && mgr->bredr_chan == chan)
594 mgr->bredr_chan = NULL;
597 if (chan->hs_hchan) {
598 struct hci_chan *hs_hchan = chan->hs_hchan;
600 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
601 amp_disconnect_logical_link(hs_hchan);
604 chan->ops->teardown(chan, err);
606 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
610 case L2CAP_MODE_BASIC:
613 case L2CAP_MODE_ERTM:
614 __clear_retrans_timer(chan);
615 __clear_monitor_timer(chan);
616 __clear_ack_timer(chan);
618 skb_queue_purge(&chan->srej_q);
620 l2cap_seq_list_free(&chan->srej_list);
621 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough -- streaming mode also purges the tx queue */
625 case L2CAP_MODE_STREAMING:
626 skb_queue_purge(&chan->tx_q);
/* Close a channel according to its current state: connected ACL channels
 * get a Disconnect Request (with a timer so teardown cannot hang), a
 * pending incoming connection (BT_CONNECT2) is refused with a Connection
 * Response, and everything else is deleted directly. The specific state
 * case labels are elided in this extraction.
 */
633 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
635 struct l2cap_conn *conn = chan->conn;
637 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
639 switch (chan->state) {
641 chan->ops->teardown(chan, 0);
646 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
647 conn->hcon->type == ACL_LINK) {
648 struct sock *sk = chan->sk;
649 __set_chan_timer(chan, sk->sk_sndtimeo);
650 l2cap_send_disconn_req(chan, reason);
652 l2cap_chan_del(chan, reason);
656 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
657 conn->hcon->type == ACL_LINK) {
658 struct l2cap_conn_rsp rsp;
/* Deferred setup is reported as blocked by security, otherwise bad PSM. */
661 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
662 result = L2CAP_CR_SEC_BLOCK;
664 result = L2CAP_CR_BAD_PSM;
666 l2cap_state_change(chan, BT_DISCONN);
668 rsp.scid = cpu_to_le16(chan->dcid);
669 rsp.dcid = cpu_to_le16(chan->scid);
670 rsp.result = cpu_to_le16(result);
671 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
672 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
676 l2cap_chan_del(chan, reason);
681 l2cap_chan_del(chan, reason);
685 chan->ops->teardown(chan, 0);
/* Translate the channel type + security level into an HCI authentication
 * requirement. SDP-like PSMs (SDP, 3DSP) are downgraded from LOW to the
 * special SDP security level and never request bonding.
 */
690 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
692 switch (chan->chan_type) {
694 switch (chan->sec_level) {
695 case BT_SECURITY_HIGH:
696 return HCI_AT_DEDICATED_BONDING_MITM;
697 case BT_SECURITY_MEDIUM:
698 return HCI_AT_DEDICATED_BONDING;
700 return HCI_AT_NO_BONDING;
703 case L2CAP_CHAN_CONN_LESS:
704 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
705 if (chan->sec_level == BT_SECURITY_LOW)
706 chan->sec_level = BT_SECURITY_SDP;
708 if (chan->sec_level == BT_SECURITY_HIGH)
709 return HCI_AT_NO_BONDING_MITM;
711 return HCI_AT_NO_BONDING;
713 case L2CAP_CHAN_CONN_ORIENTED:
714 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
715 if (chan->sec_level == BT_SECURITY_LOW)
716 chan->sec_level = BT_SECURITY_SDP;
718 if (chan->sec_level == BT_SECURITY_HIGH)
719 return HCI_AT_NO_BONDING_MITM;
721 return HCI_AT_NO_BONDING;
/* Default: general bonding scaled by the requested security level. */
725 switch (chan->sec_level) {
726 case BT_SECURITY_HIGH:
727 return HCI_AT_GENERAL_BONDING_MITM;
728 case BT_SECURITY_MEDIUM:
729 return HCI_AT_GENERAL_BONDING;
731 return HCI_AT_NO_BONDING;
737 /* Service level security */
738 int l2cap_chan_check_security(struct l2cap_chan *chan)
740 struct l2cap_conn *conn = chan->conn;
743 auth_type = l2cap_get_auth_type(chan);
745 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-owned range under conn->lock.
 */
748 static u8 l2cap_get_ident(struct l2cap_conn *conn)
752 /* Get next available identificator.
753 * 1 - 128 are used by kernel.
754 * 129 - 199 are reserved.
755 * 200 - 254 are used by utilities like l2ping, etc.
758 spin_lock(&conn->lock);
760 if (++conn->tx_ident > 128)
765 spin_unlock(&conn->lock);
/* Build a signalling command PDU and push it onto the ACL link at maximum
 * priority, forcing the link active; non-flushable start flag is used when
 * the controller supports it.
 */
770 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
773 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
776 BT_DBG("code 0x%2.2x", code);
781 if (lmp_no_flush_capable(conn->hcon->hdev))
782 flags = ACL_START_NO_FLUSH;
786 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
787 skb->priority = HCI_PRIO_MAX;
789 hci_send_acl(conn->hchan, skb, flags);
/* A channel is "moving" (AMP channel move in progress) unless its move
 * state is stable or still in the prepare phase.
 */
792 static bool __chan_is_moving(struct l2cap_chan *chan)
794 return chan->move_state != L2CAP_MOVE_STABLE &&
795 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for the channel: over the high-speed AMP link when
 * one is attached and the channel is not mid-move, otherwise over the
 * BR/EDR ACL link with the channel's flush/force-active flags.
 */
798 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
800 struct hci_conn *hcon = chan->conn->hcon;
803 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
806 if (chan->hs_hcon && !__chan_is_moving(chan)) {
808 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
815 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
816 lmp_no_flush_capable(hcon->hdev))
817 flags = ACL_START_NO_FLUSH;
821 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
822 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into an l2cap_ctrl:
 * S-frames carry poll/supervise bits, I-frames carry sar/txseq.
 */
825 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
827 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
828 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
830 if (enh & L2CAP_CTRL_FRAME_TYPE) {
833 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
834 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
841 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
842 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decoding for the 32-bit extended control field layout. */
849 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
851 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
852 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
854 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
857 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
858 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
865 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
866 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from an incoming frame, choosing the
 * extended or enhanced layout based on FLAG_EXT_CTRL.
 */
873 static inline void __unpack_control(struct l2cap_chan *chan,
876 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
877 __unpack_extended_control(get_unaligned_le32(skb->data),
878 &bt_cb(skb)->control);
879 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
881 __unpack_enhanced_control(get_unaligned_le16(skb->data),
882 &bt_cb(skb)->control);
883 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control(): encode to a 32-bit field. */
887 static u32 __pack_extended_control(struct l2cap_ctrl *control)
891 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
892 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
894 if (control->sframe) {
895 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
896 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
897 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
899 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
900 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control(): encode to a 16-bit field. */
906 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
910 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
911 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
913 if (control->sframe) {
914 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
915 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
916 packed |= L2CAP_CTRL_FRAME_TYPE;
918 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
919 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an outgoing skb just after the
 * basic L2CAP header, in the layout selected by FLAG_EXT_CTRL.
 */
925 static inline void __pack_control(struct l2cap_chan *chan,
926 struct l2cap_ctrl *control,
929 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
930 put_unaligned_le32(__pack_extended_control(control),
931 skb->data + L2CAP_HDR_SIZE);
933 put_unaligned_le16(__pack_enhanced_control(control),
934 skb->data + L2CAP_HDR_SIZE);
/* Header size for an ERTM frame: extended vs enhanced control layout. */
938 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
940 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
941 return L2CAP_EXT_HDR_SIZE;
943 return L2CAP_ENH_HDR_SIZE;
/* Build a complete S-frame PDU: basic header, packed control field and,
 * when CRC16 FCS is negotiated, a trailing checksum over the whole frame.
 */
946 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
950 struct l2cap_hdr *lh;
951 int hlen = __ertm_hdr_size(chan);
953 if (chan->fcs == L2CAP_FCS_CRC16)
954 hlen += L2CAP_FCS_SIZE;
956 skb = bt_skb_alloc(hlen, GFP_KERNEL);
959 return ERR_PTR(-ENOMEM);
961 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
962 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
963 lh->cid = cpu_to_le16(chan->dcid);
965 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
966 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
968 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
970 if (chan->fcs == L2CAP_FCS_CRC16) {
971 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
972 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
975 skb->priority = HCI_PRIO_MAX;
/* Transmit an ERTM supervisory frame: updates RNR-sent bookkeeping and the
 * last-acked sequence number, packs the control field for the negotiated
 * layout and sends the resulting PDU. No-ops while an AMP move is active.
 */
979 static void l2cap_send_sframe(struct l2cap_chan *chan,
980 struct l2cap_ctrl *control)
985 BT_DBG("chan %p, control %p", chan, control)
987 if (!control->sframe)
990 if (__chan_is_moving(chan))
993 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
997 if (control->super == L2CAP_SUPER_RR)
998 clear_bit(CONN_RNR_SENT, &chan->conn_state);
999 else if (control->super == L2CAP_SUPER_RNR)
1000 set_bit(CONN_RNR_SENT, &chan->conn_state);
1002 if (control->super != L2CAP_SUPER_SREJ) {
1003 chan->last_acked_seq = control->reqseq;
1004 __clear_ack_timer(chan);
1007 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1008 control->final, control->poll, control->super);
1010 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1011 control_field = __pack_extended_control(control);
1013 control_field = __pack_enhanced_control(control);
1015 skb = l2cap_create_sframe_pdu(chan, control_field);
1017 l2cap_do_send(chan, skb);
/* Send an RR (or RNR when the local side is busy) acknowledging the
 * current buffer sequence number, with the requested poll bit.
 */
1020 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1022 struct l2cap_ctrl control;
1024 BT_DBG("chan %p, poll %d", chan, poll);
1026 memset(&control, 0, sizeof(control));
1028 control.poll = poll;
1030 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1031 control.super = L2CAP_SUPER_RNR;
1033 control.super = L2CAP_SUPER_RR;
1035 control.reqseq = chan->buffer_seq;
1036 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
1039 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1041 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may be created on an AMP controller: high
 * speed must be enabled, the peer must advertise the A2MP fixed channel,
 * and at least one non-BR/EDR AMP controller must be up locally.
 */
1044 static bool __amp_capable(struct l2cap_chan *chan)
1046 struct l2cap_conn *conn = chan->conn;
1047 struct hci_dev *hdev;
1048 bool amp_available = false;
1050 if (!conn->hs_enabled)
1053 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1056 read_lock(&hci_dev_list_lock);
1057 list_for_each_entry(hdev, &hci_dev_list, list) {
1058 if (hdev->amp_type != AMP_TYPE_BREDR &&
1059 test_bit(HCI_UP, &hdev->flags)) {
1060 amp_available = true;
1064 read_unlock(&hci_dev_list_lock);
1066 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1067 return amp_available;
/* Validate the channel's extended flow spec (body elided in extraction). */
1072 static bool l2cap_check_efs(struct l2cap_chan *chan)
1074 /* Check EFS parameters */
/* Send an L2CAP Connection Request for this channel and mark the connect
 * as pending until the response arrives.
 */
1078 void l2cap_send_conn_req(struct l2cap_chan *chan)
1080 struct l2cap_conn *conn = chan->conn;
1081 struct l2cap_conn_req req;
1083 req.scid = cpu_to_le16(chan->scid);
1084 req.psm = chan->psm;
1086 chan->ident = l2cap_get_ident(conn);
1088 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1090 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send a Create Channel Request targeting the given AMP controller id. */
1093 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1095 struct l2cap_create_chan_req req;
1096 req.scid = cpu_to_le16(chan->scid);
1097 req.psm = chan->psm;
1098 req.amp_id = amp_id;
1100 chan->ident = l2cap_get_ident(chan->conn);
1102 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all ERTM timers, reset
 * retry bookkeeping on queued frames, clear SREJ state and park the
 * state machines in XMIT / RX_STATE_MOVE with the remote marked busy.
 */
1106 static void l2cap_move_setup(struct l2cap_chan *chan)
1108 struct sk_buff *skb;
1110 BT_DBG("chan %p", chan);
1112 if (chan->mode != L2CAP_MODE_ERTM)
1115 __clear_retrans_timer(chan);
1116 __clear_monitor_timer(chan);
1117 __clear_ack_timer(chan);
1119 chan->retry_count = 0;
1120 skb_queue_walk(&chan->tx_q, skb) {
1121 if (bt_cb(skb)->control.retries)
1122 bt_cb(skb)->control.retries = 1;
1127 chan->expected_tx_seq = chan->buffer_seq;
1129 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1130 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1131 l2cap_seq_list_clear(&chan->retrans_list);
1132 l2cap_seq_list_clear(&chan->srej_list);
1133 skb_queue_purge(&chan->srej_q);
1135 chan->tx_state = L2CAP_TX_STATE_XMIT;
1136 chan->rx_state = L2CAP_RX_STATE_MOVE;
1138 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to the stable state and, for ERTM, resume
 * the receive state machine according to the role this side played.
 */
1141 static void l2cap_move_done(struct l2cap_chan *chan)
1143 u8 move_role = chan->move_role;
1144 BT_DBG("chan %p", chan);
1146 chan->move_state = L2CAP_MOVE_STABLE;
1147 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1149 if (chan->mode != L2CAP_MODE_ERTM)
1152 switch (move_role) {
1153 case L2CAP_MOVE_ROLE_INITIATOR:
1154 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1155 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1157 case L2CAP_MOVE_ROLE_RESPONDER:
1158 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel fully connected and notify its owner. */
1163 static void l2cap_chan_ready(struct l2cap_chan *chan)
1165 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1166 chan->conf_state = 0;
1167 __clear_chan_timer(chan);
1169 chan->state = BT_CONNECTED;
1171 chan->ops->ready(chan);
/* Start channel establishment: discover AMP controllers first when the
 * channel is AMP capable, otherwise go straight to a Connection Request.
 */
1174 static void l2cap_start_connection(struct l2cap_chan *chan)
1176 if (__amp_capable(chan)) {
1177 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1178 a2mp_discover_amp(chan);
1180 l2cap_send_conn_req(chan);
/* Kick off establishment for a channel: LE links are ready immediately;
 * on BR/EDR we first make sure the feature-mask information exchange has
 * completed (sending an Information Request and arming its timer when
 * not), then start the connection once security checks pass.
 */
1184 static void l2cap_do_start(struct l2cap_chan *chan)
1186 struct l2cap_conn *conn = chan->conn;
1188 if (conn->hcon->type == LE_LINK) {
1189 l2cap_chan_ready(chan);
1193 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1194 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1197 if (l2cap_chan_check_security(chan) &&
1198 __l2cap_no_conn_pending(chan)) {
1199 l2cap_start_connection(chan);
1202 struct l2cap_info_req req;
1203 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1205 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1206 conn->info_ident = l2cap_get_ident(conn);
1208 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1210 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check whether an ERTM/streaming mode is supported both locally and by
 * the remote feature mask (basic-mode handling elided in extraction).
 */
1215 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1217 u32 local_feat_mask = l2cap_feat_mask;
1219 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1222 case L2CAP_MODE_ERTM:
1223 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1224 case L2CAP_MODE_STREAMING:
1225 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request (stopping ERTM timers first), or for A2MP
 * fixed channels simply flip to BT_DISCONN; finally record the error on
 * the channel.
 */
1231 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1233 struct sock *sk = chan->sk;
1234 struct l2cap_conn *conn = chan->conn;
1235 struct l2cap_disconn_req req;
1240 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1241 __clear_retrans_timer(chan);
1242 __clear_monitor_timer(chan);
1243 __clear_ack_timer(chan);
1246 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1247 l2cap_state_change(chan, BT_DISCONN);
1251 req.dcid = cpu_to_le16(chan->dcid);
1252 req.scid = cpu_to_le16(chan->scid);
1253 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1257 __l2cap_state_change(chan, BT_DISCONN);
1258 __l2cap_chan_set_err(chan, err);
1262 /* ---- L2CAP connections ---- */
/* Drive all connection-oriented channels on a connection forward once the
 * feature exchange is done: outgoing channels in BT_CONNECT are started
 * (or closed when their mode is unsupported), and incoming channels in
 * BT_CONNECT2 are answered with success, authorization-pending (deferred
 * setup) or authentication-pending, followed by the first Configure
 * Request on success.
 */
1263 static void l2cap_conn_start(struct l2cap_conn *conn)
1265 struct l2cap_chan *chan, *tmp;
1267 BT_DBG("conn %p", conn);
1269 mutex_lock(&conn->chan_lock);
1271 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1272 l2cap_chan_lock(chan);
1274 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1275 l2cap_chan_unlock(chan);
1279 if (chan->state == BT_CONNECT) {
1280 if (!l2cap_chan_check_security(chan) ||
1281 !__l2cap_no_conn_pending(chan)) {
1282 l2cap_chan_unlock(chan);
1286 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1287 && test_bit(CONF_STATE2_DEVICE,
1288 &chan->conf_state)) {
1289 l2cap_chan_close(chan, ECONNRESET);
1290 l2cap_chan_unlock(chan);
1294 l2cap_start_connection(chan);
1296 } else if (chan->state == BT_CONNECT2) {
1297 struct l2cap_conn_rsp rsp;
1299 rsp.scid = cpu_to_le16(chan->dcid);
1300 rsp.dcid = cpu_to_le16(chan->scid);
1302 if (l2cap_chan_check_security(chan)) {
1303 struct sock *sk = chan->sk;
1306 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1307 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1308 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1309 chan->ops->defer(chan);
1312 __l2cap_state_change(chan, BT_CONFIG);
1313 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1314 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1318 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1319 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1322 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1325 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1326 rsp.result != L2CAP_CR_SUCCESS) {
1327 l2cap_chan_unlock(chan);
1331 set_bit(CONF_REQ_SENT, &chan->conf_state);
1332 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1333 l2cap_build_conf_req(chan, buf), buf);
1334 chan->num_conf_req++;
1337 l2cap_chan_unlock(chan);
1340 mutex_unlock(&conn->chan_lock);
1343 /* Find socket with cid and source/destination bdaddr.
1344 * Returns closest match, locked.
/* An exact src+dst match wins immediately; otherwise the best candidate
 * with BDADDR_ANY wildcards (tracked in c1) is returned.
 */
1346 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1350 struct l2cap_chan *c, *c1 = NULL;
1352 read_lock(&chan_list_lock);
1354 list_for_each_entry(c, &chan_list, global_l) {
1355 if (state && c->state != state)
1358 if (c->scid == cid) {
1359 int src_match, dst_match;
1360 int src_any, dst_any;
1363 src_match = !bacmp(&c->src, src);
1364 dst_match = !bacmp(&c->dst, dst);
1365 if (src_match && dst_match) {
1366 read_unlock(&chan_list_lock);
1371 src_any = !bacmp(&c->src, BDADDR_ANY);
1372 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1373 if ((src_match && dst_any) || (src_any && dst_match) ||
1374 (src_any && dst_any))
1379 read_unlock(&chan_list_lock);
/* On an LE link becoming ready, accept an incoming ATT channel on any
 * listening ATT server socket -- unless a client ATT channel already
 * exists on this connection, which takes precedence.
 */
1384 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1386 struct sock *parent;
1387 struct l2cap_chan *chan, *pchan;
1391 /* Check if we have socket listening on cid */
1392 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1393 &conn->hcon->src, &conn->hcon->dst);
1397 /* Client ATT sockets should override the server one */
1398 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1405 chan = pchan->ops->new_connection(pchan);
1409 chan->dcid = L2CAP_CID_ATT;
1411 bacpy(&chan->src, &conn->hcon->src);
1412 bacpy(&chan->dst, &conn->hcon->dst);
1413 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
1414 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
1416 __l2cap_chan_add(conn, chan);
1419 release_sock(parent);
/* Called when the underlying HCI link is fully established: kick
 * security/ELE setup and walk all channels on the conn, advancing
 * each according to its type and state.
 */
1422 static void l2cap_conn_ready(struct l2cap_conn *conn)
1424 struct l2cap_chan *chan;
1425 struct hci_conn *hcon = conn->hcon;
1427 BT_DBG("conn %p", conn);
1429 /* For outgoing pairing which doesn't necessarily have an
1430 * associated socket (e.g. mgmt_pair_device).
1432 if (hcon->out && hcon->type == LE_LINK)
1433 smp_conn_security(hcon, hcon->pending_sec_level);
1435 mutex_lock(&conn->chan_lock);
1437 if (hcon->type == LE_LINK)
1438 l2cap_le_conn_ready(conn);
1440 list_for_each_entry(chan, &conn->chan_l, list) {
1442 l2cap_chan_lock(chan);
/* A2MP fixed channel manages itself; skip it here. */
1444 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1445 l2cap_chan_unlock(chan);
1449 if (hcon->type == LE_LINK) {
1450 if (smp_conn_security(hcon, chan->sec_level))
1451 l2cap_chan_ready(chan);
1453 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Connectionless/raw channels are connected as soon as the
 * link is up; notify the owning socket directly. */
1454 struct sock *sk = chan->sk;
1455 __clear_chan_timer(chan);
1457 __l2cap_state_change(chan, BT_CONNECTED);
1458 sk->sk_state_change(sk);
1461 } else if (chan->state == BT_CONNECT) {
1462 l2cap_do_start(chan);
1465 l2cap_chan_unlock(chan);
1468 mutex_unlock(&conn->chan_lock);
1471 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate err to every channel on conn that demanded reliability
 * (FLAG_FORCE_RELIABLE); other channels are left untouched.
 */
1472 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1474 struct l2cap_chan *chan;
1476 BT_DBG("conn %p", conn);
1478 mutex_lock(&conn->chan_lock);
1480 list_for_each_entry(chan, &conn->chan_l, list) {
1481 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1482 l2cap_chan_set_err(chan, err);
1485 mutex_unlock(&conn->chan_lock);
/* Information-request timer expired: give up waiting for the remote
 * feature mask, mark the exchange done and start pending channels.
 */
1488 static void l2cap_info_timeout(struct work_struct *work)
1490 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1493 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1494 conn->info_ident = 0;
1496 l2cap_conn_start(conn);
1501 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1502 * callback is called during registration. The ->remove callback is called
1503 * during unregistration.
1504 * An l2cap_user object can either be explicitly unregistered or is
1505 * implicitly unregistered when the underlying l2cap_conn object is deleted.
1506 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1507 * External modules must own a reference to the l2cap_conn object if they intend
1508 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1509 * any time if they don't.
/* Register an external l2cap_user on conn.  Fails if the user is
 * already linked (list pointers non-NULL), if the conn was already
 * torn down (hchan == NULL, checked in elided lines), or if the
 * user's ->probe() callback rejects the registration.
 */
1512 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1514 struct hci_dev *hdev = conn->hcon->hdev;
1517 /* We need to check whether l2cap_conn is registered. If it is not, we
1518 * must not register the l2cap_user. l2cap_conn_del() unregisters
1519 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1520 * relies on the parent hci_conn object to be locked. This itself relies
1521 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean this user is already registered. */
1526 if (user->list.next || user->list.prev) {
1531 /* conn->hchan is NULL after l2cap_conn_del() was called */
1537 ret = user->probe(conn, user);
1541 list_add(&user->list, &conn->users);
1545 hci_dev_unlock(hdev);
1548 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister an l2cap_user: unlink it, clear its list
 * pointers (so a re-registration is detectable) and invoke ->remove().
 * Silently returns if the user was never registered.
 */
1550 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1552 struct hci_dev *hdev = conn->hcon->hdev;
1556 if (!user->list.next || !user->list.prev)
1559 list_del(&user->list);
1560 user->list.next = NULL;
1561 user->list.prev = NULL;
1562 user->remove(conn, user);
1565 hci_dev_unlock(hdev);
1567 EXPORT_SYMBOL(l2cap_unregister_user);
/* Drain conn->users on connection teardown, calling each user's
 * ->remove() callback after unlinking it.
 */
1569 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1571 struct l2cap_user *user;
1573 while (!list_empty(&conn->users)) {
1574 user = list_first_entry(&conn->users, struct l2cap_user, list);
1575 list_del(&user->list);
1576 user->list.next = NULL;
1577 user->list.prev = NULL;
1578 user->remove(conn, user);
/* Tear down the L2CAP connection attached to hcon: notify users,
 * close every channel with err, release the HCI channel, cancel
 * pending timers, and drop the conn reference.
 * NOTE(review): caller locking (hci_conn/hci_dev) is documented in
 * l2cap_register_user() above — this function has no own locking.
 */
1582 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1584 struct l2cap_conn *conn = hcon->l2cap_data;
1585 struct l2cap_chan *chan, *l;
1590 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1592 kfree_skb(conn->rx_skb);
1594 l2cap_unregister_all_users(conn);
1596 mutex_lock(&conn->chan_lock);
/* Hold each chan across close so the ops->close() callback can run
 * after the chan is removed from conn->chan_l. */
1599 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1600 l2cap_chan_hold(chan);
1601 l2cap_chan_lock(chan);
1603 l2cap_chan_del(chan, err);
1605 l2cap_chan_unlock(chan);
1607 chan->ops->close(chan);
1608 l2cap_chan_put(chan);
1611 mutex_unlock(&conn->chan_lock);
1613 hci_chan_del(conn->hchan);
1615 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1616 cancel_delayed_work_sync(&conn->info_timer);
1618 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1619 cancel_delayed_work_sync(&conn->security_timer)
1620 smp_chan_destroy(conn);
1623 hcon->l2cap_data = NULL;
1625 l2cap_conn_put(conn);
/* SMP security procedure timed out on an LE link: destroy the SMP
 * context and drop the whole L2CAP connection with ETIMEDOUT.
 */
1628 static void security_timeout(struct work_struct *work)
1630 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1631 security_timer.work);
1633 BT_DBG("conn %p", conn);
1635 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1636 smp_chan_destroy(conn);
1637 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialise the l2cap_conn object for hcon (creating
 * the HCI channel first), picking the MTU from the LE or ACL link
 * parameters and arming the appropriate (security/info) timer.
 */
1641 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1643 struct l2cap_conn *conn = hcon->l2cap_data;
1644 struct hci_chan *hchan;
1649 hchan = hci_chan_create(hcon);
1653 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan created above. */
1655 hci_chan_del(hchan);
1659 kref_init(&conn->ref);
1660 hcon->l2cap_data = conn;
/* conn holds a reference on its hci_conn until l2cap_conn_free(). */
1662 hci_conn_get(conn->hcon);
1663 conn->hchan = hchan;
1665 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1667 switch (hcon->type) {
1669 if (hcon->hdev->le_mtu) {
1670 conn->mtu = hcon->hdev->le_mtu;
1675 conn->mtu = hcon->hdev->acl_mtu;
1679 conn->feat_mask = 0;
1681 if (hcon->type == ACL_LINK)
1682 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1683 &hcon->hdev->dev_flags);
1685 spin_lock_init(&conn->lock);
1686 mutex_init(&conn->chan_lock);
1688 INIT_LIST_HEAD(&conn->chan_l);
1689 INIT_LIST_HEAD(&conn->users);
/* LE links use the SMP security timer; BR/EDR the info-req timer. */
1691 if (hcon->type == LE_LINK)
1692 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1694 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1696 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() (conn itself is freed in elided lines).
 */
1701 static void l2cap_conn_free(struct kref *ref)
1703 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1705 hci_conn_put(conn->hcon);
/* Take a reference on conn for external users. */
1709 void l2cap_conn_get(struct l2cap_conn *conn)
1711 kref_get(&conn->ref);
1713 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; frees conn via l2cap_conn_free() on last put. */
1715 void l2cap_conn_put(struct l2cap_conn *conn)
1717 kref_put(&conn->ref, l2cap_conn_free);
1719 EXPORT_SYMBOL(l2cap_conn_put);
1721 /* ---- Socket interface ---- */
1723 /* Find channel with psm and source / destination bdaddr.
1724 * Returns closest match.
/* Look up a channel in the global chan_list by PSM, optionally
 * filtered by state.  Mirrors l2cap_global_chan_by_scid(): exact
 * src/dst match returns immediately, a BDADDR_ANY wildcard match is
 * kept as the closest-match fallback (c1).
 * NOTE(review): listing is elided; continue/return statements are
 * missing from view.
 */
1726 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1730 struct l2cap_chan *c, *c1 = NULL;
1732 read_lock(&chan_list_lock);
1734 list_for_each_entry(c, &chan_list, global_l) {
1735 if (state && c->state != state)
1738 if (c->psm == psm) {
1739 int src_match, dst_match;
1740 int src_any, dst_any;
1743 src_match = !bacmp(&c->src, src);
1744 dst_match = !bacmp(&c->dst, dst);
1745 if (src_match && dst_match) {
1746 read_unlock(&chan_list_lock);
1751 src_any = !bacmp(&c->src, BDADDR_ANY);
1752 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1753 if ((src_match && dst_any) || (src_any && dst_match) ||
1754 (src_any && dst_any))
1759 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on chan toward dst.
 * Validates PSM/CID/mode, resolves a route to an hci_dev, creates
 * (or reuses) the HCI link and l2cap_conn, binds the channel to it
 * and either completes immediately (link already up) or leaves the
 * channel in BT_CONNECT awaiting l2cap_conn_ready().
 * Returns 0 on success or a negative errno.
 */
1764 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1765 bdaddr_t *dst, u8 dst_type)
1767 struct sock *sk = chan->sk;
1768 struct l2cap_conn *conn;
1769 struct hci_conn *hcon;
1770 struct hci_dev *hdev;
1774 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1775 dst_type, __le16_to_cpu(psm));
1777 hdev = hci_get_route(dst, &chan->src);
1779 return -EHOSTUNREACH;
1783 l2cap_chan_lock(chan);
1785 /* PSM must be odd and lsb of upper byte must be 0 */
1786 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1787 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1792 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1797 switch (chan->mode) {
1798 case L2CAP_MODE_BASIC:
1800 case L2CAP_MODE_ERTM:
1801 case L2CAP_MODE_STREAMING:
1810 switch (chan->state) {
1814 /* Already connecting */
1819 /* Already connected */
1833 /* Set destination address and psm */
1834 bacpy(&chan->dst, dst);
1835 chan->dst_type = dst_type;
1840 auth_type = l2cap_get_auth_type(chan);
1842 if (bdaddr_type_is_le(dst_type))
1843 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1844 chan->sec_level, auth_type);
1846 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1847 chan->sec_level, auth_type);
1850 err = PTR_ERR(hcon);
1854 conn = l2cap_conn_add(hcon);
1856 hci_conn_drop(hcon);
/* Refuse to connect a fixed CID that is already in use on conn. */
1861 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1862 hci_conn_drop(hcon);
1867 /* Update source addr of the socket */
1868 bacpy(&chan->src, &hcon->src);
1869 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* chan must be unlocked while taking conn->chan_lock inside
 * l2cap_chan_add() to preserve lock ordering. */
1871 l2cap_chan_unlock(chan);
1872 l2cap_chan_add(conn, chan);
1873 l2cap_chan_lock(chan);
1875 /* l2cap_chan_add takes its own ref so we can drop this one */
1876 hci_conn_drop(hcon);
1878 l2cap_state_change(chan, BT_CONNECT);
1879 __set_chan_timer(chan, sk->sk_sndtimeo);
1881 if (hcon->state == BT_CONNECTED) {
1882 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1883 __clear_chan_timer(chan);
1884 if (l2cap_chan_check_security(chan))
1885 l2cap_state_change(chan, BT_CONNECTED);
1887 l2cap_do_start(chan);
1893 l2cap_chan_unlock(chan);
1894 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until all outstanding ERTM I-frames have
 * been acked, the channel loses its conn, a signal arrives, or a
 * socket error is set.  Returns 0 or a negative errno.
 */
1899 int __l2cap_wait_ack(struct sock *sk)
1901 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1902 DECLARE_WAITQUEUE(wait, current);
1906 add_wait_queue(sk_sleep(sk), &wait);
1907 set_current_state(TASK_INTERRUPTIBLE);
1908 while (chan->unacked_frames > 0 && chan->conn) {
1912 if (signal_pending(current)) {
1913 err = sock_intr_errno(timeo);
1918 timeo = schedule_timeout(timeo);
1920 set_current_state(TASK_INTERRUPTIBLE);
1922 err = sock_error(sk);
1926 set_current_state(TASK_RUNNING);
1927 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the TX
 * state machine.  The early unlock/put path (elided condition) bails
 * out when the channel is no longer usable.
 */
1931 static void l2cap_monitor_timeout(struct work_struct *work)
1933 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1934 monitor_timer.work);
1936 BT_DBG("chan %p", chan);
1938 l2cap_chan_lock(chan);
1941 l2cap_chan_unlock(chan);
1942 l2cap_chan_put(chan);
1946 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1948 l2cap_chan_unlock(chan);
1949 l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into
 * the TX state machine.  Structure mirrors l2cap_monitor_timeout().
 */
1952 static void l2cap_retrans_timeout(struct work_struct *work)
1954 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1955 retrans_timer.work);
1957 BT_DBG("chan %p", chan);
1959 l2cap_chan_lock(chan);
1962 l2cap_chan_unlock(chan);
1963 l2cap_chan_put(chan);
1967 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1968 l2cap_chan_unlock(chan);
1969 l2cap_chan_put(chan);
/* Streaming mode transmit: append skbs to the tx queue and send them
 * all immediately (no ack tracking), stamping each frame's txseq and
 * appending an FCS when CRC16 is configured.
 */
1972 static void l2cap_streaming_send(struct l2cap_chan *chan,
1973 struct sk_buff_head *skbs)
1975 struct sk_buff *skb;
1976 struct l2cap_ctrl *control;
1978 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
1980 if (__chan_is_moving(chan))
1983 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1985 while (!skb_queue_empty(&chan->tx_q)) {
1987 skb = skb_dequeue(&chan->tx_q);
1989 bt_cb(skb)->control.retries = 1;
1990 control = &bt_cb(skb)->control;
1992 control->reqseq = 0;
1993 control->txseq = chan->next_tx_seq;
1995 __pack_control(chan, control, skb);
1997 if (chan->fcs == L2CAP_FCS_CRC16) {
1998 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1999 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2002 l2cap_do_send(chan, skb);
2004 BT_DBG("Sent txseq %u", control->txseq);
2006 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2007 chan->frames_sent++;
/* ERTM transmit: send queued I-frames while the remote TX window has
 * room and the state machine is in XMIT.  Each frame is cloned before
 * sending (originals stay queued for possible retransmission), the
 * retransmission timer is armed, and sequence/ack bookkeeping is
 * updated.  Returns the number of frames sent (per trailing BT_DBG).
 */
2011 static int l2cap_ertm_send(struct l2cap_chan *chan)
2013 struct sk_buff *skb, *tx_skb;
2014 struct l2cap_ctrl *control;
2017 BT_DBG("chan %p", chan);
2019 if (chan->state != BT_CONNECTED)
2022 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2025 if (__chan_is_moving(chan))
2028 while (chan->tx_send_head &&
2029 chan->unacked_frames < chan->remote_tx_win &&
2030 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2032 skb = chan->tx_send_head;
2034 bt_cb(skb)->control.retries = 1;
2035 control = &bt_cb(skb)->control;
/* Piggy-back an F-bit if one is pending. */
2037 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2040 control->reqseq = chan->buffer_seq;
2041 chan->last_acked_seq = chan->buffer_seq;
2042 control->txseq = chan->next_tx_seq;
2044 __pack_control(chan, control, skb);
2046 if (chan->fcs == L2CAP_FCS_CRC16) {
2047 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2048 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2051 /* Clone after data has been modified. Data is assumed to be
2052 read-only (for locking purposes) on cloned sk_buffs.
2054 tx_skb = skb_clone(skb, GFP_KERNEL);
2059 __set_retrans_timer(chan);
2061 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2062 chan->unacked_frames++;
2063 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, or NULL. */
2066 if (skb_queue_is_last(&chan->tx_q, skb))
2067 chan->tx_send_head = NULL;
2069 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2071 l2cap_do_send(chan, tx_skb);
2072 BT_DBG("Sent txseq %u", control->txseq);
2075 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2076 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * Frames still in tx_q are re-sent with updated reqseq/F-bit; the
 * per-frame retry counter is bumped and the link is torn down with
 * ECONNRESET when max_tx is exceeded.  Cloned frames must be deep-
 * copied before their headers can be rewritten.
 */
2081 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2083 struct l2cap_ctrl control;
2084 struct sk_buff *skb;
2085 struct sk_buff *tx_skb;
2088 BT_DBG("chan %p", chan);
2090 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2093 if (__chan_is_moving(chan))
2096 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2097 seq = l2cap_seq_list_pop(&chan->retrans_list);
2099 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2101 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2106 bt_cb(skb)->control.retries++;
2107 control = bt_cb(skb)->control;
2109 if (chan->max_tx != 0 &&
2110 bt_cb(skb)->control.retries > chan->max_tx) {
2111 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2112 l2cap_send_disconn_req(chan, ECONNRESET);
2113 l2cap_seq_list_clear(&chan->retrans_list);
2117 control.reqseq = chan->buffer_seq;
2118 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2123 if (skb_cloned(skb)) {
2124 /* Cloned sk_buffs are read-only, so we need a
2127 tx_skb = skb_copy(skb, GFP_KERNEL);
2129 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: give up on this retransmit round. */
2133 l2cap_seq_list_clear(&chan->retrans_list);
2137 /* Update skb contents */
2138 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2139 put_unaligned_le32(__pack_extended_control(&control),
2140 tx_skb->data + L2CAP_HDR_SIZE);
2142 put_unaligned_le16(__pack_enhanced_control(&control),
2143 tx_skb->data + L2CAP_HDR_SIZE);
2146 if (chan->fcs == L2CAP_FCS_CRC16) {
2147 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2148 put_unaligned_le16(fcs, skb_put(tx_skb,
2152 l2cap_do_send(chan, tx_skb);
2154 BT_DBG("Resent txseq %d", control.txseq);
2156 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame named by control->reqseq (SREJ path). */
2160 static void l2cap_retransmit(struct l2cap_chan *chan,
2161 struct l2cap_ctrl *control)
2163 BT_DBG("chan %p, control %p", chan, control);
2165 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2166 l2cap_ertm_resend(chan);
/* REJ path: queue every unacked frame from control->reqseq up to
 * tx_send_head for retransmission, then resend them all.
 */
2169 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2170 struct l2cap_ctrl *control)
2172 struct sk_buff *skb;
2174 BT_DBG("chan %p, control %p", chan, control);
2177 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2179 l2cap_seq_list_clear(&chan->retrans_list);
2181 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2184 if (chan->unacked_frames) {
/* Find the first frame at/after reqseq (or the send head). */
2185 skb_queue_walk(&chan->tx_q, skb) {
2186 if (bt_cb(skb)->control.txseq == control->reqseq ||
2187 skb == chan->tx_send_head)
2191 skb_queue_walk_from(&chan->tx_q, skb) {
2192 if (skb == chan->tx_send_head)
2195 l2cap_seq_list_append(&chan->retrans_list,
2196 bt_cb(skb)->control.txseq);
2199 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, try to
 * piggy-back the ack on pending I-frames, send an explicit RR once
 * the receive window is ~3/4 full, otherwise just (re)arm the ack
 * timer and ack later.
 */
2203 static void l2cap_send_ack(struct l2cap_chan *chan)
2205 struct l2cap_ctrl control;
2206 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2207 chan->last_acked_seq);
2210 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2211 chan, chan->last_acked_seq, chan->buffer_seq);
2213 memset(&control, 0, sizeof(control));
2216 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2217 chan->rx_state == L2CAP_RX_STATE_RECV) {
2218 __clear_ack_timer(chan);
2219 control.super = L2CAP_SUPER_RNR;
2220 control.reqseq = chan->buffer_seq;
2221 l2cap_send_sframe(chan, &control);
2223 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2224 l2cap_ertm_send(chan);
2225 /* If any i-frames were sent, they included an ack */
2226 if (chan->buffer_seq == chan->last_acked_seq)
2230 /* Ack now if the window is 3/4ths full.
2231 * Calculate without mul or div
2233 threshold = chan->ack_win;
/* threshold = ack_win * 3 (then presumably >>= 2 in an elided
 * line, giving 3/4 of the window — TODO confirm). */
2234 threshold += threshold << 1;
2237 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2240 if (frames_to_ack >= threshold) {
2241 __clear_ack_timer(chan);
2242 control.super = L2CAP_SUPER_RR;
2243 control.reqseq = chan->buffer_seq;
2244 l2cap_send_sframe(chan, &control);
2249 __set_ack_timer(chan);
/* Copy len bytes of user iovec data into skb, placing the first
 * `count` bytes in skb's linear area and the remainder in a chain of
 * frag_list continuation skbs sized by the connection MTU.
 * Returns 0 on success or a negative errno.
 */
2253 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2254 struct msghdr *msg, int len,
2255 int count, struct sk_buff *skb)
2257 struct l2cap_conn *conn = chan->conn;
2258 struct sk_buff **frag;
2261 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2267 /* Continuation fragments (no L2CAP header) */
2268 frag = &skb_shinfo(skb)->frag_list;
2270 struct sk_buff *tmp;
2272 count = min_t(unsigned int, conn->mtu, len);
2274 tmp = chan->ops->alloc_skb(chan, count,
2275 msg->msg_flags & MSG_DONTWAIT);
2277 return PTR_ERR(tmp);
2281 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2284 (*frag)->priority = skb->priority;
/* Account fragment bytes on the head skb. */
2289 skb->len += (*frag)->len;
2290 skb->data_len += (*frag)->len;
2292 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, then the user payload.  Returns the skb or ERR_PTR.
 */
2298 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2299 struct msghdr *msg, size_t len,
2302 struct l2cap_conn *conn = chan->conn;
2303 struct sk_buff *skb;
2304 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2305 struct l2cap_hdr *lh;
2307 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2308 __le16_to_cpu(chan->psm), len, priority);
2310 count = min_t(unsigned int, (conn->mtu - hlen), len);
2312 skb = chan->ops->alloc_skb(chan, count + hlen,
2313 msg->msg_flags & MSG_DONTWAIT);
2317 skb->priority = priority;
2319 /* Create L2CAP header */
2320 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2321 lh->cid = cpu_to_le16(chan->dcid);
2322 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2323 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2325 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2326 if (unlikely(err < 0)) {
2328 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by
 * the user payload.  Returns the skb or ERR_PTR.
 */
2333 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2334 struct msghdr *msg, size_t len,
2337 struct l2cap_conn *conn = chan->conn;
2338 struct sk_buff *skb;
2340 struct l2cap_hdr *lh;
2342 BT_DBG("chan %p len %zu", chan, len);
2344 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2346 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2347 msg->msg_flags & MSG_DONTWAIT);
2351 skb->priority = priority;
2353 /* Create L2CAP header */
2354 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2355 lh->cid = cpu_to_le16(chan->dcid);
2356 lh->len = cpu_to_le16(len);
2358 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2359 if (unlikely(err < 0)) {
2361 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder
 * control field (filled in at transmit time), optional SDU length
 * (for SAR start frames), payload, with room reserved for an FCS
 * when CRC16 is in use.  Returns the skb or ERR_PTR.
 */
2366 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2367 struct msghdr *msg, size_t len,
2370 struct l2cap_conn *conn = chan->conn;
2371 struct sk_buff *skb;
2372 int err, count, hlen;
2373 struct l2cap_hdr *lh;
2375 BT_DBG("chan %p len %zu", chan, len);
2378 return ERR_PTR(-ENOTCONN);
2380 hlen = __ertm_hdr_size(chan);
/* sdulen != 0 (SAR start frame) adds the SDU-length field. */
2383 hlen += L2CAP_SDULEN_SIZE;
2385 if (chan->fcs == L2CAP_FCS_CRC16)
2386 hlen += L2CAP_FCS_SIZE;
2388 count = min_t(unsigned int, (conn->mtu - hlen), len);
2390 skb = chan->ops->alloc_skb(chan, count + hlen,
2391 msg->msg_flags & MSG_DONTWAIT);
2395 /* Create L2CAP header */
2396 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2397 lh->cid = cpu_to_le16(chan->dcid);
2398 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2400 /* Control header is populated later */
2401 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2402 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2404 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2407 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2409 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2410 if (unlikely(err < 0)) {
2412 return ERR_PTR(err);
2415 bt_cb(skb)->control.fcs = chan->fcs;
2416 bt_cb(skb)->control.retries = 0;
/* Segment an outgoing SDU into I-frame PDUs on seg_queue.  The PDU
 * size is bounded by the HCI MTU (ERTM PDUs must fit one HCI
 * fragment), BR/EDR payload limits, ERTM header/FCS overhead and the
 * remote MPS.  SAR markers run UNSEGMENTED or START/CONTINUE/END.
 */
2420 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2421 struct sk_buff_head *seg_queue,
2422 struct msghdr *msg, size_t len)
2424 struct sk_buff *skb;
2429 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2431 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2432 * so fragmented skbs are not used. The HCI layer's handling
2433 * of fragmented skbs is not compatible with ERTM's queueing.
2436 /* PDU size is derived from the HCI MTU */
2437 pdu_len = chan->conn->mtu;
2439 /* Constrain PDU size for BR/EDR connections */
2441 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2443 /* Adjust for largest possible L2CAP overhead. */
2445 pdu_len -= L2CAP_FCS_SIZE;
2447 pdu_len -= __ertm_hdr_size(chan);
2449 /* Remote device may have requested smaller PDUs */
2450 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2452 if (len <= pdu_len) {
2453 sar = L2CAP_SAR_UNSEGMENTED;
2457 sar = L2CAP_SAR_START;
/* First segment carries the SDU length field. */
2459 pdu_len -= L2CAP_SDULEN_SIZE;
2463 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2466 __skb_queue_purge(seg_queue);
2467 return PTR_ERR(skb);
2470 bt_cb(skb)->control.sar = sar;
2471 __skb_queue_tail(seg_queue, skb);
/* Subsequent segments have no SDU-length field. */
2476 pdu_len += L2CAP_SDULEN_SIZE;
2479 if (len <= pdu_len) {
2480 sar = L2CAP_SAR_END;
2483 sar = L2CAP_SAR_CONTINUE;
/* Top-level channel send entry point.  Connectionless channels get a
 * single G-frame; basic mode a single B-frame; ERTM/streaming modes
 * segment the SDU first and then hand the queue to the TX state
 * machine (ERTM) or send directly (streaming).  Returns bytes sent
 * or a negative errno (per the elided return paths).
 */
2490 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2493 struct sk_buff *skb;
2495 struct sk_buff_head seg_queue;
2497 /* Connectionless channel */
2498 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2499 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2501 return PTR_ERR(skb);
2503 l2cap_do_send(chan, skb);
2507 switch (chan->mode) {
2508 case L2CAP_MODE_BASIC:
2509 /* Check outgoing MTU */
2510 if (len > chan->omtu)
2513 /* Create a basic PDU */
2514 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2516 return PTR_ERR(skb);
2518 l2cap_do_send(chan, skb);
2522 case L2CAP_MODE_ERTM:
2523 case L2CAP_MODE_STREAMING:
2524 /* Check outgoing MTU */
2525 if (len > chan->omtu) {
2530 __skb_queue_head_init(&seg_queue);
2532 /* Do segmentation before calling in to the state machine,
2533 * since it's possible to block while waiting for memory
2536 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2538 /* The channel could have been closed while segmenting,
2539 * check that it is still connected.
2541 if (chan->state != BT_CONNECTED) {
2542 __skb_queue_purge(&seg_queue);
2549 if (chan->mode == L2CAP_MODE_ERTM)
2550 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST)
2552 l2cap_streaming_send(chan, &seg_queue);
2556 /* If the skbs were not queued for sending, they'll still be in
2557 * seg_queue and need to be purged.
2559 __skb_queue_purge(&seg_queue);
2563 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and txseq that is not already buffered in srej_q,
 * recording each on srej_list.
 */
2570 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2572 struct l2cap_ctrl control;
2575 BT_DBG("chan %p, txseq %u", chan, txseq);
2577 memset(&control, 0, sizeof(control));
2579 control.super = L2CAP_SUPER_SREJ;
2581 for (seq = chan->expected_tx_seq; seq != txseq;
2582 seq = __next_seq(chan, seq)) {
2583 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2584 control.reqseq = seq;
2585 l2cap_send_sframe(chan, &control);
2586 l2cap_seq_list_append(&chan->srej_list, seq);
2590 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recent entry on srej_list (no-op when
 * the list is empty).
 */
2593 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2595 struct l2cap_ctrl control;
2597 BT_DBG("chan %p", chan);
2599 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2602 memset(&control, 0, sizeof(control));
2604 control.super = L2CAP_SUPER_SREJ;
2605 control.reqseq = chan->srej_list.tail;
2606 l2cap_send_sframe(chan, &control);
/* Re-send SREJ S-frames for every outstanding entry on srej_list
 * except txseq, rotating each popped entry back onto the list; the
 * initial head is captured so the list is traversed exactly once.
 */
2609 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2611 struct l2cap_ctrl control;
2615 BT_DBG("chan %p, txseq %u", chan, txseq);
2617 memset(&control, 0, sizeof(control));
2619 control.super = L2CAP_SUPER_SREJ;
2621 /* Capture initial list head to allow only one pass through the list. */
2622 initial_head = chan->srej_list.head;
2625 seq = l2cap_seq_list_pop(&chan->srej_list);
2626 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2629 control.reqseq = seq;
2630 l2cap_send_sframe(chan, &control);
2631 l2cap_seq_list_append(&chan->srej_list, seq);
2632 } while (chan->srej_list.head != initial_head);
/* Process an incoming ack (reqseq): free every acked frame from
 * expected_ack_seq up to (but not including) reqseq, update the
 * expected ack sequence, and stop the retransmission timer when
 * nothing remains unacked.
 */
2635 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2637 struct sk_buff *acked_skb;
2640 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2642 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2645 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2646 chan->expected_ack_seq, chan->unacked_frames);
2648 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2649 ackseq = __next_seq(chan, ackseq)) {
2651 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2653 skb_unlink(acked_skb, &chan->tx_q);
2654 kfree_skb(acked_skb);
2655 chan->unacked_frames--;
2659 chan->expected_ack_seq = reqseq;
2661 if (chan->unacked_frames == 0)
2662 __clear_retrans_timer(chan);
2664 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT receive state: discard the out-of-order
 * reassembly buffer and SREJ list, and fall back to plain RECV.
 */
2667 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2669 BT_DBG("chan %p", chan);
2671 chan->expected_tx_seq = chan->buffer_seq;
2672 l2cap_seq_list_clear(&chan->srej_list);
2673 skb_queue_purge(&chan->srej_q);
2674 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: dispatch on event.  New data is
 * queued and sent; local-busy transitions send RNR/RR with the poll
 * bit and may move to WAIT_F; explicit polls and retransmission
 * timeouts send RR/RNR with P=1 and enter WAIT_F.
 */
2677 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2678 struct l2cap_ctrl *control,
2679 struct sk_buff_head *skbs, u8 event)
2681 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2685 case L2CAP_EV_DATA_REQUEST:
2686 if (chan->tx_send_head == NULL)
2687 chan->tx_send_head = skb_peek(skbs);
2689 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2690 l2cap_ertm_send(chan);
2692 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2693 BT_DBG("Enter LOCAL_BUSY");
2694 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2696 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2697 /* The SREJ_SENT state must be aborted if we are to
2698 * enter the LOCAL_BUSY state.
2700 l2cap_abort_rx_srej_sent(chan);
2703 l2cap_send_ack(chan);
2706 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2707 BT_DBG("Exit LOCAL_BUSY");
2708 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2710 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
/* We advertised busy earlier (RNR); clear it with an
 * RR poll and wait for the F-bit reply in WAIT_F. */
2711 struct l2cap_ctrl local_control;
2713 memset(&local_control, 0, sizeof(local_control));
2714 local_control.sframe = 1;
2715 local_control.super = L2CAP_SUPER_RR;
2716 local_control.poll = 1;
2717 local_control.reqseq = chan->buffer_seq;
2718 l2cap_send_sframe(chan, &local_control);
2720 chan->retry_count = 1;
2721 __set_monitor_timer(chan);
2722 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2725 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2726 l2cap_process_reqseq(chan, control->reqseq);
2728 case L2CAP_EV_EXPLICIT_POLL:
2729 l2cap_send_rr_or_rnr(chan, 1);
2730 chan->retry_count = 1;
2731 __set_monitor_timer(chan);
2732 __clear_ack_timer(chan);
2733 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2735 case L2CAP_EV_RETRANS_TO:
2736 l2cap_send_rr_or_rnr(chan, 1);
2737 chan->retry_count = 1;
2738 __set_monitor_timer(chan);
2739 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2741 case L2CAP_EV_RECV_FBIT:
2742 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state: we sent a poll (P=1) and are
 * waiting for the matching F-bit.  Data is queued but not sent; a
 * received F-bit restarts normal transmission (back to XMIT); a
 * monitor timeout re-polls until retry_count hits max_tx, after
 * which the link is dropped with ECONNABORTED.
 * Fix: the "recv fbit" BT_DBG used the malformed format "0x2.2%x"
 * (literal "2.2" plus bare %x) — corrected to "0x%2.2x" to match
 * every other BT_DBG in this file.
 */
2749 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2750 struct l2cap_ctrl *control,
2751 struct sk_buff_head *skbs, u8 event)
2753 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2757 case L2CAP_EV_DATA_REQUEST:
2758 if (chan->tx_send_head == NULL)
2759 chan->tx_send_head = skb_peek(skbs);
2760 /* Queue data, but don't send. */
2761 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2763 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2764 BT_DBG("Enter LOCAL_BUSY");
2765 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2767 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2768 /* The SREJ_SENT state must be aborted if we are to
2769 * enter the LOCAL_BUSY state.
2771 l2cap_abort_rx_srej_sent(chan);
2774 l2cap_send_ack(chan);
2777 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2778 BT_DBG("Exit LOCAL_BUSY");
2779 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2781 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2782 struct l2cap_ctrl local_control;
2783 memset(&local_control, 0, sizeof(local_control));
2784 local_control.sframe = 1;
2785 local_control.super = L2CAP_SUPER_RR;
2786 local_control.poll = 1;
2787 local_control.reqseq = chan->buffer_seq;
2788 l2cap_send_sframe(chan, &local_control);
2790 chan->retry_count = 1;
2791 __set_monitor_timer(chan);
2792 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2795 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2796 l2cap_process_reqseq(chan, control->reqseq);
2800 case L2CAP_EV_RECV_FBIT:
2801 if (control && control->final) {
2802 __clear_monitor_timer(chan);
2803 if (chan->unacked_frames > 0)
2804 __set_retrans_timer(chan);
2805 chan->retry_count = 0;
2806 chan->tx_state = L2CAP_TX_STATE_XMIT;
2807 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2810 case L2CAP_EV_EXPLICIT_POLL:
2813 case L2CAP_EV_MONITOR_TO:
2814 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2815 l2cap_send_rr_or_rnr(chan, 1);
2816 __set_monitor_timer(chan);
2817 chan->retry_count++;
2819 l2cap_send_disconn_req(chan, ECONNABORTED);
/* ERTM TX state machine entry point: dispatch the event to the
 * handler for the current tx_state (XMIT or WAIT_F).
 */
2827 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2828 struct sk_buff_head *skbs, u8 event)
2830 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2831 chan, control, skbs, event, chan->tx_state);
2833 switch (chan->tx_state) {
2834 case L2CAP_TX_STATE_XMIT:
2835 l2cap_tx_state_xmit(chan, control, skbs, event);
2837 case L2CAP_TX_STATE_WAIT_F:
2838 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's reqseq (and F-bit) into the TX state
 * machine as a RECV_REQSEQ_AND_FBIT event.
 */
2846 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2847 struct l2cap_ctrl *control)
2849 BT_DBG("chan %p, control %p", chan, control);
2850 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the F-bit into the TX state machine. */
2853 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2854 struct l2cap_ctrl *control)
2856 BT_DBG("chan %p, control %p", chan, control);
2857 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2860 /* Copy frame to all raw sockets on that connection */
2861 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2863 struct sk_buff *nskb;
2864 struct l2cap_chan *chan;
2866 BT_DBG("conn %p", conn);
2868 mutex_lock(&conn->chan_lock);
2870 list_for_each_entry(chan, &conn->chan_l, list) {
2871 struct sock *sk = chan->sk;
2872 if (chan->chan_type != L2CAP_CHAN_RAW)
2875 /* Don't send frame to the socket it came from */
/* Each raw channel gets its own clone; on recv failure the clone
 * is presumably freed in an elided line — TODO confirm. */
2878 nskb = skb_clone(skb, GFP_KERNEL);
2882 if (chan->ops->recv(chan, nskb))
2886 mutex_unlock(&conn->chan_lock);
2889 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header (on the LE or BR/EDR
 * signalling CID as appropriate), command header, then dlen bytes of
 * payload, spilling into frag_list continuation skbs if the payload
 * exceeds the connection MTU.  Returns the skb or NULL on failure.
 */
2890 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2891 u8 ident, u16 dlen, void *data)
2893 struct sk_buff *skb, **frag;
2894 struct l2cap_cmd_hdr *cmd;
2895 struct l2cap_hdr *lh;
2898 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2899 conn, code, ident, dlen);
2901 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2904 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2905 count = min_t(unsigned int, conn->mtu, len);
2907 skb = bt_skb_alloc(count, GFP_KERNEL);
2911 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2912 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2914 if (conn->hcon->type == LE_LINK)
2915 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2917 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2919 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2922 cmd->len = cpu_to_le16(dlen);
2925 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2926 memcpy(skb_put(skb, count), data, count);
2932 /* Continuation fragments (no L2CAP header) */
2933 frag = &skb_shinfo(skb)->frag_list;
2935 count = min_t(unsigned int, conn->mtu, len);
2937 *frag = bt_skb_alloc(count, GFP_KERNEL);
2941 memcpy(skb_put(*frag, count), data, count);
2946 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its type and value
 * (1/2/4-byte values decoded, larger values returned as a pointer)
 * and advance past it.  Returns the option's total length.
 */
2956 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2959 struct l2cap_conf_opt *opt = *ptr;
2962 len = L2CAP_CONF_OPT_SIZE + opt->len;
2970 *val = *((u8 *) opt->val);
2974 *val = get_unaligned_le16(opt->val);
2978 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes. */
2982 *val = (unsigned long) opt->val;
2986 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr and advance the cursor.
 * val is either an immediate (len 1/2/4) or, for larger options, a
 * pointer to the payload to copy.
 */
2990 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2992 struct l2cap_conf_opt *opt = *ptr;
2994 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3001 *((u8 *) opt->val) = val;
3005 put_unaligned_le16(val, opt->val);
3009 put_unaligned_le32(val, opt->val);
/* len > 4: val is really a pointer to the option payload. */
3013 memcpy(opt->val, (void *) val, len);
/* Advance past header + payload so the next option lands after us. */
3017 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters. ERTM uses the channel's service type;
 * streaming mode forces best-effort.
 */
3020 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3022 struct l2cap_conf_efs efs;
3024 switch (chan->mode) {
3025 case L2CAP_MODE_ERTM:
3026 efs.id = chan->local_id;
3027 efs.stype = chan->local_stype;
3028 efs.msdu = cpu_to_le16(chan->local_msdu);
3029 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3030 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3031 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3034 case L2CAP_MODE_STREAMING:
/* Streaming is always best-effort; id/latency defaults elided here. */
3036 efs.stype = L2CAP_SERV_BESTEFFORT;
3037 efs.msdu = cpu_to_le16(chan->local_msdu);
3038 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3047 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3048 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if frames received since
 * the last acknowledgement are still outstanding, send an RR/RNR to ack
 * them. Drops the channel reference taken when the timer was armed.
 */
3051 static void l2cap_ack_timeout(struct work_struct *work)
3053 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3057 BT_DBG("chan %p", chan);
3059 l2cap_chan_lock(chan);
/* Sequence distance between what we buffered and what we last acked. */
3061 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3062 chan->last_acked_seq);
3065 l2cap_send_rr_or_rnr(chan, 0);
3067 l2cap_chan_unlock(chan);
/* Balance the hold taken when scheduling this work. */
3068 l2cap_chan_put(chan);
/* Reset all ERTM/streaming state on a channel after configuration:
 * sequence counters, tx queue, AMP move state, timers, and the SREJ and
 * retransmit sequence lists. For non-ERTM modes only the counters are
 * reset. Returns 0 or a negative errno from seq-list allocation.
 */
3071 int l2cap_ertm_init(struct l2cap_chan *chan)
3075 chan->next_tx_seq = 0;
3076 chan->expected_tx_seq = 0;
3077 chan->expected_ack_seq = 0;
3078 chan->unacked_frames = 0;
3079 chan->buffer_seq = 0;
3080 chan->frames_sent = 0;
3081 chan->last_acked_seq = 0;
3083 chan->sdu_last_frag = NULL;
3086 skb_queue_head_init(&chan->tx_q);
/* Channel starts on the BR/EDR controller; no AMP move in progress. */
3088 chan->local_amp_id = AMP_ID_BREDR;
3089 chan->move_id = AMP_ID_BREDR;
3090 chan->move_state = L2CAP_MOVE_STABLE;
3091 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Everything below is ERTM-only. */
3093 if (chan->mode != L2CAP_MODE_ERTM)
3096 chan->rx_state = L2CAP_RX_STATE_RECV;
3097 chan->tx_state = L2CAP_TX_STATE_XMIT;
3099 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3100 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3101 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3103 skb_queue_head_init(&chan->srej_q);
3105 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* If retrans_list allocation fails, free srej_list to avoid a leak. */
3109 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3111 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep ERTM/streaming only if the remote
 * advertises support in its feature mask, else fall back to basic mode.
 */
3116 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3119 case L2CAP_MODE_STREAMING:
3120 case L2CAP_MODE_ERTM:
3121 if (l2cap_mode_supported(mode, remote_feat_mask))
3125 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed (AMP) enabled and
 * the remote advertising the feature. */
3129 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3131 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed (AMP)
 * enabled and the remote advertising the feature. */
3134 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3136 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts. On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout; on BR/EDR the spec defaults are used.
 */
3139 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3140 struct l2cap_conf_rfc *rfc)
3142 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3143 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3145 /* Class 1 devices must have ERTM timeouts
3146 * exceeding the Link Supervision Timeout. The
3147 * default Link Supervision Timeout for AMP
3148 * controllers is 10 seconds.
3150 * Class 1 devices use 0xffffffff for their
3151 * best-effort flush timeout, so the clamping logic
3152 * will result in a timeout that meets the above
3153 * requirement. ERTM timeouts are 16-bit values, so
3154 * the maximum timeout is 65.535 seconds.
3157 /* Convert timeout to milliseconds and round */
3158 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3160 /* This is the recommended formula for class 2 devices
3161 * that start ERTM timers when packets are sent to the
3164 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (clamp value elided in this excerpt). */
3166 if (ertm_to > 0xffff)
3169 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3170 rfc->monitor_timeout = rfc->retrans_timeout;
/* BR/EDR path: use the spec default timeouts. */
3172 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3173 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the transmit window: if the requested window exceeds the
 * default and extended window size is supported, enable the extended
 * control field; otherwise clamp to the default window. ack_win tracks
 * tx_win either way.
 */
3177 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3179 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3180 __l2cap_ews_supported(chan->conn)) {
3181 /* use extended control field */
3182 set_bit(FLAG_EXT_CTRL, &chan->flags);
3183 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3185 chan->tx_win = min_t(u16, chan->tx_win,
3186 L2CAP_DEFAULT_TX_WINDOW);
3187 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3189 chan->ack_win = chan->tx_win;
/* Build our outgoing Configure Request for a channel into `data`.
 * Emits MTU (if non-default), an RFC option matching the selected mode,
 * and — for ERTM/streaming — optional EFS, EWS and FCS options.
 * Returns the request length (return statement elided in this excerpt).
 */
3192 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3194 struct l2cap_conf_req *req = data;
3195 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3196 void *ptr = req->data;
3199 BT_DBG("chan %p", chan);
/* Only (re)select the mode on the first config exchange. */
3201 if (chan->num_conf_req || chan->num_conf_rsp)
3204 switch (chan->mode) {
3205 case L2CAP_MODE_STREAMING:
3206 case L2CAP_MODE_ERTM:
3207 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3210 if (__l2cap_efs_supported(chan->conn))
3211 set_bit(FLAG_EFS_ENABLE, &chan->flags)
3215 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3220 if (chan->imtu != L2CAP_DEFAULT_MTU)
3221 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3223 switch (chan->mode) {
3224 case L2CAP_MODE_BASIC:
/* Basic mode needs no RFC option unless the remote knows ERTM/streaming. */
3225 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3226 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3229 rfc.mode = L2CAP_MODE_BASIC;
3231 rfc.max_transmit = 0;
3232 rfc.retrans_timeout = 0;
3233 rfc.monitor_timeout = 0;
3234 rfc.max_pdu_size = 0;
3236 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3237 (unsigned long) &rfc);
3240 case L2CAP_MODE_ERTM:
3241 rfc.mode = L2CAP_MODE_ERTM;
3242 rfc.max_transmit = chan->max_tx;
3244 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size bounded by link MTU minus extended header/SDU-len/FCS. */
3246 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3247 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3249 rfc.max_pdu_size = cpu_to_le16(size);
3251 l2cap_txwin_setup(chan);
3253 rfc.txwin_size = min_t(u16, chan->tx_win,
3254 L2CAP_DEFAULT_TX_WINDOW);
3256 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3257 (unsigned long) &rfc);
3259 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3260 l2cap_add_opt_efs(&ptr, chan);
3262 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3263 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer "no FCS" when remote supports the FCS option and we don't need it. */
3266 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3267 if (chan->fcs == L2CAP_FCS_NONE ||
3268 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3269 chan->fcs = L2CAP_FCS_NONE;
3270 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3275 case L2CAP_MODE_STREAMING:
3276 l2cap_txwin_setup(chan);
3277 rfc.mode = L2CAP_MODE_STREAMING;
3279 rfc.max_transmit = 0;
3280 rfc.retrans_timeout = 0;
3281 rfc.monitor_timeout = 0;
3283 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3284 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3286 rfc.max_pdu_size = cpu_to_le16(size);
3288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3289 (unsigned long) &rfc);
3291 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3292 l2cap_add_opt_efs(&ptr, chan);
3294 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3295 if (chan->fcs == L2CAP_FCS_NONE ||
3296 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3297 chan->fcs = L2CAP_FCS_NONE;
3298 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3304 req->dcid = cpu_to_le16(chan->dcid);
3305 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (chan->conf_req) and
 * build our Configure Response into `data`. Walks all options, then
 * validates/negotiates mode, MTU, RFC, EFS, EWS and FCS. Returns the
 * response length, or -ECONNREFUSED when negotiation cannot proceed.
 */
3310 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3312 struct l2cap_conf_rsp *rsp = data;
3313 void *ptr = rsp->data;
3314 void *req = chan->conf_req;
3315 int len = chan->conf_len;
3316 int type, hint, olen;
3318 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3319 struct l2cap_conf_efs efs;
3321 u16 mtu = L2CAP_DEFAULT_MTU;
3322 u16 result = L2CAP_CONF_SUCCESS;
3325 BT_DBG("chan %p", chan);
/* First pass: decode every option in the request. */
3327 while (len >= L2CAP_CONF_OPT_SIZE) {
3328 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit: unknown hinted options may be ignored, others rejected. */
3330 hint = type & L2CAP_CONF_HINT;
3331 type &= L2CAP_CONF_MASK;
3334 case L2CAP_CONF_MTU:
3338 case L2CAP_CONF_FLUSH_TO:
3339 chan->flush_to = val;
3342 case L2CAP_CONF_QOS:
3345 case L2CAP_CONF_RFC:
3346 if (olen == sizeof(rfc))
3347 memcpy(&rfc, (void *) val, olen);
3350 case L2CAP_CONF_FCS:
3351 if (val == L2CAP_FCS_NONE)
3352 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3355 case L2CAP_CONF_EFS:
3357 if (olen == sizeof(efs))
3358 memcpy(&efs, (void *) val, olen);
3361 case L2CAP_CONF_EWS:
/* EWS requires high-speed support; otherwise refuse the connection. */
3362 if (!chan->conn->hs_enabled)
3363 return -ECONNREFUSED;
3365 set_bit(FLAG_EXT_CTRL, &chan->flags);
3366 set_bit(CONF_EWS_RECV, &chan->conf_state);
3367 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3368 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
3375 result = L2CAP_CONF_UNKNOWN;
3376 *((u8 *) ptr++) = type;
/* Mode may only be renegotiated during the first exchange. */
3381 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3384 switch (chan->mode) {
3385 case L2CAP_MODE_STREAMING:
3386 case L2CAP_MODE_ERTM:
3387 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3388 chan->mode = l2cap_select_mode(rfc.mode,
3389 chan->conn->feat_mask);
3394 if (__l2cap_efs_supported(chan->conn))
3395 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3397 return -ECONNREFUSED;
3400 if (chan->mode != rfc.mode)
3401 return -ECONNREFUSED;
/* Mode mismatch after first round: unacceptable, propose ours. */
3407 if (chan->mode != rfc.mode) {
3408 result = L2CAP_CONF_UNACCEPT;
3409 rfc.mode = chan->mode;
3411 if (chan->num_conf_rsp == 1)
3412 return -ECONNREFUSED;
3414 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3415 (unsigned long) &rfc);
3418 if (result == L2CAP_CONF_SUCCESS) {
3419 /* Configure output options and let the other side know
3420 * which ones we don't like. */
3422 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3423 result = L2CAP_CONF_UNACCEPT;
3426 set_bit(CONF_MTU_DONE, &chan->conf_state);
3428 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service types must be compatible unless one side is NO_TRAFFIC. */
3431 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3432 efs.stype != L2CAP_SERV_NOTRAFIC &&
3433 efs.stype != chan->local_stype) {
3435 result = L2CAP_CONF_UNACCEPT;
3437 if (chan->num_conf_req >= 1)
3438 return -ECONNREFUSED;
3440 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3442 (unsigned long) &efs);
3444 /* Send PENDING Conf Rsp */
3445 result = L2CAP_CONF_PENDING;
3446 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3451 case L2CAP_MODE_BASIC:
3452 chan->fcs = L2CAP_FCS_NONE;
3453 set_bit(CONF_MODE_DONE, &chan->conf_state);
3456 case L2CAP_MODE_ERTM:
3457 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3458 chan->remote_tx_win = rfc.txwin_size;
3460 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3462 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote MPS to what fits our link MTU. */
3464 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3465 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3466 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3467 rfc.max_pdu_size = cpu_to_le16(size);
3468 chan->remote_mps = size;
3470 __l2cap_set_ertm_timeouts(chan, &rfc);
3472 set_bit(CONF_MODE_DONE, &chan->conf_state);
3474 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3475 sizeof(rfc), (unsigned long) &rfc);
3477 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3478 chan->remote_id = efs.id;
3479 chan->remote_stype = efs.stype;
3480 chan->remote_msdu = le16_to_cpu(efs.msdu);
3481 chan->remote_flush_to =
3482 le32_to_cpu(efs.flush_to);
3483 chan->remote_acc_lat =
3484 le32_to_cpu(efs.acc_lat);
3485 chan->remote_sdu_itime =
3486 le32_to_cpu(efs.sdu_itime);
3487 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3489 (unsigned long) &efs);
3493 case L2CAP_MODE_STREAMING:
3494 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3495 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3496 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3497 rfc.max_pdu_size = cpu_to_le16(size);
3498 chan->remote_mps = size;
3500 set_bit(CONF_MODE_DONE, &chan->conf_state);
3502 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3503 (unsigned long) &rfc);
3508 result = L2CAP_CONF_UNACCEPT;
3510 memset(&rfc, 0, sizeof(rfc));
3511 rfc.mode = chan->mode;
3514 if (result == L2CAP_CONF_SUCCESS)
3515 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3517 rsp->scid = cpu_to_le16(chan->dcid);
3518 rsp->result = cpu_to_le16(result);
3519 rsp->flags = __constant_cpu_to_le16(0);
/* Parse the peer's Configure Response and build a follow-up Configure
 * Request into `data`, adjusting our parameters to the values the peer
 * accepted or proposed. *result is updated in place; returns the new
 * request length or -ECONNREFUSED on an unresolvable disagreement.
 */
3524 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3525 void *data, u16 *result)
3527 struct l2cap_conf_req *req = data;
3528 void *ptr = req->data;
3531 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3532 struct l2cap_conf_efs efs;
3534 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3536 while (len >= L2CAP_CONF_OPT_SIZE) {
3537 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3540 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: reject, fall back to min. */
3541 if (val < L2CAP_DEFAULT_MIN_MTU) {
3542 *result = L2CAP_CONF_UNACCEPT;
3543 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3546 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3549 case L2CAP_CONF_FLUSH_TO:
3550 chan->flush_to = val;
3551 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3555 case L2CAP_CONF_RFC:
3556 if (olen == sizeof(rfc))
3557 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot change mode once chosen. */
3559 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3560 rfc.mode != chan->mode)
3561 return -ECONNREFUSED;
3565 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3566 sizeof(rfc), (unsigned long) &rfc);
3569 case L2CAP_CONF_EWS:
3570 chan->ack_win = min_t(u16, val, chan->ack_win);
3571 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3575 case L2CAP_CONF_EFS:
3576 if (olen == sizeof(efs))
3577 memcpy(&efs, (void *)val, olen);
3579 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3580 efs.stype != L2CAP_SERV_NOTRAFIC &&
3581 efs.stype != chan->local_stype)
3582 return -ECONNREFUSED;
3584 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3585 (unsigned long) &efs);
3588 case L2CAP_CONF_FCS:
3589 if (*result == L2CAP_CONF_PENDING)
3590 if (val == L2CAP_FCS_NONE)
3591 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be silently upgraded; refuse a mode change. */
3597 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3598 return -ECONNREFUSED;
3600 chan->mode = rfc.mode;
3602 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3604 case L2CAP_MODE_ERTM:
3605 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3606 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3607 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3608 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3609 chan->ack_win = min_t(u16, chan->ack_win,
3612 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3613 chan->local_msdu = le16_to_cpu(efs.msdu);
3614 chan->local_sdu_itime =
3615 le32_to_cpu(efs.sdu_itime);
3616 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3617 chan->local_flush_to =
3618 le32_to_cpu(efs.flush_to);
3622 case L2CAP_MODE_STREAMING:
3623 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3627 req->dcid = cpu_to_le16(chan->dcid);
3628 req->flags = __constant_cpu_to_le16(0);
/* Fill in a minimal Configure Response header (scid/result/flags) with
 * no options; returns the response length (elided in this excerpt).
 */
3633 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3634 u16 result, u16 flags)
3636 struct l2cap_conf_rsp *rsp = data;
3637 void *ptr = rsp->data;
3639 BT_DBG("chan %p", chan);
3641 rsp->scid = cpu_to_le16(chan->dcid);
3642 rsp->result = cpu_to_le16(result);
3643 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect (or Create Channel) Response for a channel
 * whose acceptance was postponed, then kick off configuration by
 * sending our first Configure Request if one was not already sent.
 */
3648 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3650 struct l2cap_conn_rsp rsp;
3651 struct l2cap_conn *conn = chan->conn;
3655 rsp.scid = cpu_to_le16(chan->dcid);
3656 rsp.dcid = cpu_to_le16(chan->scid);
3657 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3658 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP-created channels answer with CREATE_CHAN_RSP instead. */
3661 rsp_code = L2CAP_CREATE_CHAN_RSP;
3663 rsp_code = L2CAP_CONN_RSP;
3665 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3667 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the initial config request once per channel. */
3669 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3672 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3673 l2cap_build_conf_req(chan, buf), buf);
3674 chan->num_conf_req++;
/* Extract the RFC (and extended-window) option from a successful
 * Configure Response and apply the agreed timeouts/MPS/ack window to
 * the channel. No-op for modes other than ERTM/streaming.
 */
3677 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3681 /* Use sane default values in case a misbehaving remote device
3682 * did not send an RFC or extended window size option.
3684 u16 txwin_ext = chan->ack_win;
3685 struct l2cap_conf_rfc rfc = {
3687 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3688 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3689 .max_pdu_size = cpu_to_le16(chan->imtu),
3690 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3693 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3695 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3698 while (len >= L2CAP_CONF_OPT_SIZE) {
3699 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3702 case L2CAP_CONF_RFC:
3703 if (olen == sizeof(rfc))
3704 memcpy(&rfc, (void *)val, olen);
3706 case L2CAP_CONF_EWS:
3713 case L2CAP_MODE_ERTM:
3714 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3715 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3716 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* With extended control, the EWS value bounds the ack window. */
3717 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3718 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3720 chan->ack_win = min_t(u16, chan->ack_win,
3723 case L2CAP_MODE_STREAMING:
3724 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it answers our outstanding
 * Information Request, stop the info timer, mark feature discovery
 * done, and proceed with connection setup.
 */
3728 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3729 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3732 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated/malformed reject payloads. */
3734 if (cmd_len < sizeof(*rej))
3737 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3740 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3741 cmd->ident == conn->info_ident) {
3742 cancel_delayed_work(&conn->info_timer);
3744 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3745 conn->info_ident = 0;
3747 l2cap_conn_start(conn);
/* Handle an incoming Connection (or Create Channel) Request: find a
 * listening channel for the PSM, security-check the link, create the
 * child channel, choose result/status, send the response, and — if the
 * connection succeeded — start configuration. Returns the new channel
 * (or NULL on rejection; some error paths elided in this excerpt).
 */
3753 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3754 struct l2cap_cmd_hdr *cmd,
3755 u8 *data, u8 rsp_code, u8 amp_id)
3757 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3758 struct l2cap_conn_rsp rsp;
3759 struct l2cap_chan *chan = NULL, *pchan;
3760 struct sock *parent, *sk = NULL;
3761 int result, status = L2CAP_CS_NO_INFO;
3763 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3764 __le16 psm = req->psm;
3766 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3768 /* Check if we have socket listening on psm */
3769 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3772 result = L2CAP_CR_BAD_PSM;
3778 mutex_lock(&conn->chan_lock);
3781 /* Check if the ACL is secure enough (if not SDP) */
3782 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3783 !hci_conn_check_link_mode(conn->hcon)) {
3784 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3785 result = L2CAP_CR_SEC_BLOCK;
3789 result = L2CAP_CR_NO_MEM;
3791 /* Check if we already have channel with that dcid */
3792 if (__l2cap_get_chan_by_dcid(conn, scid))
3795 chan = pchan->ops->new_connection(pchan);
3801 /* For certain devices (ex: HID mouse), support for authentication,
3802 * pairing and bonding is optional. For such devices, in order to avoid
3803 * keeping the ACL alive for too long after L2CAP disconnection, reset
3804 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3806 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3808 bacpy(&chan->src, &conn->hcon->src);
3809 bacpy(&chan->dst, &conn->hcon->dst);
3810 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3811 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3814 chan->local_amp_id = amp_id;
3816 __l2cap_chan_add(conn, chan);
3820 __set_chan_timer(chan, sk->sk_sndtimeo);
3822 chan->ident = cmd->ident;
/* Only decide the result once remote features are known. */
3824 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3825 if (l2cap_chan_check_security(chan)) {
3826 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3827 __l2cap_state_change(chan, BT_CONNECT2);
3828 result = L2CAP_CR_PEND;
3829 status = L2CAP_CS_AUTHOR_PEND;
3830 chan->ops->defer(chan);
3832 /* Force pending result for AMP controllers.
3833 * The connection will succeed after the
3834 * physical link is up.
3836 if (amp_id == AMP_ID_BREDR) {
3837 __l2cap_state_change(chan, BT_CONFIG);
3838 result = L2CAP_CR_SUCCESS;
3840 __l2cap_state_change(chan, BT_CONNECT2);
3841 result = L2CAP_CR_PEND;
3843 status = L2CAP_CS_NO_INFO;
3846 __l2cap_state_change(chan, BT_CONNECT2);
3847 result = L2CAP_CR_PEND;
3848 status = L2CAP_CS_AUTHEN_PEND;
3851 __l2cap_state_change(chan, BT_CONNECT2);
3852 result = L2CAP_CR_PEND;
3853 status = L2CAP_CS_NO_INFO;
3857 release_sock(parent);
3858 mutex_unlock(&conn->chan_lock);
3861 rsp.scid = cpu_to_le16(scid);
3862 rsp.dcid = cpu_to_le16(dcid);
3863 rsp.result = cpu_to_le16(result);
3864 rsp.status = cpu_to_le16(status);
3865 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Pending with NO_INFO: we still need the remote's feature mask. */
3867 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3868 struct l2cap_info_req info;
3869 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3871 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3872 conn->info_ident = l2cap_get_ident(conn);
3874 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3876 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3877 sizeof(info), &info);
/* Connection accepted: kick off configuration immediately. */
3880 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3881 result == L2CAP_CR_SUCCESS) {
3883 set_bit(CONF_REQ_SENT, &chan->conf_state);
3884 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3885 l2cap_build_conf_req(chan, buf), buf);
3886 chan->num_conf_req++;
/* Handle an incoming Connection Request on the signalling channel:
 * validate length, notify mgmt of the device connection once, then
 * delegate to l2cap_connect() with a CONN_RSP response code.
 */
3892 static int l2cap_connect_req(struct l2cap_conn *conn,
3893 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3895 struct hci_dev *hdev = conn->hcon->hdev;
3896 struct hci_conn *hcon = conn->hcon;
3898 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report device-connected to mgmt only the first time. */
3902 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3903 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3904 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3905 hcon->dst_type, 0, NULL, 0,
3907 hci_dev_unlock(hdev);
3909 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection/Create Channel Response: look up the channel by
 * scid (or by the request ident when no scid was assigned yet) and act
 * on the result — SUCCESS starts configuration, PEND marks the channel
 * pending, any other result tears the channel down.
 */
3913 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3914 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3917 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3918 u16 scid, dcid, result, status;
3919 struct l2cap_chan *chan;
3923 if (cmd_len < sizeof(*rsp))
3926 scid = __le16_to_cpu(rsp->scid);
3927 dcid = __le16_to_cpu(rsp->dcid);
3928 result = __le16_to_cpu(rsp->result);
3929 status = __le16_to_cpu(rsp->status);
3931 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3932 dcid, scid, result, status);
3934 mutex_lock(&conn->chan_lock);
3937 chan = __l2cap_get_chan_by_scid(conn, scid);
/* Fall back to matching the response ident when scid is unknown. */
3943 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3952 l2cap_chan_lock(chan);
3955 case L2CAP_CR_SUCCESS:
3956 l2cap_state_change(chan, BT_CONFIG);
3959 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3961 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3964 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3965 l2cap_build_conf_req(chan, req), req);
3966 chan->num_conf_req++;
3970 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: the peer refused — tear the channel down. */
3974 l2cap_chan_del(chan, ECONNREFUSED);
3978 l2cap_chan_unlock(chan);
3981 mutex_unlock(&conn->chan_lock);
/* Apply the negotiated FCS setting after configuration completes. */
3986 static inline void set_default_fcs(struct l2cap_chan *chan)
3988 /* FCS is enabled only in ERTM or streaming mode, if one or both
3991 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3992 chan->fcs = L2CAP_FCS_NONE;
3993 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3994 chan->fcs = L2CAP_FCS_CRC16;
/* Clear the local pending-config state and send a SUCCESS Configure
 * Response (used to resolve an EFS-pending negotiation).
 */
3997 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3998 u8 ident, u16 flags)
4000 struct l2cap_conn *conn = chan->conn;
4002 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4005 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4006 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4008 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4009 l2cap_build_conf_rsp(chan, data,
4010 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configure Request. Accumulates option data across
 * continuation fragments in chan->conf_req, then on the final fragment
 * parses it, replies, and — once both directions are configured —
 * initializes ERTM state and marks the channel ready.
 */
4013 static inline int l2cap_config_req(struct l2cap_conn *conn,
4014 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4017 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4020 struct l2cap_chan *chan;
4023 if (cmd_len < sizeof(*req))
4026 dcid = __le16_to_cpu(req->dcid);
4027 flags = __le16_to_cpu(req->flags);
4029 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4031 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; reject otherwise. */
4035 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4036 struct l2cap_cmd_rej_cid rej;
4038 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4039 rej.scid = cpu_to_le16(chan->scid);
4040 rej.dcid = cpu_to_le16(chan->dcid);
4042 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
4047 /* Reject if config buffer is too small. */
4048 len = cmd_len - sizeof(*req);
4049 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4050 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4051 l2cap_build_conf_rsp(chan, rsp,
4052 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the per-channel config buffer. */
4057 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4058 chan->conf_len += len;
4060 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4061 /* Incomplete config. Send empty response. */
4062 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4063 l2cap_build_conf_rsp(chan, rsp,
4064 L2CAP_CONF_SUCCESS, flags), rsp);
4068 /* Complete config. */
4069 len = l2cap_parse_conf_req(chan, rsp);
4071 l2cap_send_disconn_req(chan, ECONNRESET);
4075 chan->ident = cmd->ident;
4076 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4077 chan->num_conf_rsp++;
4079 /* Reset config buffer. */
4082 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both sides configured: finalize FCS/ERTM and go ready. */
4085 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4086 set_default_fcs(chan);
4088 if (chan->mode == L2CAP_MODE_ERTM ||
4089 chan->mode == L2CAP_MODE_STREAMING)
4090 err = l2cap_ertm_init(chan);
4093 l2cap_send_disconn_req(chan, -err);
4095 l2cap_chan_ready(chan);
4100 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4102 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4103 l2cap_build_conf_req(chan, buf), buf);
4104 chan->num_conf_req++;
4107 /* Got Conf Rsp PENDING from remote side and assume we sent
4108 Conf Rsp PENDING in the code above */
4109 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4110 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4112 /* check compatibility */
4114 /* Send rsp for BR/EDR channel */
4116 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4118 chan->ident = cmd->ident;
4122 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response. SUCCESS applies the agreed
 * RFC parameters; PENDING may trigger an EFS follow-up (or an AMP
 * logical link); UNACCEPT retries with the peer's proposed values up to
 * a limit; anything else disconnects the channel. When both directions
 * are done, initialize ERTM and mark the channel ready.
 */
4126 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4127 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4130 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4131 u16 scid, flags, result;
4132 struct l2cap_chan *chan;
4133 int len = cmd_len - sizeof(*rsp);
4136 if (cmd_len < sizeof(*rsp))
4139 scid = __le16_to_cpu(rsp->scid);
4140 flags = __le16_to_cpu(rsp->flags);
4141 result = __le16_to_cpu(rsp->result);
4143 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4146 chan = l2cap_get_chan_by_scid(conn, scid);
4151 case L2CAP_CONF_SUCCESS:
4152 l2cap_conf_rfc_get(chan, rsp->data, len);
4153 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4156 case L2CAP_CONF_PENDING:
4157 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4159 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4162 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4165 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR answers immediately; AMP waits for the logical link. */
4169 if (!chan->hs_hcon) {
4170 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4173 if (l2cap_check_efs(chan)) {
4174 amp_create_logical_link(chan);
4175 chan->ident = cmd->ident;
4181 case L2CAP_CONF_UNACCEPT:
/* Retry with the peer's counter-proposal, bounded by MAX_CONF_RSP. */
4182 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4185 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4186 l2cap_send_disconn_req(chan, ECONNRESET);
4190 /* throw out any old stored conf requests */
4191 result = L2CAP_CONF_SUCCESS;
4192 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4195 l2cap_send_disconn_req(chan, ECONNRESET);
4199 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4200 L2CAP_CONF_REQ, len, req);
4201 chan->num_conf_req++;
4202 if (result != L2CAP_CONF_SUCCESS)
4208 l2cap_chan_set_err(chan, ECONNRESET);
4210 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4211 l2cap_send_disconn_req(chan, ECONNRESET);
4215 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4218 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4220 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4221 set_default_fcs(chan);
4223 if (chan->mode == L2CAP_MODE_ERTM ||
4224 chan->mode == L2CAP_MODE_STREAMING)
4225 err = l2cap_ertm_init(chan);
4228 l2cap_send_disconn_req(chan, -err);
4230 l2cap_chan_ready(chan);
4234 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, mark the socket shut down, and delete the
 * channel (a reference is held across unlock so close() is safe).
 */
4238 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4239 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4242 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4243 struct l2cap_disconn_rsp rsp;
4245 struct l2cap_chan *chan;
4248 if (cmd_len != sizeof(*req))
4251 scid = __le16_to_cpu(req->scid);
4252 dcid = __le16_to_cpu(req->dcid);
4254 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4256 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, hence the by-scid lookup on dcid. */
4258 chan = __l2cap_get_chan_by_scid(conn, dcid);
4260 mutex_unlock(&conn->chan_lock);
4264 l2cap_chan_lock(chan);
4268 rsp.dcid = cpu_to_le16(chan->scid);
4269 rsp.scid = cpu_to_le16(chan->dcid);
4270 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4273 sk->sk_shutdown = SHUTDOWN_MASK;
/* Hold a ref so chan survives del/unlock until ops->close() returns. */
4276 l2cap_chan_hold(chan);
4277 l2cap_chan_del(chan, ECONNRESET);
4279 l2cap_chan_unlock(chan);
4281 chan->ops->close(chan);
4282 l2cap_chan_put(chan);
4284 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect, so delete the channel with no error. A temporary ref
 * keeps chan valid across unlock for ops->close().
 */
4289 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4290 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4293 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4295 struct l2cap_chan *chan;
4297 if (cmd_len != sizeof(*rsp))
4300 scid = __le16_to_cpu(rsp->scid);
4301 dcid = __le16_to_cpu(rsp->dcid);
4303 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4305 mutex_lock(&conn->chan_lock);
4307 chan = __l2cap_get_chan_by_scid(conn, scid);
4309 mutex_unlock(&conn->chan_lock);
4313 l2cap_chan_lock(chan);
4315 l2cap_chan_hold(chan);
4316 l2cap_chan_del(chan, 0);
4318 l2cap_chan_unlock(chan);
4320 chan->ops->close(chan);
4321 l2cap_chan_put(chan);
4323 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request: answer FEAT_MASK with our
 * feature mask (adding ERTM/streaming and, if high-speed is enabled,
 * extended flow/window), FIXED_CHAN with the fixed-channel bitmap, and
 * anything else with NOT_SUPPORTED.
 */
4328 static inline int l2cap_information_req(struct l2cap_conn *conn,
4329 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4332 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4335 if (cmd_len != sizeof(*req))
4338 type = __le16_to_cpu(req->type);
4340 BT_DBG("type 0x%4.4x", type);
4342 if (type == L2CAP_IT_FEAT_MASK) {
4344 u32 feat_mask = l2cap_feat_mask;
4345 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4346 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4347 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4349 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4351 if (conn->hs_enabled)
4352 feat_mask |= L2CAP_FEAT_EXT_FLOW
4353 | L2CAP_FEAT_EXT_WINDOW;
4355 put_unaligned_le32(feat_mask, rsp->data);
4356 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4358 } else if (type == L2CAP_IT_FIXED_CHAN) {
4360 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when high-speed is on. */
4362 if (conn->hs_enabled)
4363 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4365 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4367 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4368 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4369 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4370 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4373 struct l2cap_info_rsp rsp;
4374 rsp.type = cpu_to_le16(type);
4375 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4376 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response. After the feature mask
 * arrives, optionally request the fixed-channel map; once discovery is
 * complete (or failed), mark it done and start pending connections.
 */
4383 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4384 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4387 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4390 if (cmd_len < sizeof(*rsp))
4393 type = __le16_to_cpu(rsp->type);
4394 result = __le16_to_cpu(rsp->result);
4396 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4398 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4399 if (cmd->ident != conn->info_ident ||
4400 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4403 cancel_delayed_work(&conn->info_timer);
4405 if (result != L2CAP_IR_SUCCESS) {
4406 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4407 conn->info_ident = 0;
4409 l2cap_conn_start(conn);
4415 case L2CAP_IT_FEAT_MASK:
4416 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Chain a FIXED_CHAN query if the peer supports fixed channels. */
4418 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4419 struct l2cap_info_req req;
4420 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4422 conn->info_ident = l2cap_get_ident(conn);
4424 l2cap_send_cmd(conn, conn->info_ident,
4425 L2CAP_INFO_REQ, sizeof(req), &req);
4427 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4428 conn->info_ident = 0;
4430 l2cap_conn_start(conn);
4434 case L2CAP_IT_FIXED_CHAN:
4435 conn->fixed_chan_mask = rsp->data[0];
4436 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4437 conn->info_ident = 0;
4439 l2cap_conn_start(conn);
/* Handle an incoming Create Channel Request (AMP).  amp_id 0 falls back to a
 * plain BR/EDR connect; otherwise the AMP controller id is validated and the
 * created channel is bound to the high-speed link.  Failure paths (elided
 * labels in this extract) answer with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
4447 struct l2cap_cmd_hdr *cmd,
4448 u16 cmd_len, void *data)
4450 struct l2cap_create_chan_req *req = data;
4451 struct l2cap_create_chan_rsp rsp;
4452 struct l2cap_chan *chan;
4453 struct hci_dev *hdev;
4456 if (cmd_len != sizeof(*req))
/* Creating AMP channels requires high-speed support to be enabled. */
4459 if (!conn->hs_enabled)
4462 psm = le16_to_cpu(req->psm);
4463 scid = le16_to_cpu(req->scid);
4465 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4467 /* For controller id 0 make BR/EDR connection */
4468 if (req->amp_id == AMP_ID_BREDR) {
4469 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4474 /* Validate AMP controller id */
4475 hdev = hci_dev_get(req->amp_id);
4479 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4484 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4487 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4488 struct hci_conn *hs_hcon;
/* Look up the existing high-speed link to the same peer on that AMP. */
4490 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4497 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4499 mgr->bredr_chan = chan;
4500 chan->hs_hcon = hs_hcon;
/* AMP data path does not use the L2CAP FCS; MTU follows the AMP block size. */
4501 chan->fcs = L2CAP_FCS_NONE;
4502 conn->mtu = hdev->block_mtu;
/* Error path: reject with "bad AMP id"; scid echoed back to the requester. */
4511 rsp.scid = cpu_to_le16(scid);
4512 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4513 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4515 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4521 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4523 struct l2cap_move_chan_req req;
4526 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4528 ident = l2cap_get_ident(chan->conn);
4529 chan->ident = ident;
4531 req.icid = cpu_to_le16(chan->scid);
4532 req.dest_amp_id = dest_amp_id;
4534 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4537 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4540 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4542 struct l2cap_move_chan_rsp rsp;
4544 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4546 rsp.icid = cpu_to_le16(chan->dcid);
4547 rsp.result = cpu_to_le16(result);
4549 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4553 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4555 struct l2cap_move_chan_cfm cfm;
4557 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4559 chan->ident = l2cap_get_ident(chan->conn);
4561 cfm.icid = cpu_to_le16(chan->scid);
4562 cfm.result = cpu_to_le16(result);
4564 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4567 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4570 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4572 struct l2cap_move_chan_cfm cfm;
4574 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4576 cfm.icid = cpu_to_le16(icid);
4577 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4579 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4583 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4586 struct l2cap_move_chan_cfm_rsp rsp;
4588 BT_DBG("icid 0x%4.4x", icid);
4590 rsp.icid = cpu_to_le16(icid);
4591 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4594 static void __release_logical_link(struct l2cap_chan *chan)
4596 chan->hs_hchan = NULL;
4597 chan->hs_hcon = NULL;
4599 /* Placeholder - release the logical link */
/* Logical-link setup failed: either abort channel creation (disconnect) or
 * unwind an in-progress channel move, depending on role/state.  Breaks and
 * returns between the cases are elided in this extract.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
4604 /* Logical link setup failed */
4605 if (chan->state != BT_CONNECTED) {
4606 /* Create channel failure, disconnect */
4607 l2cap_send_disconn_req(chan, ECONNRESET);
4611 switch (chan->move_role) {
4612 case L2CAP_MOVE_ROLE_RESPONDER:
/* Tell the initiator the move is not supported on our side. */
4613 l2cap_move_done(chan);
4614 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4616 case L2CAP_MOVE_ROLE_INITIATOR:
4617 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4618 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4619 /* Remote has only sent pending or
4620 * success responses, clean up
4622 l2cap_move_done(chan);
4625 /* Other amp move states imply that the move
4626 * has already aborted
4628 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Logical link came up for a newly created AMP channel: bind the hci_chan,
 * finish the deferred EFS configure response, and complete ERTM init once
 * both configure directions are done.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4634 struct hci_chan *hchan)
4636 struct l2cap_conf_rsp rsp;
4638 chan->hs_hchan = hchan;
4639 chan->hs_hcon->l2cap_data = chan->conn;
/* Send the configure response that was held back pending the logical link. */
4641 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4643 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4646 set_default_fcs(chan);
4648 err = l2cap_ertm_init(chan);
/* ERTM init failure tears the channel down; otherwise it goes ready. */
4650 l2cap_send_disconn_req(chan, -err);
4652 l2cap_chan_ready(chan);
/* Logical link came up while moving an existing channel: advance the move
 * state machine according to role and local-busy status.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4657 struct hci_chan *hchan)
4659 chan->hs_hcon = hchan->conn;
4660 chan->hs_hcon->l2cap_data = chan->conn;
4662 BT_DBG("move_state %d", chan->move_state);
4664 switch (chan->move_state) {
4665 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4666 /* Move confirm will be sent after a success
4667 * response is received
4669 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4671 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local busy defers the confirm; otherwise confirm (initiator) or
 * respond success (responder) immediately.
 */
4672 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4673 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4674 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4675 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4676 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4677 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4678 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4679 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4683 /* Move was not in expected state, free the channel */
4684 __release_logical_link(chan);
4686 chan->move_state = L2CAP_MOVE_STABLE;
4690 /* Call with chan locked */
/* Logical-link confirmation entry point: dispatch to the failure path or to
 * the create/move completion handlers.  The status check guarding the
 * failure branch is elided in this extract.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4694 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4697 l2cap_logical_fail(chan);
4698 __release_logical_link(chan);
/* Not yet BT_CONNECTED => this link completes channel creation;
 * otherwise it completes a channel move.
 */
4702 if (chan->state != BT_CONNECTED) {
4703 /* Ignore logical link if channel is on BR/EDR */
4704 if (chan->local_amp_id != AMP_ID_BREDR)
4705 l2cap_logical_finish_create(chan, hchan);
4707 l2cap_logical_finish_move(chan, hchan);
/* Begin moving this channel: from BR/EDR start physical-link preparation
 * (only if policy prefers AMP); from an AMP, quiesce and request a move
 * back to BR/EDR (dest_amp_id 0).
 */
void l2cap_move_start(struct l2cap_chan *chan)
4713 BT_DBG("chan %p", chan);
4715 if (chan->local_amp_id == AMP_ID_BREDR) {
4716 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4718 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4719 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4720 /* Placeholder - start physical link setup */
4722 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4723 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4725 l2cap_move_setup(chan);
/* Destination 0 == move back to the BR/EDR controller. */
4726 l2cap_send_move_chan_req(chan, 0);
/* Physical link result for a channel being created over AMP.  Outgoing
 * (BT_CONNECT): proceed with Create Channel or fall back to BR/EDR connect.
 * Incoming: answer the pending Create Channel request and, on success, start
 * configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
4731 u8 local_amp_id, u8 remote_amp_id)
4733 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4734 local_amp_id, remote_amp_id);
/* AMP channels never use the L2CAP FCS. */
4736 chan->fcs = L2CAP_FCS_NONE;
4738 /* Outgoing channel on AMP */
4739 if (chan->state == BT_CONNECT) {
4740 if (result == L2CAP_CR_SUCCESS) {
4741 chan->local_amp_id = local_amp_id;
4742 l2cap_send_create_chan_req(chan, remote_amp_id);
4744 /* Revert to BR/EDR connect */
4745 l2cap_send_conn_req(chan);
4751 /* Incoming channel on AMP */
4752 if (__l2cap_no_conn_pending(chan)) {
4753 struct l2cap_conn_rsp rsp;
4755 rsp.scid = cpu_to_le16(chan->dcid);
4756 rsp.dcid = cpu_to_le16(chan->scid);
4758 if (result == L2CAP_CR_SUCCESS) {
4759 /* Send successful response */
4760 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4761 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4763 /* Send negative response */
4764 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4765 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4768 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* On success, move to BT_CONFIG and fire the first configure request. */
4771 if (result == L2CAP_CR_SUCCESS) {
4772 __l2cap_state_change(chan, BT_CONFIG);
4773 set_bit(CONF_REQ_SENT, &chan->conf_state);
4774 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4776 l2cap_build_conf_req(chan, buf), buf);
4777 chan->num_conf_req++;
4782 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4785 l2cap_move_setup(chan);
4786 chan->move_id = local_amp_id;
4787 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4789 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer the move according to logical-link readiness.
 * NOTE(review): the code that obtains hchan is a placeholder elided here,
 * so hchan stays NULL in this extract — the dereference below depends on
 * that placeholder being filled in.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4794 struct hci_chan *hchan = NULL;
4796 /* Placeholder - get hci_chan for logical link */
4799 if (hchan->state == BT_CONNECTED) {
4800 /* Logical link is ready to go */
4801 chan->hs_hcon = hchan->conn;
4802 chan->hs_hcon->l2cap_data = chan->conn;
4803 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4804 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4806 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4808 /* Wait for logical link to be ready */
4809 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4812 /* Logical link not available */
4813 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4817 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4819 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4821 if (result == -EINVAL)
4822 rsp_result = L2CAP_MR_BAD_ID;
4824 rsp_result = L2CAP_MR_NOT_ALLOWED;
4826 l2cap_send_move_chan_rsp(chan, rsp_result);
4829 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4830 chan->move_state = L2CAP_MOVE_STABLE;
4832 /* Restart data transmission */
4833 l2cap_ertm_send(chan);
4836 /* Invoke with locked chan */
/* Physical-link confirmation: for channels still being created, continue
 * creation; for established channels, continue or cancel the move based on
 * role and result.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4839 u8 local_amp_id = chan->local_amp_id;
4840 u8 remote_amp_id = chan->remote_amp_id;
4842 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4843 chan, result, local_amp_id, remote_amp_id);
/* Channel is going away: nothing to do (early-exit path; the return after
 * the unlock is elided in this extract).
 */
4845 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4846 l2cap_chan_unlock(chan);
4850 if (chan->state != BT_CONNECTED) {
4851 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4852 } else if (result != L2CAP_MR_SUCCESS) {
4853 l2cap_do_move_cancel(chan, result);
4855 switch (chan->move_role) {
4856 case L2CAP_MOVE_ROLE_INITIATOR:
4857 l2cap_do_move_initiate(chan, local_amp_id,
4860 case L2CAP_MOVE_ROLE_RESPONDER:
4861 l2cap_do_move_respond(chan, result);
/* Unexpected role: treat as a cancel. */
4864 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request: validate the channel and target
 * controller, resolve move collisions by BD_ADDR comparison, then either
 * start the responder state machine or reject.  The send_move_response
 * label and several error-path lines are elided in this extract.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4871 struct l2cap_cmd_hdr *cmd,
4872 u16 cmd_len, void *data)
4874 struct l2cap_move_chan_req *req = data;
4875 struct l2cap_move_chan_rsp rsp;
4876 struct l2cap_chan *chan;
4878 u16 result = L2CAP_MR_NOT_ALLOWED;
4880 if (cmd_len != sizeof(*req))
4883 icid = le16_to_cpu(req->icid);
4885 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4887 if (!conn->hs_enabled)
/* No channel for this icid: answer NOT_ALLOWED directly. */
4890 chan = l2cap_get_chan_by_dcid(conn, icid);
4892 rsp.icid = cpu_to_le16(icid);
4893 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4894 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4899 chan->ident = cmd->ident;
/* Only dynamic, AMP-eligible, ERTM/streaming channels may move. */
4901 if (chan->scid < L2CAP_CID_DYN_START ||
4902 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4903 (chan->mode != L2CAP_MODE_ERTM &&
4904 chan->mode != L2CAP_MODE_STREAMING)) {
4905 result = L2CAP_MR_NOT_ALLOWED;
4906 goto send_move_response;
4909 if (chan->local_amp_id == req->dest_amp_id) {
4910 result = L2CAP_MR_SAME_ID;
4911 goto send_move_response;
4914 if (req->dest_amp_id != AMP_ID_BREDR) {
4915 struct hci_dev *hdev;
4916 hdev = hci_dev_get(req->dest_amp_id);
4917 if (!hdev || hdev->dev_type != HCI_AMP ||
4918 !test_bit(HCI_UP, &hdev->flags)) {
4922 result = L2CAP_MR_BAD_ID;
4923 goto send_move_response;
4928 /* Detect a move collision. Only send a collision response
4929 * if this side has "lost", otherwise proceed with the move.
4930 * The winner has the larger bd_addr.
4932 if ((__chan_is_moving(chan) ||
4933 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4934 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4935 result = L2CAP_MR_COLLISION;
4936 goto send_move_response;
4939 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4940 l2cap_move_setup(chan);
4941 chan->move_id = req->dest_amp_id;
4944 if (req->dest_amp_id == AMP_ID_BREDR) {
4945 /* Moving to BR/EDR */
4946 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4947 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4948 result = L2CAP_MR_PEND;
4950 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4951 result = L2CAP_MR_SUCCESS;
4954 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4955 /* Placeholder - uncomment when amp functions are available */
4956 /*amp_accept_physical(chan, req->dest_amp_id);*/
4957 result = L2CAP_MR_PEND;
4961 l2cap_send_move_chan_rsp(chan, result);
4963 l2cap_chan_unlock(chan);
/* Continue an initiator-side move after a SUCCESS or PEND Move Channel
 * Response, advancing the state machine based on logical-link readiness.
 * If the channel cannot be found, send an unconfirmed cfm for the icid.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4970 struct l2cap_chan *chan;
4971 struct hci_chan *hchan = NULL;
4973 chan = l2cap_get_chan_by_scid(conn, icid);
4975 l2cap_send_move_chan_cfm_icid(conn, icid);
/* A PEND response extends the wait with the ERTX timeout. */
4979 __clear_chan_timer(chan);
4980 if (result == L2CAP_MR_PEND)
4981 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4983 switch (chan->move_state) {
4984 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4985 /* Move confirm will be sent when logical link
4988 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4990 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4991 if (result == L2CAP_MR_PEND) {
4993 } else if (test_bit(CONN_LOCAL_BUSY,
4994 &chan->conn_state)) {
4995 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4997 /* Logical link is up or moving to BR/EDR,
5000 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5001 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5004 case L2CAP_MOVE_WAIT_RSP:
5006 if (result == L2CAP_MR_SUCCESS) {
5007 /* Remote is ready, send confirm immediately
5008 * after logical link is ready
5010 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5012 /* Both logical link and move success
5013 * are required to confirm
5015 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5018 /* Placeholder - get hci_chan for logical link */
/* NOTE(review): hchan acquisition is a placeholder, so hchan is NULL on
 * this path in this extract; the dereferences below rely on it being
 * filled in.
 */
5020 /* Logical link not available */
5021 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5025 /* If the logical link is not yet connected, do not
5026 * send confirmation.
5028 if (hchan->state != BT_CONNECTED)
5031 /* Logical link is already ready to go */
5033 chan->hs_hcon = hchan->conn;
5034 chan->hs_hcon->l2cap_data = chan->conn;
5036 if (result == L2CAP_MR_SUCCESS) {
5037 /* Can confirm now */
5038 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5040 /* Now only need move success
5043 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5046 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5049 /* Any other amp move state means the move failed. */
5050 chan->move_id = chan->local_amp_id;
5051 l2cap_move_done(chan);
5052 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5055 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response: on collision the initiator yields
 * and becomes responder; otherwise the move is cancelled and an unconfirmed
 * confirmation is sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5061 struct l2cap_chan *chan;
5063 chan = l2cap_get_chan_by_ident(conn, ident);
5065 /* Could not locate channel, icid is best guess */
5066 l2cap_send_move_chan_cfm_icid(conn, icid);
5070 __clear_chan_timer(chan);
5072 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5073 if (result == L2CAP_MR_COLLISION) {
/* Lost the collision: let the peer drive the move. */
5074 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5076 /* Cleanup - cancel move */
5077 chan->move_id = chan->local_amp_id;
5078 l2cap_move_done(chan);
5082 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5084 l2cap_chan_unlock(chan);
5087 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5088 struct l2cap_cmd_hdr *cmd,
5089 u16 cmd_len, void *data)
5091 struct l2cap_move_chan_rsp *rsp = data;
5094 if (cmd_len != sizeof(*rsp))
5097 icid = le16_to_cpu(rsp->icid);
5098 result = le16_to_cpu(rsp->result);
5100 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5102 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5103 l2cap_move_continue(conn, icid, result);
5105 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirmation: commit or roll back the controller
 * switch, then acknowledge with a confirm response (sent even when no
 * channel matches, as the spec requires).
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5111 struct l2cap_cmd_hdr *cmd,
5112 u16 cmd_len, void *data)
5114 struct l2cap_move_chan_cfm *cfm = data;
5115 struct l2cap_chan *chan;
5118 if (cmd_len != sizeof(*cfm))
5121 icid = le16_to_cpu(cfm->icid);
5122 result = le16_to_cpu(cfm->result);
5124 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5126 chan = l2cap_get_chan_by_dcid(conn, icid);
5128 /* Spec requires a response even if the icid was not found */
5129 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5133 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5134 if (result == L2CAP_MC_CONFIRMED) {
/* Confirmed: adopt the new controller; drop the logical link when
 * the channel ends up back on BR/EDR.
 */
5135 chan->local_amp_id = chan->move_id;
5136 if (chan->local_amp_id == AMP_ID_BREDR)
5137 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller. */
5139 chan->move_id = chan->local_amp_id;
5142 l2cap_move_done(chan);
5145 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5147 l2cap_chan_unlock(chan);
5152 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5153 struct l2cap_cmd_hdr *cmd,
5154 u16 cmd_len, void *data)
5156 struct l2cap_move_chan_cfm_rsp *rsp = data;
5157 struct l2cap_chan *chan;
5160 if (cmd_len != sizeof(*rsp))
5163 icid = le16_to_cpu(rsp->icid);
5165 BT_DBG("icid 0x%4.4x", icid);
5167 chan = l2cap_get_chan_by_scid(conn, icid);
5171 __clear_chan_timer(chan);
5173 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5174 chan->local_amp_id = chan->move_id;
5176 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5177 __release_logical_link(chan);
5179 l2cap_move_done(chan);
5182 l2cap_chan_unlock(chan);
5187 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5192 if (min > max || min < 6 || max > 3200)
5195 if (to_multiplier < 10 || to_multiplier > 3200)
5198 if (max >= to_multiplier * 8)
5201 max_latency = (to_multiplier * 8 / max) - 1;
5202 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed parameters, answer accept/reject, and apply
 * accepted parameters to the link.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5209 struct l2cap_cmd_hdr *cmd,
5212 struct hci_conn *hcon = conn->hcon;
5213 struct l2cap_conn_param_update_req *req;
5214 struct l2cap_conn_param_update_rsp rsp;
5215 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may act on this request. */
5218 if (!(hcon->link_mode & HCI_LM_MASTER))
5221 cmd_len = __le16_to_cpu(cmd->len);
5222 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5225 req = (struct l2cap_conn_param_update_req *) data;
5226 min = __le16_to_cpu(req->min);
5227 max = __le16_to_cpu(req->max);
5228 latency = __le16_to_cpu(req->latency);
5229 to_multiplier = __le16_to_cpu(req->to_multiplier);
5231 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5232 min, max, latency, to_multiplier);
5234 memset(&rsp, 0, sizeof(rsp));
5236 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5238 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5240 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5242 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted parameters are pushed to the controller. */
5246 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler.  Echo requests are
 * answered inline by reflecting the payload.  Per-case break statements are
 * elided in this extract.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5252 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5257 switch (cmd->code) {
5258 case L2CAP_COMMAND_REJ:
5259 l2cap_command_rej(conn, cmd, cmd_len, data);
5262 case L2CAP_CONN_REQ:
5263 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5266 case L2CAP_CONN_RSP:
/* Connect and Create Channel responses share one handler. */
5267 case L2CAP_CREATE_CHAN_RSP:
5268 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5271 case L2CAP_CONF_REQ:
5272 err = l2cap_config_req(conn, cmd, cmd_len, data);
5275 case L2CAP_CONF_RSP:
5276 l2cap_config_rsp(conn, cmd, cmd_len, data);
5279 case L2CAP_DISCONN_REQ:
5280 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5283 case L2CAP_DISCONN_RSP:
5284 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5287 case L2CAP_ECHO_REQ:
/* Echo the received payload straight back. */
5288 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5291 case L2CAP_ECHO_RSP:
5294 case L2CAP_INFO_REQ:
5295 err = l2cap_information_req(conn, cmd, cmd_len, data);
5298 case L2CAP_INFO_RSP:
5299 l2cap_information_rsp(conn, cmd, cmd_len, data);
5302 case L2CAP_CREATE_CHAN_REQ:
5303 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5306 case L2CAP_MOVE_CHAN_REQ:
5307 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5310 case L2CAP_MOVE_CHAN_RSP:
5311 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5314 case L2CAP_MOVE_CHAN_CFM:
5315 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5318 case L2CAP_MOVE_CHAN_CFM_RSP:
5319 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5323 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5331 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5332 struct l2cap_cmd_hdr *cmd, u8 *data)
5334 switch (cmd->code) {
5335 case L2CAP_COMMAND_REJ:
5338 case L2CAP_CONN_PARAM_UPDATE_REQ:
5339 return l2cap_conn_param_update_req(conn, cmd, data);
5341 case L2CAP_CONN_PARAM_UPDATE_RSP:
5345 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5350 static __le16 l2cap_err_to_reason(int err)
5354 return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
5356 return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED);
5360 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
/* Parse the LE signalling channel: exactly one command per PDU.  Malformed
 * or failed commands produce a Command Reject.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5365 struct sk_buff *skb)
5367 struct hci_conn *hcon = conn->hcon;
5368 struct l2cap_cmd_hdr *cmd;
5372 if (hcon->type != LE_LINK)
5375 if (skb->len < L2CAP_CMD_HDR_SIZE)
5378 cmd = (void *) skb->data;
5379 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5381 len = le16_to_cpu(cmd->len);
5383 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* The declared length must match the remaining payload exactly, and
 * identifier 0 is reserved.
 */
5385 if (len != skb->len || !cmd->ident) {
5386 BT_DBG("corrupted command");
5390 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5392 struct l2cap_cmd_rej_unk rej;
5394 BT_ERR("Wrong link type (%d)", err);
/* Reject with a reason derived from the handler's error code. */
5396 rej.reason = l2cap_err_to_reason(err);
5397 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Parse the BR/EDR signalling channel: a PDU may carry several commands
 * back to back; iterate until the buffer is exhausted.  Commands that fail
 * are answered with a Command Reject.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5406 struct sk_buff *skb)
5408 struct hci_conn *hcon = conn->hcon;
5409 u8 *data = skb->data;
5411 struct l2cap_cmd_hdr cmd;
/* Mirror raw signalling traffic to raw sockets first. */
5414 l2cap_raw_recv(conn, skb);
5416 if (hcon->type != ACL_LINK)
5419 while (len >= L2CAP_CMD_HDR_SIZE) {
5421 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5422 data += L2CAP_CMD_HDR_SIZE;
5423 len -= L2CAP_CMD_HDR_SIZE;
5425 cmd_len = le16_to_cpu(cmd.len);
5427 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Stop on a command that overruns the buffer or uses the reserved
 * identifier 0.
 */
5430 if (cmd_len > len || !cmd.ident) {
5431 BT_DBG("corrupted command");
5435 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5437 struct l2cap_cmd_rej_unk rej;
5439 BT_ERR("Wrong link type (%d)", err);
5441 rej.reason = l2cap_err_to_reason(err);
5442 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming frame.
 * Channels without FCS pass through unchecked.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5456 u16 our_fcs, rcv_fcs;
/* The CRC covers the L2CAP header, which was already pulled off skb;
 * account for the enhanced vs extended control-field header size.
 */
5459 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5460 hdr_size = L2CAP_EXT_HDR_SIZE;
5462 hdr_size = L2CAP_ENH_HDR_SIZE;
5464 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off first; the two bytes at data+len are still intact
 * in the buffer and are read immediately after.
 */
5465 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5466 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5467 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5469 if (our_fcs != rcv_fcs)
/* Acknowledge the peer's poll: send RNR when locally busy, otherwise flush
 * pending I-frames and fall back to an RR if no frame carried the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5477 struct l2cap_ctrl control;
5479 BT_DBG("chan %p", chan);
5481 memset(&control, 0, sizeof(control));
5484 control.reqseq = chan->buffer_seq;
/* The next outgoing frame must carry the F-bit in reply to the poll. */
5485 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5487 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5488 control.super = L2CAP_SUPER_RNR;
5489 l2cap_send_sframe(chan, &control);
/* Leaving remote-busy with unacked frames restarts retransmission. */
5492 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5493 chan->unacked_frames > 0)
5494 __set_retrans_timer(chan);
5496 /* Send pending iframes */
5497 l2cap_ertm_send(chan);
5499 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5500 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5501 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5504 control.super = L2CAP_SUPER_RR;
5505 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the tail via *last_frag so
 * appends stay O(1), and keep skb's aggregate length accounting correct.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5510 struct sk_buff **last_frag)
5512 /* skb->len reflects data in skb as well as all fragments
5513 * skb->data_len reflects only data in fragments
5515 if (!skb_has_frag_list(skb))
5516 skb_shinfo(skb)->frag_list = new_frag;
5518 new_frag->next = NULL;
/* When the list was empty, *last_frag is the head skb itself; writing its
 * ->next is harmless here since the SDU head is not on any queue.
 */
5520 (*last_frag)->next = new_frag;
5521 *last_frag = new_frag;
5523 skb->len += new_frag->len;
5524 skb->data_len += new_frag->len;
5525 skb->truesize += new_frag->truesize;
/* Reassemble segmented SDUs from ERTM/streaming I-frames according to the
 * SAR bits, delivering complete SDUs via chan->ops->recv().  Several guard
 * checks and the error/cleanup tail are elided in this extract.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5529 struct l2cap_ctrl *control)
5533 switch (control->sar) {
5534 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: hand it up directly. */
5538 err = chan->ops->recv(chan, skb);
5541 case L2CAP_SAR_START:
/* First segment carries the total SDU length prefix. */
5545 chan->sdu_len = get_unaligned_le16(skb->data);
5546 skb_pull(skb, L2CAP_SDULEN_SIZE);
5548 if (chan->sdu_len > chan->imtu) {
5553 if (skb->len >= chan->sdu_len)
5557 chan->sdu_last_frag = skb;
5563 case L2CAP_SAR_CONTINUE:
5567 append_skb_frag(chan->sdu, skb,
5568 &chan->sdu_last_frag);
/* Middle segments must not complete or overflow the SDU. */
5571 if (chan->sdu->len >= chan->sdu_len)
/* L2CAP_SAR_END path: append the final segment and require an exact
 * total length before delivery.
 */
5581 append_skb_frag(chan->sdu, skb,
5582 &chan->sdu_last_frag);
5585 if (chan->sdu->len != chan->sdu_len)
5588 err = chan->ops->recv(chan, chan->sdu);
5591 /* Reassembly complete */
5593 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
5601 kfree_skb(chan->sdu);
5603 chan->sdu_last_frag = NULL;
/* Re-segment queued SDUs after an MPS change on channel move.
 * Not implemented yet; always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5616 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5620 if (chan->mode != L2CAP_MODE_ERTM)
5623 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5624 l2cap_tx(chan, NULL, NULL, event);
/* Drain in-sequence frames buffered in srej_q after selective-reject
 * recovery, stopping at the first gap or on local busy.  Returns to
 * normal RECV state once the queue is empty.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5630 /* Pass sequential frames to l2cap_reassemble_sdu()
5631 * until a gap is encountered.
5634 BT_DBG("chan %p", chan);
5636 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5637 struct sk_buff *skb;
5638 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5639 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5641 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5646 skb_unlink(skb, &chan->srej_q);
5647 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5648 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* Queue fully drained: recovery is over, ack the peer. */
5653 if (skb_queue_empty(&chan->srej_q)) {
5654 chan->rx_state = L2CAP_RX_STATE_RECV;
5655 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single requested frame,
 * honouring poll/final bits and the retry limit.  Invalid reqseq or an
 * exhausted retry budget disconnects the channel.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
5662 struct l2cap_ctrl *control)
5664 struct sk_buff *skb;
5666 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq asks for a frame never sent: protocol error. */
5668 if (control->reqseq == chan->next_tx_seq) {
5669 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5670 l2cap_send_disconn_req(chan, ECONNRESET);
5674 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5677 BT_DBG("Seq %d not available for retransmission",
5682 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5683 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5684 l2cap_send_disconn_req(chan, ECONNRESET);
5688 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5690 if (control->poll) {
/* Poll set: respond with F-bit and resume sending. */
5691 l2cap_pass_to_tx(chan, control);
5693 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5694 l2cap_retransmit(chan, control);
5695 l2cap_ertm_send(chan);
5697 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5698 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5699 chan->srej_save_reqseq = control->reqseq;
5702 l2cap_pass_to_tx_fbit(chan, control);
5704 if (control->final) {
/* Final: retransmit unless this SREJ was already acted on. */
5705 if (chan->srej_save_reqseq != control->reqseq ||
5706 !test_and_clear_bit(CONN_SREJ_ACT,
5708 l2cap_retransmit(chan, control);
5710 l2cap_retransmit(chan, control);
5711 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5712 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5713 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq onward,
 * subject to the retry limit.  Invalid reqseq disconnects the channel.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
5720 struct l2cap_ctrl *control)
5722 struct sk_buff *skb;
5724 BT_DBG("chan %p, control %p", chan, control);
5726 if (control->reqseq == chan->next_tx_seq) {
5727 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5728 l2cap_send_disconn_req(chan, ECONNRESET);
5732 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
/* Retry budget exhausted for the first frame to be resent: give up. */
5734 if (chan->max_tx && skb &&
5735 bt_cb(skb)->control.retries >= chan->max_tx) {
5736 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5737 l2cap_send_disconn_req(chan, ECONNRESET);
5741 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5743 l2cap_pass_to_tx(chan, control);
5745 if (control->final) {
/* Final: retransmit-all only if this REJ was not already handled. */
5746 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5747 l2cap_retransmit_all(chan, control);
5749 l2cap_retransmit_all(chan, control);
5750 l2cap_ertm_send(chan);
5751 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5752 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame sequence number relative to the receive
 * window and any active SREJ recovery: expected, duplicate, unexpected
 * (gap), or invalid.  Drives the ERTM receive state machine.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5758 BT_DBG("chan %p, txseq %d", chan, txseq);
5760 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5761 chan->expected_tx_seq);
/* During SREJ recovery, first rule out frames outside the tx window. */
5763 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5764 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5766 /* See notes below regarding "double poll" and
5769 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5770 BT_DBG("Invalid/Ignore - after SREJ");
5771 return L2CAP_TXSEQ_INVALID_IGNORE;
5773 BT_DBG("Invalid - in window after SREJ sent");
5774 return L2CAP_TXSEQ_INVALID;
5778 if (chan->srej_list.head == txseq) {
5779 BT_DBG("Expected SREJ");
5780 return L2CAP_TXSEQ_EXPECTED_SREJ;
5783 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5784 BT_DBG("Duplicate SREJ - txseq already stored");
5785 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5788 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5789 BT_DBG("Unexpected SREJ - not requested");
5790 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5794 if (chan->expected_tx_seq == txseq) {
/* Even the expected sequence is invalid if it falls outside the window. */
5795 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5797 BT_DBG("Invalid - txseq outside tx window");
5798 return L2CAP_TXSEQ_INVALID;
5801 return L2CAP_TXSEQ_EXPECTED;
/* Behind the expected sequence but inside the window => retransmission. */
5805 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5806 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5807 BT_DBG("Duplicate - expected_tx_seq later than txseq")
5808 return L2CAP_TXSEQ_DUPLICATE;
5811 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5812 /* A source of invalid packets is a "double poll" condition,
5813 * where delays cause us to send multiple poll packets. If
5814 * the remote stack receives and processes both polls,
5815 * sequence numbers can wrap around in such a way that a
5816 * resent frame has a sequence number that looks like new data
5817 * with a sequence gap. This would trigger an erroneous SREJ
5820 * Fortunately, this is impossible with a tx window that's
5821 * less than half of the maximum sequence number, which allows
5822 * invalid frames to be safely ignored.
5824 * With tx window sizes greater than half of the tx window
5825 * maximum, the frame is invalid and cannot be ignored. This
5826 * causes a disconnect.
5829 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5830 BT_DBG("Invalid/Ignore - txseq outside tx window");
5831 return L2CAP_TXSEQ_INVALID_IGNORE;
5833 BT_DBG("Invalid - txseq outside tx window");
5834 return L2CAP_TXSEQ_INVALID;
5837 BT_DBG("Unexpected - txseq indicates missing frames");
5838 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive-side state machine handler for L2CAP_RX_STATE_RECV
 * (the normal receive state).  Dispatches on the incoming event
 * (I-frame, RR, RNR, REJ, SREJ) and on the classification of the
 * I-frame's TxSeq.  Frames not queued for later (skb_in_use stays
 * false) are freed at the end.
 *
 * NOTE(review): extraction has dropped structural lines (braces,
 * break statements, some assignments); tokens below are otherwise
 * unmodified.
 */
5842 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5843 struct l2cap_ctrl *control,
5844 struct sk_buff *skb, u8 event)
/* Tracks whether skb was queued somewhere and must not be freed. */
5847 bool skb_in_use = false;
5849 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5853 case L2CAP_EV_RECV_IFRAME:
5854 switch (l2cap_classify_txseq(chan, control->txseq)) {
5855 case L2CAP_TXSEQ_EXPECTED:
/* ReqSeq piggy-backed on the I-frame acks our transmitted frames. */
5856 l2cap_pass_to_tx(chan, control);
5858 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5859 BT_DBG("Busy, discarding expected seq %d",
5864 chan->expected_tx_seq = __next_seq(chan,
5867 chan->buffer_seq = chan->expected_tx_seq;
5870 err = l2cap_reassemble_sdu(chan, skb, control);
/* F-bit set: retransmit outstanding frames unless a REJ already did. */
5874 if (control->final) {
5875 if (!test_and_clear_bit(CONN_REJ_ACT,
5876 &chan->conn_state)) {
5878 l2cap_retransmit_all(chan, control);
5879 l2cap_ertm_send(chan);
5883 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5884 l2cap_send_ack(chan);
5886 case L2CAP_TXSEQ_UNEXPECTED:
5887 l2cap_pass_to_tx(chan, control);
5889 /* Can't issue SREJ frames in the local busy state.
5890 * Drop this frame, it will be seen as missing
5891 * when local busy is exited.
5893 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5894 BT_DBG("Busy, discarding unexpected seq %d",
5899 /* There was a gap in the sequence, so an SREJ
5900 * must be sent for each missing frame. The
5901 * current frame is stored for later use.
5903 skb_queue_tail(&chan->srej_q, skb);
5905 BT_DBG("Queued %p (queue len %d)", skb,
5906 skb_queue_len(&chan->srej_q));
5908 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5909 l2cap_seq_list_clear(&chan->srej_list);
5910 l2cap_send_srej(chan, control->txseq);
/* Enter SREJ recovery; l2cap_rx_state_srej_sent takes over. */
5912 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5914 case L2CAP_TXSEQ_DUPLICATE:
5915 l2cap_pass_to_tx(chan, control);
5917 case L2CAP_TXSEQ_INVALID_IGNORE:
5919 case L2CAP_TXSEQ_INVALID:
5921 l2cap_send_disconn_req(chan, ECONNRESET);
5925 case L2CAP_EV_RECV_RR:
5926 l2cap_pass_to_tx(chan, control);
5927 if (control->final) {
5928 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Retransmit on final RR unless REJ recovery or a move is active. */
5930 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5931 !__chan_is_moving(chan)) {
5933 l2cap_retransmit_all(chan, control);
5936 l2cap_ertm_send(chan);
5937 } else if (control->poll) {
5938 l2cap_send_i_or_rr_or_rnr(chan);
5940 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5941 &chan->conn_state) &&
5942 chan->unacked_frames)
5943 __set_retrans_timer(chan);
5945 l2cap_ertm_send(chan);
5948 case L2CAP_EV_RECV_RNR:
/* Peer signalled busy: stop retransmitting until it recovers. */
5949 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5950 l2cap_pass_to_tx(chan, control);
5951 if (control && control->poll) {
5952 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5953 l2cap_send_rr_or_rnr(chan, 0);
5955 __clear_retrans_timer(chan);
5956 l2cap_seq_list_clear(&chan->retrans_list);
5958 case L2CAP_EV_RECV_REJ:
5959 l2cap_handle_rej(chan, control);
5961 case L2CAP_EV_RECV_SREJ:
5962 l2cap_handle_srej(chan, control);
/* Free any skb that was not queued above. */
5968 if (skb && !skb_in_use) {
5969 BT_DBG("Freeing %p", skb);
/* ERTM receive-side state machine handler for
 * L2CAP_RX_STATE_SREJ_SENT: one or more SREJ requests are
 * outstanding and retransmitted frames are being collected in
 * srej_q until the gaps close and queued I-frames can be
 * reassembled in order.
 *
 * NOTE(review): extraction has dropped structural lines (braces,
 * break statements, skb_in_use assignments); tokens below are
 * otherwise unmodified.
 */
5976 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5977 struct l2cap_ctrl *control,
5978 struct sk_buff *skb, u8 event)
5981 u16 txseq = control->txseq;
5982 bool skb_in_use = false;
5984 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5988 case L2CAP_EV_RECV_IFRAME:
5989 switch (l2cap_classify_txseq(chan, txseq)) {
5990 case L2CAP_TXSEQ_EXPECTED:
5991 /* Keep frame for reassembly later */
5992 l2cap_pass_to_tx(chan, control);
5993 skb_queue_tail(&chan->srej_q, skb);
5995 BT_DBG("Queued %p (queue len %d)", skb,
5996 skb_queue_len(&chan->srej_q));
5998 chan->expected_tx_seq = __next_seq(chan, txseq);
6000 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The retransmission we asked for arrived; drop it from the list. */
6001 l2cap_seq_list_pop(&chan->srej_list);
6003 l2cap_pass_to_tx(chan, control);
6004 skb_queue_tail(&chan->srej_q, skb);
6006 BT_DBG("Queued %p (queue len %d)", skb,
6007 skb_queue_len(&chan->srej_q));
/* Try to drain srej_q now that a gap may have closed. */
6009 err = l2cap_rx_queued_iframes(chan);
6014 case L2CAP_TXSEQ_UNEXPECTED:
6015 /* Got a frame that can't be reassembled yet.
6016 * Save it for later, and send SREJs to cover
6017 * the missing frames.
6019 skb_queue_tail(&chan->srej_q, skb);
6021 BT_DBG("Queued %p (queue len %d)", skb,
6022 skb_queue_len(&chan->srej_q));
6024 l2cap_pass_to_tx(chan, control);
6025 l2cap_send_srej(chan, control->txseq);
6027 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6028 /* This frame was requested with an SREJ, but
6029 * some expected retransmitted frames are
6030 * missing. Request retransmission of missing
6033 skb_queue_tail(&chan->srej_q, skb);
6035 BT_DBG("Queued %p (queue len %d)", skb,
6036 skb_queue_len(&chan->srej_q));
6038 l2cap_pass_to_tx(chan, control);
6039 l2cap_send_srej_list(chan, control->txseq);
6041 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6042 /* We've already queued this frame. Drop this copy. */
6043 l2cap_pass_to_tx(chan, control);
6045 case L2CAP_TXSEQ_DUPLICATE:
6046 /* Expecting a later sequence number, so this frame
6047 * was already received. Ignore it completely.
6050 case L2CAP_TXSEQ_INVALID_IGNORE:
6052 case L2CAP_TXSEQ_INVALID:
6054 l2cap_send_disconn_req(chan, ECONNRESET);
6058 case L2CAP_EV_RECV_RR:
6059 l2cap_pass_to_tx(chan, control);
6060 if (control->final) {
6061 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6063 if (!test_and_clear_bit(CONN_REJ_ACT,
6064 &chan->conn_state)) {
6066 l2cap_retransmit_all(chan, control);
6069 l2cap_ertm_send(chan);
6070 } else if (control->poll) {
6071 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6072 &chan->conn_state) &&
6073 chan->unacked_frames) {
6074 __set_retrans_timer(chan);
/* Answer the poll with an F-bit SREJ for the tail of the list. */
6077 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6078 l2cap_send_srej_tail(chan);
6080 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6081 &chan->conn_state) &&
6082 chan->unacked_frames)
6083 __set_retrans_timer(chan);
6085 l2cap_send_ack(chan);
6088 case L2CAP_EV_RECV_RNR:
6089 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6090 l2cap_pass_to_tx(chan, control);
6091 if (control->poll) {
6092 l2cap_send_srej_tail(chan);
/* Non-poll RNR: acknowledge with a plain RR S-frame. */
6094 struct l2cap_ctrl rr_control;
6095 memset(&rr_control, 0, sizeof(rr_control));
6096 rr_control.sframe = 1;
6097 rr_control.super = L2CAP_SUPER_RR;
6098 rr_control.reqseq = chan->buffer_seq;
6099 l2cap_send_sframe(chan, &rr_control);
6103 case L2CAP_EV_RECV_REJ:
6104 l2cap_handle_rej(chan, control);
6106 case L2CAP_EV_RECV_SREJ:
6107 l2cap_handle_srej(chan, control);
/* Free any skb that was not queued above. */
6111 if (skb && !skb_in_use) {
6112 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return the RX state machine to the
 * normal RECV state, update the connection MTU for the new
 * controller, and resegment any pending outbound data for it.
 *
 * NOTE(review): the two MTU assignments below are alternatives —
 * the selecting conditional (presumably on chan->hs_hcon, i.e.
 * high-speed link vs. BR/EDR ACL) was dropped by the extraction.
 */
6119 static int l2cap_finish_move(struct l2cap_chan *chan)
6121 BT_DBG("chan %p", chan);
6123 chan->rx_state = L2CAP_RX_STATE_RECV;
6126 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6128 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6130 return l2cap_resegment(chan);
/* RX state handler for L2CAP_RX_STATE_WAIT_P (channel move: waiting
 * for a frame with the P bit set).  On the poll, acks are processed,
 * the transmit queue is rewound to the peer's ReqSeq, the move is
 * finished, and a final response is sent.  Non-poll events fall
 * through to the normal RECV handler without the skb.
 */
6133 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6134 struct l2cap_ctrl *control,
6135 struct sk_buff *skb, u8 event)
6139 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6145 l2cap_process_reqseq(chan, control->reqseq);
6147 if (!skb_queue_empty(&chan->tx_q))
6148 chan->tx_send_head = skb_peek(&chan->tx_q);
6150 chan->tx_send_head = NULL;
6152 /* Rewind next_tx_seq to the point expected
6155 chan->next_tx_seq = control->reqseq;
6156 chan->unacked_frames = 0;
6158 err = l2cap_finish_move(chan);
/* Respond to the poll with the F bit set. */
6162 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6163 l2cap_send_i_or_rr_or_rnr(chan);
6165 if (event == L2CAP_EV_RECV_IFRAME)
/* S-frames are re-dispatched through the normal RECV state handler. */
6168 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state handler for L2CAP_RX_STATE_WAIT_F (channel move: waiting
 * for a frame with the F bit set).  Non-final frames are ignored
 * here; on the final frame the tx queue is rewound to ReqSeq, the
 * MTU is updated for the new link, data is resegmented, and the
 * frame is then processed by the normal RECV handler.
 *
 * NOTE(review): as in l2cap_finish_move(), the conditional selecting
 * between the two MTU assignments was dropped by the extraction.
 */
6171 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6172 struct l2cap_ctrl *control,
6173 struct sk_buff *skb, u8 event)
6177 if (!control->final)
6180 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6182 chan->rx_state = L2CAP_RX_STATE_RECV;
6183 l2cap_process_reqseq(chan, control->reqseq);
6185 if (!skb_queue_empty(&chan->tx_q))
6186 chan->tx_send_head = skb_peek(&chan->tx_q);
6188 chan->tx_send_head = NULL;
6190 /* Rewind next_tx_seq to the point expected
6193 chan->next_tx_seq = control->reqseq;
6194 chan->unacked_frames = 0;
6197 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6199 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6201 err = l2cap_resegment(chan);
6204 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if reqseq acknowledges only frames that have actually
 * been sent and are still unacked, i.e. its offset from next_tx_seq
 * does not exceed the current unacked span.
 *
 * NOTE(review): the local declaration of `unacked` (u16) was dropped
 * by the extraction.
 */
6209 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6211 /* Make sure reqseq is for a packet that has been sent but not acked */
6214 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6215 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: validates the frame's ReqSeq,
 * then routes the event to the handler for the current rx_state.
 * An invalid ReqSeq is a protocol violation and triggers a
 * disconnect request.
 */
6218 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6219 struct sk_buff *skb, u8 event)
6223 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6224 control, skb, event, chan->rx_state);
6226 if (__valid_reqseq(chan, control->reqseq)) {
6227 switch (chan->rx_state) {
6228 case L2CAP_RX_STATE_RECV:
6229 err = l2cap_rx_state_recv(chan, control, skb, event);
6231 case L2CAP_RX_STATE_SREJ_SENT:
6232 err = l2cap_rx_state_srej_sent(chan, control, skb,
6235 case L2CAP_RX_STATE_WAIT_P:
6236 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6238 case L2CAP_RX_STATE_WAIT_F:
6239 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6246 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6247 control->reqseq, chan->next_tx_seq,
6248 chan->expected_ack_seq);
6249 l2cap_send_disconn_req(chan, ECONNRESET);
/* Receive path for streaming mode: only frames with the expected
 * TxSeq are reassembled and delivered; everything else is dropped
 * (streaming mode has no retransmission).  Any partially
 * reassembled SDU is discarded when a frame is lost, and the
 * expected sequence is resynchronized to the received TxSeq.
 */
6255 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6256 struct sk_buff *skb)
6260 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6263 if (l2cap_classify_txseq(chan, control->txseq) ==
6264 L2CAP_TXSEQ_EXPECTED) {
6265 l2cap_pass_to_tx(chan, control);
6267 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6268 __next_seq(chan, chan->buffer_seq));
6270 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6272 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: drop any in-progress SDU and this skb. */
6275 kfree_skb(chan->sdu);
6278 chan->sdu_last_frag = NULL;
6282 BT_DBG("Freeing %p", skb);
/* Resync receive state to the frame just seen. */
6287 chan->last_acked_seq = control->txseq;
6288 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames on a channel: unpacks
 * the control field, verifies FCS, validates the payload length
 * against MPS, validates F/P bit combinations against the TX state,
 * and dispatches I-frames to l2cap_rx()/l2cap_stream_rx() and
 * S-frames (RR/REJ/RNR/SREJ) to l2cap_rx() via an event lookup
 * table.  Protocol violations trigger a disconnect request.
 */
6293 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6295 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6299 __unpack_control(chan, skb);
6304 * We can just drop the corrupted I-frame here.
6305 * Receiver will miss it and start proper recovery
6306 * procedures and ask for retransmission.
6308 if (l2cap_check_fcs(chan, skb))
/* A START fragment carries an extra SDU-length field. */
6311 if (!control->sframe && control->sar == L2CAP_SAR_START)
6312 len -= L2CAP_SDULEN_SIZE;
6314 if (chan->fcs == L2CAP_FCS_CRC16)
6315 len -= L2CAP_FCS_SIZE;
6317 if (len > chan->mps) {
6318 l2cap_send_disconn_req(chan, ECONNRESET);
6322 if (!control->sframe) {
6325 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6326 control->sar, control->reqseq, control->final,
6329 /* Validate F-bit - F=0 always valid, F=1 only
6330 * valid in TX WAIT_F
6332 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6335 if (chan->mode != L2CAP_MODE_STREAMING) {
6336 event = L2CAP_EV_RECV_IFRAME;
6337 err = l2cap_rx(chan, control, skb, event);
6339 err = l2cap_stream_rx(chan, control, skb);
6343 l2cap_send_disconn_req(chan, ECONNRESET);
/* Maps the 2-bit super field to the RX state machine event. */
6345 const u8 rx_func_to_event[4] = {
6346 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6347 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6350 /* Only I-frames are expected in streaming mode */
6351 if (chan->mode == L2CAP_MODE_STREAMING)
6354 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6355 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; leftover bytes are a violation. */
6359 BT_ERR("Trailing bytes: %d in sframe", len);
6360 l2cap_send_disconn_req(chan, ECONNRESET);
6364 /* Validate F and P bits */
6365 if (control->final && (control->poll ||
6366 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6369 event = rx_func_to_event[control->super];
6370 if (l2cap_rx(chan, control, skb, event))
6371 l2cap_send_disconn_req(chan, ECONNRESET);
/* Deliver an inbound frame to the channel identified by its source
 * CID.  Unknown CIDs are dropped (with a special case creating an
 * A2MP channel for L2CAP_CID_A2MP).  Basic-mode data is passed
 * straight to the channel's recv op (subject to imtu); ERTM and
 * streaming modes go through l2cap_data_rcv().
 */
6381 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6382 struct sk_buff *skb)
6384 struct l2cap_chan *chan;
6386 chan = l2cap_get_chan_by_scid(conn, cid);
6388 if (cid == L2CAP_CID_A2MP) {
6389 chan = a2mp_channel_create(conn, skb);
6395 l2cap_chan_lock(chan);
6397 BT_DBG("unknown cid 0x%4.4x", cid);
6398 /* Drop packet and return */
6404 BT_DBG("chan %p, len %d", chan, skb->len);
6406 if (chan->state != BT_CONNECTED)
6409 switch (chan->mode) {
6410 case L2CAP_MODE_BASIC:
6411 /* If socket recv buffers overflows we drop data here
6412 * which is *bad* because L2CAP has to be reliable.
6413 * But we don't have any other choice. L2CAP doesn't
6414 * provide flow control mechanism. */
6416 if (chan->imtu < skb->len)
6419 if (!chan->ops->recv(chan, skb))
6423 case L2CAP_MODE_ERTM:
6424 case L2CAP_MODE_STREAMING:
6425 l2cap_data_rcv(chan, skb);
6429 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6437 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to a listening channel
 * matching the given PSM.  Only meaningful on ACL links.  The remote
 * address and PSM are stashed in the skb control block so the socket
 * layer can fill in msg_name on recvmsg.
 */
6440 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6441 struct sk_buff *skb)
6443 struct hci_conn *hcon = conn->hcon;
6444 struct l2cap_chan *chan;
6446 if (hcon->type != ACL_LINK)
6449 chan = l2cap_global_chan_by_psm(0, psm, &conn->hcon->src,
6454 BT_DBG("chan %p, len %d", chan, skb->len);
6456 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6459 if (chan->imtu < skb->len)
6462 /* Store remote BD_ADDR and PSM for msg_name */
6463 bacpy(&bt_cb(skb)->bdaddr, &conn->hcon->dst);
6464 bt_cb(skb)->psm = psm;
6466 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel (LE) frame to the connected channel
 * registered on L2CAP_CID_ATT for this address pair.  Only
 * meaningful on LE links; oversized frames (len > imtu) are dropped.
 */
6473 static void l2cap_att_channel(struct l2cap_conn *conn,
6474 struct sk_buff *skb)
6476 struct hci_conn *hcon = conn->hcon;
6477 struct l2cap_chan *chan;
6479 if (hcon->type != LE_LINK)
6482 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6483 &conn->hcon->src, &conn->hcon->dst);
6487 BT_DBG("chan %p, len %d", chan, skb->len);
6489 if (chan->imtu < skb->len)
6492 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by destination CID: signalling,
 * connectionless, ATT, LE signalling, SMP, or a regular data
 * channel.  Strips the basic L2CAP header first and sanity-checks
 * the advertised length against the actual skb length.
 */
6499 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6501 struct l2cap_hdr *lh = (void *) skb->data;
6505 skb_pull(skb, L2CAP_HDR_SIZE);
6506 cid = __le16_to_cpu(lh->cid);
6507 len = __le16_to_cpu(lh->len);
6509 if (len != skb->len) {
6514 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6517 case L2CAP_CID_SIGNALING:
6518 l2cap_sig_channel(conn, skb);
6521 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM right after the header. */
6522 psm = get_unaligned((__le16 *) skb->data);
6523 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6524 l2cap_conless_channel(conn, psm, skb);
6528 l2cap_att_channel(conn, skb);
6531 case L2CAP_CID_LE_SIGNALING:
6532 l2cap_le_sig_channel(conn, skb);
/* SMP failure tears down the whole connection. */
6536 if (smp_sig_channel(conn, skb))
6537 l2cap_conn_del(conn->hcon, EACCES);
6541 l2cap_data_channel(conn, cid, skb);
6546 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening channels; ones bound to this adapter's address
 * (exact match) take precedence over wildcard (BDADDR_ANY) binds.
 * Returns the accumulated link-mode flags (HCI_LM_ACCEPT, and
 * HCI_LM_MASTER when the channel requests role switching).
 *
 * NOTE(review): `exact` is initialized but the line setting it on an
 * exact address match appears to have been dropped by the extraction.
 */
6548 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6550 int exact = 0, lm1 = 0, lm2 = 0;
6551 struct l2cap_chan *c;
6553 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6555 /* Find listening sockets and check their link_mode */
6556 read_lock(&chan_list_lock);
6557 list_for_each_entry(c, &chan_list, global_l) {
6558 if (c->state != BT_LISTEN)
6561 if (!bacmp(&c->src, &hdev->bdaddr)) {
6562 lm1 |= HCI_LM_ACCEPT;
6563 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6564 lm1 |= HCI_LM_MASTER;
6566 } else if (!bacmp(&c->src, BDADDR_ANY)) {
6567 lm2 |= HCI_LM_ACCEPT;
6568 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6569 lm2 |= HCI_LM_MASTER;
6572 read_unlock(&chan_list_lock);
6574 return exact ? lm1 : lm2;
/* HCI callback: connection setup completed.  On success an L2CAP
 * connection object is created (or reused) and marked ready; on
 * failure the connection is torn down with the mapped errno.
 */
6577 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6579 struct l2cap_conn *conn;
6581 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6584 conn = l2cap_conn_add(hcon);
6586 l2cap_conn_ready(conn);
6588 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the HCI reason code to use for a disconnect.
 * Falls back to "remote user terminated" when no L2CAP connection
 * state exists to supply a stored disc_reason.
 */
6592 int l2cap_disconn_ind(struct hci_conn *hcon)
6594 struct l2cap_conn *conn = hcon->l2cap_data;
6596 BT_DBG("hcon %p", hcon);
6599 return HCI_ERROR_REMOTE_USER_TERM;
6600 return conn->disc_reason;
/* HCI callback: link disconnected — tear down the L2CAP connection,
 * translating the HCI reason into an errno for the upper layers.
 */
6603 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6605 BT_DBG("hcon %p reason %d", hcon, reason);
6607 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * when encryption drops, MEDIUM-security channels get a grace timer
 * and HIGH-security channels are closed immediately; when encryption
 * comes up, a pending MEDIUM-security timer is cancelled.
 */
6610 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6612 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6615 if (encrypt == 0x00) {
6616 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6617 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6618 } else if (chan->sec_level == BT_SECURITY_HIGH)
6619 l2cap_chan_close(chan, ECONNREFUSED);
6621 if (chan->sec_level == BT_SECURITY_MEDIUM)
6622 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption state changed on the link.
 * LE links hand off to SMP key distribution.  For each channel on a
 * BR/EDR link: ATT channels become ready once encrypted; connected/
 * configured channels have suspended sockets resumed and their
 * encryption requirements re-checked; channels mid-connect either
 * proceed (BT_CONNECT) or send the pending connect response
 * (BT_CONNECT2), deferring, succeeding, or security-blocking based
 * on status and the DEFER_SETUP flag.
 *
 * NOTE(review): extraction has dropped structural lines (braces,
 * continue/goto statements, some locals such as res/stat/buf);
 * tokens below are otherwise unmodified.
 */
6626 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6628 struct l2cap_conn *conn = hcon->l2cap_data;
6629 struct l2cap_chan *chan;
6634 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6636 if (hcon->type == LE_LINK) {
6637 if (!status && encrypt)
6638 smp_distribute_keys(conn, 0);
6639 cancel_delayed_work(&conn->security_timer);
6642 mutex_lock(&conn->chan_lock);
6644 list_for_each_entry(chan, &conn->chan_l, list) {
6645 l2cap_chan_lock(chan);
6647 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6648 state_to_string(chan->state));
/* A2MP fixed channels have no security handling here. */
6650 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6651 l2cap_chan_unlock(chan);
6655 if (chan->scid == L2CAP_CID_ATT) {
6656 if (!status && encrypt) {
6657 chan->sec_level = hcon->sec_level;
6658 l2cap_chan_ready(chan);
6661 l2cap_chan_unlock(chan);
6665 if (!__l2cap_no_conn_pending(chan)) {
6666 l2cap_chan_unlock(chan);
6670 if (!status && (chan->state == BT_CONNECTED ||
6671 chan->state == BT_CONFIG)) {
6672 struct sock *sk = chan->sk;
/* Security procedure done: let the socket send data again. */
6674 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6675 sk->sk_state_change(sk);
6677 l2cap_check_encryption(chan, encrypt);
6678 l2cap_chan_unlock(chan);
6682 if (chan->state == BT_CONNECT) {
6684 l2cap_start_connection(chan);
6686 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6688 } else if (chan->state == BT_CONNECT2) {
6689 struct sock *sk = chan->sk;
6690 struct l2cap_conn_rsp rsp;
6696 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6697 res = L2CAP_CR_PEND;
6698 stat = L2CAP_CS_AUTHOR_PEND;
6699 chan->ops->defer(chan);
6701 __l2cap_state_change(chan, BT_CONFIG);
6702 res = L2CAP_CR_SUCCESS;
6703 stat = L2CAP_CS_NO_INFO;
/* Security failed: disconnect with a security-block result. */
6706 __l2cap_state_change(chan, BT_DISCONN);
6707 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6708 res = L2CAP_CR_SEC_BLOCK;
6709 stat = L2CAP_CS_NO_INFO;
6714 rsp.scid = cpu_to_le16(chan->dcid);
6715 rsp.dcid = cpu_to_le16(chan->scid);
6716 rsp.result = cpu_to_le16(res);
6717 rsp.status = cpu_to_le16(stat);
6718 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6721 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6722 res == L2CAP_CR_SUCCESS) {
6724 set_bit(CONF_REQ_SENT, &chan->conf_state);
6725 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6727 l2cap_build_conf_req(chan, buf),
6729 chan->num_conf_req++;
6733 l2cap_chan_unlock(chan);
6736 mutex_unlock(&conn->chan_lock);
/* HCI callback: ACL data received.  Reassembles L2CAP frames from
 * ACL fragments: a start fragment either carries a complete frame
 * (dispatched immediately) or allocates conn->rx_skb sized from the
 * L2CAP header's length field; continuation fragments are appended
 * until rx_len reaches zero, then the completed frame is handed to
 * l2cap_recv_frame().  Malformed sequences mark the connection
 * unreliable (ECOMM).
 *
 * NOTE(review): extraction has dropped structural lines (braces,
 * goto/drop statements, the switch header on `flags` and some case
 * labels); tokens below are otherwise unmodified.
 */
6741 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6743 struct l2cap_conn *conn = hcon->l2cap_data;
6744 struct l2cap_hdr *hdr;
6747 /* For AMP controller do not create l2cap conn */
6748 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6752 conn = l2cap_conn_add(hcon);
6757 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6761 case ACL_START_NO_FLUSH:
/* A start fragment while a partial frame is pending is an error:
 * discard the stale partial frame. */
6764 BT_ERR("Unexpected start frame (len %d)", skb->len);
6765 kfree_skb(conn->rx_skb);
6766 conn->rx_skb = NULL;
6768 l2cap_conn_unreliable(conn, ECOMM);
6771 /* Start fragment always begin with Basic L2CAP header */
6772 if (skb->len < L2CAP_HDR_SIZE) {
6773 BT_ERR("Frame is too short (len %d)", skb->len);
6774 l2cap_conn_unreliable(conn, ECOMM);
6778 hdr = (struct l2cap_hdr *) skb->data;
6779 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6781 if (len == skb->len) {
6782 /* Complete frame received */
6783 l2cap_recv_frame(conn, skb);
6787 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6789 if (skb->len > len) {
6790 BT_ERR("Frame is too long (len %d, expected len %d)",
6792 l2cap_conn_unreliable(conn, ECOMM);
6796 /* Allocate skb for the complete frame (with header) */
6797 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6801 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6803 conn->rx_len = len - skb->len;
6807 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6809 if (!conn->rx_len) {
6810 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6811 l2cap_conn_unreliable(conn, ECOMM);
6815 if (skb->len > conn->rx_len) {
6816 BT_ERR("Fragment is too long (len %d, expected %d)",
6817 skb->len, conn->rx_len);
6818 kfree_skb(conn->rx_skb);
6819 conn->rx_skb = NULL;
6821 l2cap_conn_unreliable(conn, ECOMM);
6825 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6827 conn->rx_len -= skb->len;
6829 if (!conn->rx_len) {
6830 /* Complete frame received. l2cap_recv_frame
6831 * takes ownership of the skb so set the global
6832 * rx_skb pointer to NULL first.
6834 struct sk_buff *rx_skb = conn->rx_skb;
6835 conn->rx_skb = NULL;
6836 l2cap_recv_frame(conn, rx_skb);
/* seq_file show callback for the "l2cap" debugfs entry: dumps one
 * line per channel in the global chan_list (addresses, state, PSM,
 * CIDs, MTUs, security level, mode) under the list read lock.
 */
6846 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6848 struct l2cap_chan *c;
6850 read_lock(&chan_list_lock);
6852 list_for_each_entry(c, &chan_list, global_l) {
6853 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6855 c->state, __le16_to_cpu(c->psm),
6856 c->scid, c->dcid, c->imtu, c->omtu,
6857 c->sec_level, c->mode);
6860 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the seq_file single-show handler. */
6865 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6867 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the debugfs "l2cap" entry (standard
 * single_open/seq_file boilerplate), and the dentry it lives in.
 */
6870 static const struct file_operations l2cap_debugfs_fops = {
6871 .open = l2cap_debugfs_open,
6873 .llseek = seq_lseek,
6874 .release = single_release,
6877 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket family and create the
 * debugfs entry (creation failure is logged, not fatal).
 */
6879 int __init l2cap_init(void)
6883 err = l2cap_init_sockets();
6888 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6889 NULL, &l2cap_debugfs_fops);
6891 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs entry and unregister sockets. */
6897 void l2cap_exit(void)
6899 debugfs_remove(l2cap_debugfs);
6900 l2cap_cleanup_sockets();
/* Module parameter: allows disabling ERTM at load/run time. */
6903 module_param(disable_ertm, bool, 0644);
6904 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");