2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
/* Feature mask advertised in information responses; fixed channels are
 * always claimed as supported. */
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap: the L2CAP signalling channel bit is always set. */
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of all L2CAP channels, protected by chan_list_lock. */
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling/Tx helpers defined later in the file. */
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
/* Look up a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock.  (Body partially elided in this view.) */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
68 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on this connection by source CID.
 * Caller must hold conn->chan_lock.  (Body partially elided in this view.) */
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
80 list_for_each_entry(c, &conn->chan_l, list) {
87 /* Find channel with given SCID.
88  * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
/* NOTE(review): the per-channel lock acquisition appears on elided lines. */
98 mutex_unlock(&conn->chan_lock);
103 /* Find channel with given DCID.
104  * Returns locked channel.
/* Mirror of l2cap_get_chan_by_scid() for destination CIDs. */
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
109 struct l2cap_chan *c;
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
115 mutex_unlock(&conn->chan_lock);
/* Find the channel waiting on the signalling command identifier `ident`.
 * Caller must hold conn->chan_lock. */
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
123 struct l2cap_chan *c;
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
135 struct l2cap_chan *c;
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
141 mutex_unlock(&conn->chan_lock);
/* Find a channel in the global list bound to this PSM (source port) and
 * source address.  Caller must hold chan_list_lock. */
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
148 struct l2cap_chan *c;
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM.  A non-zero psm fails if already in use for
 * this source address; psm == 0 auto-allocates from the dynamic range. */
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
161 write_lock(&chan_list_lock);
163 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Scan odd values 0x1001..0x10ff for a free dynamic PSM (step 2 keeps
 * the least-significant octet odd, as the spec requires). */
176 for (p = 0x1001; p < 0x1100; p += 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 chan->psm = cpu_to_le16(p);
179 chan->sport = cpu_to_le16(p);
186 write_unlock(&chan_list_lock);
/* Assign a fixed SCID to the channel under the global channel lock.
 * NOTE(review): the actual assignment is on lines elided from this view. */
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
192 write_lock(&chan_list_lock);
196 write_unlock(&chan_list_lock);
/* Return the first unused dynamic CID on this connection.
 * Caller must hold conn->chan_lock. */
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
203 u16 cid = L2CAP_CID_DYN_START;
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Change channel state and notify the channel's owner via its ops. */
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
215 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 state_to_string(state));
/* Owner (socket layer or A2MP) performs the actual state bookkeeping. */
219 chan->ops->state_change(chan, state);
/* Wrapper around __l2cap_state_change(); presumably serializes on the
 * owning socket's lock (locking lines elided) — TODO confirm. */
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
224 struct sock *sk = chan->sk;
227 __l2cap_state_change(chan, state);
/* Record an error on the owning socket; the sk_err assignment is on
 * lines elided from this view. */
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
233 struct sock *sk = chan->sk;
/* Wrapper around __l2cap_chan_set_err(); presumably takes the socket
 * lock on the elided lines — TODO confirm. */
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
240 struct sock *sk = chan->sk;
243 __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer — but not while the monitor timer
 * is pending, and only when a retransmission timeout is configured. */
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
/* Switch from the retransmission timer to the monitor timer: the two
 * are mutually exclusive in ERTM. */
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame carrying ERTM txseq == seq. */
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 and removed from the head in constant time, without further memory allocation.
/* Allocate and reset a seq_list able to hold `size` sequence numbers. */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294  * (which may be up to 14 bits) in to a smaller array that is
295  * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Power-of-two size lets (seq & mask) index the array directly. */
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array of a seq_list. */
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
/* O(1) membership test: a slot not equal to CLEAR means seq is queued. */
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove `seq` from the list.  Removing the head is O(1); removing an
 * interior element walks the singly-linked chain stored in the array. */
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* List became empty: reset head and tail sentinels. */
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
/* Pop and return the head of the list (CLEAR if the list is empty). */
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list; O(n) over the array but a no-op when already empty. */
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append `seq` at the tail in O(1); duplicates are silently ignored. */
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
/* Already a member — slot is non-CLEAR — so do nothing. */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the per-channel timer: closes the channel
 * with an error reason derived from its current state, then drops the
 * reference taken when the timer was armed. */
396 static void l2cap_chan_timeout(struct work_struct *work)
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
400 struct l2cap_conn *conn = chan->conn;
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
/* NOTE(review): the fallback reason branch is on lines elided here. */
416 l2cap_chan_close(chan, reason);
418 l2cap_chan_unlock(chan);
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
/* Balances the hold taken when the chan_timer was scheduled. */
423 l2cap_chan_put(chan);
/* Allocate a new channel, add it to the global list, and initialize its
 * lock, timer, refcount and state.  Returns NULL path elided. */
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list and free (the
 * kfree is on an elided line). */
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
/* Take a reference on the channel (the kref_get is on an elided line). */
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference; frees via l2cap_chan_destroy() when it hits zero. */
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
/* Reset a channel's negotiable parameters to their spec defaults before
 * configuration starts. */
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection: assign CIDs/MTUs per channel type,
 * take channel and hcon references, and link into conn->chan_l.
 * Caller must hold conn->chan_lock. */
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
506 chan->omtu = L2CAP_DEFAULT_MTU;
/* On LE, the ATT fixed channel keeps its well-known CID. */
507 if (chan->dcid == L2CAP_CID_ATT)
508 chan->scid = L2CAP_CID_ATT;
510 chan->scid = l2cap_alloc_cid(conn);
512 /* Alloc CID for connection-oriented socket */
513 chan->scid = l2cap_alloc_cid(conn);
514 chan->omtu = L2CAP_DEFAULT_MTU;
518 case L2CAP_CHAN_CONN_LESS:
519 /* Connectionless socket */
520 chan->scid = L2CAP_CID_CONN_LESS;
521 chan->dcid = L2CAP_CID_CONN_LESS;
522 chan->omtu = L2CAP_DEFAULT_MTU;
525 case L2CAP_CHAN_CONN_FIX_A2MP:
526 chan->scid = L2CAP_CID_A2MP;
527 chan->dcid = L2CAP_CID_A2MP;
528 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
529 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
533 /* Raw socket can send/recv signalling messages only */
534 chan->scid = L2CAP_CID_SIGNALING;
535 chan->dcid = L2CAP_CID_SIGNALING;
536 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended-flow-spec (EFS) parameters: best effort. */
539 chan->local_id = L2CAP_BESTEFFORT_ID;
540 chan->local_stype = L2CAP_SERV_BESTEFFORT;
541 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
542 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
543 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
544 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
546 l2cap_chan_hold(chan);
548 hci_conn_hold(conn->hcon);
550 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
553 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
555 mutex_lock(&conn->chan_lock);
556 __l2cap_chan_add(conn, chan);
557 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop timers, unlink, drop the
 * references taken in __l2cap_chan_add(), tear down AMP state, and purge
 * mode-specific queues. */
560 void l2cap_chan_del(struct l2cap_chan *chan, int err)
562 struct l2cap_conn *conn = chan->conn;
564 __clear_chan_timer(chan);
566 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
569 struct amp_mgr *mgr = conn->hcon->amp_mgr;
570 /* Delete from channel list */
571 list_del(&chan->list);
/* Balances the l2cap_chan_hold() from __l2cap_chan_add(). */
573 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an hcon reference of their own. */
577 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
578 hci_conn_drop(conn->hcon);
580 if (mgr && mgr->bredr_chan == chan)
581 mgr->bredr_chan = NULL;
/* Disconnect the AMP logical link, if this channel was moved to one. */
584 if (chan->hs_hchan) {
585 struct hci_chan *hs_hchan = chan->hs_hchan;
587 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
588 amp_disconnect_logical_link(hs_hchan);
591 chan->ops->teardown(chan, err);
593 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
597 case L2CAP_MODE_BASIC:
600 case L2CAP_MODE_ERTM:
601 __clear_retrans_timer(chan);
602 __clear_monitor_timer(chan);
603 __clear_ack_timer(chan);
605 skb_queue_purge(&chan->srej_q);
607 l2cap_seq_list_free(&chan->srej_list);
608 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough into streaming cleanup (elided in this view) */
612 case L2CAP_MODE_STREAMING:
613 skb_queue_purge(&chan->tx_q);
/* Close a channel according to its current state: send a disconnect
 * request if connected on ACL, reject a pending incoming connection in
 * BT_CONNECT2, or simply tear down/delete otherwise. */
620 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
622 struct l2cap_conn *conn = chan->conn;
623 struct sock *sk = chan->sk;
625 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
628 switch (chan->state) {
630 chan->ops->teardown(chan, 0);
/* Connected/configuring on ACL: request disconnection from the peer. */
635 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
636 conn->hcon->type == ACL_LINK) {
637 __set_chan_timer(chan, sk->sk_sndtimeo);
638 l2cap_send_disconn_req(chan, reason);
640 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: reject the half-open incoming connection. */
644 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
645 conn->hcon->type == ACL_LINK) {
646 struct l2cap_conn_rsp rsp;
649 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
650 result = L2CAP_CR_SEC_BLOCK;
652 result = L2CAP_CR_BAD_PSM;
653 l2cap_state_change(chan, BT_DISCONN);
655 rsp.scid = cpu_to_le16(chan->dcid);
656 rsp.dcid = cpu_to_le16(chan->scid);
657 rsp.result = cpu_to_le16(result);
658 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
659 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
663 l2cap_chan_del(chan, reason);
668 l2cap_chan_del(chan, reason);
672 chan->ops->teardown(chan, 0);
/* Map channel type / PSM / security level to the HCI authentication
 * requirement used when securing the link. */
677 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
679 if (chan->chan_type == L2CAP_CHAN_RAW) {
680 switch (chan->sec_level) {
681 case BT_SECURITY_HIGH:
682 return HCI_AT_DEDICATED_BONDING_MITM;
683 case BT_SECURITY_MEDIUM:
684 return HCI_AT_DEDICATED_BONDING;
686 return HCI_AT_NO_BONDING;
/* SDP never requires bonding; downgrade LOW to the SDP level. */
688 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
689 if (chan->sec_level == BT_SECURITY_LOW)
690 chan->sec_level = BT_SECURITY_SDP;
692 if (chan->sec_level == BT_SECURITY_HIGH)
693 return HCI_AT_NO_BONDING_MITM;
695 return HCI_AT_NO_BONDING;
697 switch (chan->sec_level) {
698 case BT_SECURITY_HIGH:
699 return HCI_AT_GENERAL_BONDING_MITM;
700 case BT_SECURITY_MEDIUM:
701 return HCI_AT_GENERAL_BONDING;
703 return HCI_AT_NO_BONDING;
708 /* Service level security */
/* Request link-level security matching the channel's requirements. */
709 int l2cap_chan_check_security(struct l2cap_chan *chan)
711 struct l2cap_conn *conn = chan->conn;
714 auth_type = l2cap_get_auth_type(chan);
716 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for this connection,
 * wrapping within the kernel-reserved 1-128 range. */
719 static u8 l2cap_get_ident(struct l2cap_conn *conn)
723 /* Get next available identificator.
724  * 1 - 128 are used by kernel.
725  * 129 - 199 are reserved.
726  * 200 - 254 are used by utilities like l2ping, etc.
729 spin_lock(&conn->lock);
/* Wrap-around handling (reset to 1) is on an elided line. */
731 if (++conn->tx_ident > 128)
736 spin_unlock(&conn->lock);
/* Build a signalling PDU and transmit it on the connection's HCI
 * channel at maximum priority. */
741 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
744 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
747 BT_DBG("code 0x%2.2x", code);
/* Use non-flushable ACL packets when the controller supports them. */
752 if (lmp_no_flush_capable(conn->hcon->hdev))
753 flags = ACL_START_NO_FLUSH;
757 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
758 skb->priority = HCI_PRIO_MAX;
760 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (i.e. the channel is in
 * any move state other than stable or wait-prepare). */
763 static bool __chan_is_moving(struct l2cap_chan *chan)
765 return chan->move_state != L2CAP_MOVE_STABLE &&
766 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit one skb for this channel, routing to the high-speed (AMP)
 * link when one is active and the channel is not mid-move. */
769 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
771 struct hci_conn *hcon = chan->conn->hcon;
774 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
777 if (chan->hs_hcon && !__chan_is_moving(chan)) {
779 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
/* BR/EDR path: prefer non-flushable packets unless FLAG_FLUSHABLE. */
786 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
787 lmp_no_flush_capable(hcon->hdev))
788 flags = ACL_START_NO_FLUSH;
792 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
793 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into struct l2cap_ctrl. */
796 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
798 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
799 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
/* Frame-type bit set: S-frame (supervisory); clear: I-frame (data). */
801 if (enh & L2CAP_CTRL_FRAME_TYPE) {
804 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
805 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
812 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
813 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field into struct l2cap_ctrl. */
820 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
822 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
823 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
/* Frame-type bit set: S-frame (supervisory); clear: I-frame (data). */
825 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
828 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
829 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
836 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
837 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from the front of an incoming skb,
 * using the extended (32-bit) or enhanced (16-bit) format per channel. */
844 static inline void __unpack_control(struct l2cap_chan *chan,
847 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
848 __unpack_extended_control(get_unaligned_le32(skb->data),
849 &bt_cb(skb)->control);
850 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
852 __unpack_enhanced_control(get_unaligned_le16(skb->data),
853 &bt_cb(skb)->control);
854 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode struct l2cap_ctrl into the 32-bit extended control format. */
858 static u32 __pack_extended_control(struct l2cap_ctrl *control)
862 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
863 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
865 if (control->sframe) {
866 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
867 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
868 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
870 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
871 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode struct l2cap_ctrl into the 16-bit enhanced control format. */
877 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
881 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
882 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
884 if (control->sframe) {
885 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
886 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
887 packed |= L2CAP_CTRL_FRAME_TYPE;
889 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
890 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field just after the L2CAP basic header in
 * an outgoing skb, using the format negotiated for this channel. */
896 static inline void __pack_control(struct l2cap_chan *chan,
897 struct l2cap_ctrl *control,
900 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
901 put_unaligned_le32(__pack_extended_control(control),
902 skb->data + L2CAP_HDR_SIZE);
904 put_unaligned_le16(__pack_enhanced_control(control),
905 skb->data + L2CAP_HDR_SIZE);
/* Size of an ERTM PDU header for this channel: extended or enhanced. */
909 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
911 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
912 return L2CAP_EXT_HDR_SIZE;
914 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-frame) PDU: basic header, pre-packed control
 * field, and optional CRC16 FCS.  Returns ERR_PTR on allocation failure. */
917 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
921 struct l2cap_hdr *lh;
922 int hlen = __ertm_hdr_size(chan);
924 if (chan->fcs == L2CAP_FCS_CRC16)
925 hlen += L2CAP_FCS_SIZE;
927 skb = bt_skb_alloc(hlen, GFP_KERNEL);
930 return ERR_PTR(-ENOMEM);
932 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
933 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
934 lh->cid = cpu_to_le16(chan->dcid);
936 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
937 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
939 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers everything written so far (header + control field). */
941 if (chan->fcs == L2CAP_FCS_CRC16) {
942 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
943 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
946 skb->priority = HCI_PRIO_MAX;
/* Send a supervisory frame, folding in a pending F-bit, tracking the
 * RNR-sent state, and recording the acknowledged sequence number. */
950 static void l2cap_send_sframe(struct l2cap_chan *chan,
951 struct l2cap_ctrl *control)
956 BT_DBG("chan %p, control %p", chan, control);
958 if (!control->sframe)
/* Never transmit while an AMP channel move is in progress. */
961 if (__chan_is_moving(chan))
964 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
968 if (control->super == L2CAP_SUPER_RR)
969 clear_bit(CONN_RNR_SENT, &chan->conn_state);
970 else if (control->super == L2CAP_SUPER_RNR)
971 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* RR/RNR acknowledge reqseq, so the pending ack timer can be dropped. */
973 if (control->super != L2CAP_SUPER_SREJ) {
974 chan->last_acked_seq = control->reqseq;
975 __clear_ack_timer(chan);
978 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
979 control->final, control->poll, control->super);
981 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
982 control_field = __pack_extended_control(control);
984 control_field = __pack_enhanced_control(control);
986 skb = l2cap_create_sframe_pdu(chan, control_field);
988 l2cap_do_send(chan, skb);
/* Send RR (ready) or RNR (not ready) depending on local busy state,
 * acknowledging up to buffer_seq. */
991 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
993 struct l2cap_ctrl control;
995 BT_DBG("chan %p, poll %d", chan, poll);
997 memset(&control, 0, sizeof(control));
1001 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1002 control.super = L2CAP_SUPER_RNR;
1004 control.super = L2CAP_SUPER_RR;
1006 control.reqseq = chan->buffer_seq;
1007 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
1010 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1012 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Whether this channel should try an AMP (high-speed) physical link:
 * requires HS enabled, an AMP controller, a policy preferring AMP, and
 * the peer advertising the A2MP fixed channel. */
1015 static bool __amp_capable(struct l2cap_chan *chan)
1017 struct l2cap_conn *conn = chan->conn;
1019 if (conn->hs_enabled && hci_amp_capable() &&
1020 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1021 conn->fixed_chan_mask & L2CAP_FC_A2MP)
/* Validate extended flow spec parameters (body elided in this view). */
1027 static bool l2cap_check_efs(struct l2cap_chan *chan)
1029 /* Check EFS parameters */
/* Send an L2CAP Connection Request for this channel's PSM/SCID and mark
 * the connect as pending. */
1033 void l2cap_send_conn_req(struct l2cap_chan *chan)
1035 struct l2cap_conn *conn = chan->conn;
1036 struct l2cap_conn_req req;
1038 req.scid = cpu_to_le16(chan->scid);
1039 req.psm = chan->psm;
1041 chan->ident = l2cap_get_ident(conn);
1043 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1045 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send a Create Channel Request targeting the given AMP controller id. */
1048 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1050 struct l2cap_create_chan_req req;
1051 req.scid = cpu_to_le16(chan->scid);
1052 req.psm = chan->psm;
1053 req.amp_id = amp_id;
1055 chan->ident = l2cap_get_ident(chan->conn);
1057 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop timers, reset retry
 * bookkeeping, flush SREJ state, and enter the move Rx/Tx states. */
1061 static void l2cap_move_setup(struct l2cap_chan *chan)
1063 struct sk_buff *skb;
1065 BT_DBG("chan %p", chan);
/* Only ERTM channels carry per-frame state that needs resetting. */
1067 if (chan->mode != L2CAP_MODE_ERTM)
1070 __clear_retrans_timer(chan);
1071 __clear_monitor_timer(chan);
1072 __clear_ack_timer(chan);
1074 chan->retry_count = 0;
/* Reset retry counters on queued frames so they retransmit cleanly. */
1075 skb_queue_walk(&chan->tx_q, skb) {
1076 if (bt_cb(skb)->control.retries)
1077 bt_cb(skb)->control.retries = 1;
1082 chan->expected_tx_seq = chan->buffer_seq;
1084 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1085 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1086 l2cap_seq_list_clear(&chan->retrans_list);
1087 l2cap_seq_list_clear(&chan->srej_list);
1088 skb_queue_purge(&chan->srej_q);
1090 chan->tx_state = L2CAP_TX_STATE_XMIT;
1091 chan->rx_state = L2CAP_RX_STATE_MOVE;
1093 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Complete an AMP move: return to the stable move state and, for ERTM,
 * resynchronize via poll (initiator) or wait for poll (responder). */
1096 static void l2cap_move_done(struct l2cap_chan *chan)
1098 u8 move_role = chan->move_role;
1099 BT_DBG("chan %p", chan);
1101 chan->move_state = L2CAP_MOVE_STABLE;
1102 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1104 if (chan->mode != L2CAP_MODE_ERTM)
1107 switch (move_role) {
1108 case L2CAP_MOVE_ROLE_INITIATOR:
1109 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1110 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1112 case L2CAP_MOVE_ROLE_RESPONDER:
1113 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel fully connected: clear configuration state, stop the
 * channel timer, and notify the owner. */
1118 static void l2cap_chan_ready(struct l2cap_chan *chan)
1120 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1121 chan->conf_state = 0;
1122 __clear_chan_timer(chan);
1124 chan->state = BT_CONNECTED;
1126 chan->ops->ready(chan);
/* Kick off the connection: discover AMP controllers when preferred,
 * otherwise send a plain connection request. */
1129 static void l2cap_start_connection(struct l2cap_chan *chan)
1131 if (__amp_capable(chan)) {
1132 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1133 a2mp_discover_amp(chan);
1135 l2cap_send_conn_req(chan);
/* Progress an outgoing channel: LE is ready immediately; on BR/EDR,
 * start the connection once the feature-mask exchange is done and
 * security is satisfied, otherwise first issue an info request. */
1139 static void l2cap_do_start(struct l2cap_chan *chan)
1141 struct l2cap_conn *conn = chan->conn;
1143 if (conn->hcon->type == LE_LINK) {
1144 l2cap_chan_ready(chan);
1148 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — wait for its completion. */
1149 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1152 if (l2cap_chan_check_security(chan) &&
1153 __l2cap_no_conn_pending(chan)) {
1154 l2cap_start_connection(chan);
1157 struct l2cap_info_req req;
1158 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1160 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1161 conn->info_ident = l2cap_get_ident(conn);
1163 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1165 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Whether `mode` is supported by both the local stack and the remote
 * feature mask (non-zero == supported). */
1170 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1172 u32 local_feat_mask = l2cap_feat_mask;
1174 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1177 case L2CAP_MODE_ERTM:
1178 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1179 case L2CAP_MODE_STREAMING:
1180 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnection Request for the channel, move it to BT_DISCONN,
 * and record `err` on the owning socket. */
1186 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1188 struct sock *sk = chan->sk;
1189 struct l2cap_conn *conn = chan->conn;
1190 struct l2cap_disconn_req req;
/* Connected ERTM channels must stop their protocol timers first. */
1195 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1196 __clear_retrans_timer(chan);
1197 __clear_monitor_timer(chan);
1198 __clear_ack_timer(chan);
/* A2MP fixed channels have no disconnect PDU; just change state. */
1201 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1202 l2cap_state_change(chan, BT_DISCONN);
1206 req.dcid = cpu_to_le16(chan->dcid);
1207 req.scid = cpu_to_le16(chan->scid);
1208 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1212 __l2cap_state_change(chan, BT_DISCONN);
1213 __l2cap_chan_set_err(chan, err);
1217 /* ---- L2CAP connections ---- */
/* Walk all channels on the connection and advance each one: send the
 * connect request for BT_CONNECT channels that pass security, and
 * answer (accept/defer/reject) channels pending in BT_CONNECT2. */
1218 static void l2cap_conn_start(struct l2cap_conn *conn)
1220 struct l2cap_chan *chan, *tmp;
1222 BT_DBG("conn %p", conn);
1224 mutex_lock(&conn->chan_lock);
1226 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1227 struct sock *sk = chan->sk;
1229 l2cap_chan_lock(chan);
1231 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1232 l2cap_chan_unlock(chan);
1236 if (chan->state == BT_CONNECT) {
1237 if (!l2cap_chan_check_security(chan) ||
1238 !__l2cap_no_conn_pending(chan)) {
1239 l2cap_chan_unlock(chan);
/* Mode required by the socket but unsupported by the peer:
 * abort this channel rather than degrade it. */
1243 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1244 && test_bit(CONF_STATE2_DEVICE,
1245 &chan->conf_state)) {
1246 l2cap_chan_close(chan, ECONNRESET);
1247 l2cap_chan_unlock(chan);
1251 l2cap_start_connection(chan);
1253 } else if (chan->state == BT_CONNECT2) {
1254 struct l2cap_conn_rsp rsp;
1256 rsp.scid = cpu_to_le16(chan->dcid);
1257 rsp.dcid = cpu_to_le16(chan->scid);
1259 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: report pending and let the owner decide. */
1261 if (test_bit(BT_SK_DEFER_SETUP,
1262 &bt_sk(sk)->flags)) {
1263 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1264 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1265 chan->ops->defer(chan);
1268 __l2cap_state_change(chan, BT_CONFIG);
1269 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1270 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1274 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1275 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1278 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only successfully-accepted channels proceed to configuration. */
1281 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1282 rsp.result != L2CAP_CR_SUCCESS) {
1283 l2cap_chan_unlock(chan);
1287 set_bit(CONF_REQ_SENT, &chan->conf_state);
1288 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1289 l2cap_build_conf_req(chan, buf), buf);
1290 chan->num_conf_req++;
1293 l2cap_chan_unlock(chan);
1296 mutex_unlock(&conn->chan_lock);
1299 /* Find socket with cid and source/destination bdaddr.
1300  * Returns closest match, locked.
/* Exact src+dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) match found during the scan is returned. */
1302 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1306 struct l2cap_chan *c, *c1 = NULL;
1308 read_lock(&chan_list_lock);
1310 list_for_each_entry(c, &chan_list, global_l) {
1311 struct sock *sk = c->sk;
1313 if (state && c->state != state)
1316 if (c->scid == cid) {
1317 int src_match, dst_match;
1318 int src_any, dst_any;
1321 src_match = !bacmp(&bt_sk(sk)->src, src);
1322 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1323 if (src_match && dst_match) {
1324 read_unlock(&chan_list_lock);
1329 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1330 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1331 if ((src_match && dst_any) || (src_any && dst_match) ||
1332 (src_any && dst_any))
1337 read_unlock(&chan_list_lock);
/* On an LE link becoming ready: if a server is listening on the ATT
 * fixed channel, spawn a child channel for this connection — unless a
 * client ATT channel already exists for it. */
1342 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1344 struct sock *parent;
1345 struct l2cap_chan *chan, *pchan;
1349 /* Check if we have socket listening on cid */
1350 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1351 conn->src, conn->dst);
1355 /* Client ATT sockets should override the server one */
1356 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1363 chan = pchan->ops->new_connection(pchan);
1367 chan->dcid = L2CAP_CID_ATT;
1369 bacpy(&bt_sk(chan->sk)->src, conn->src);
1370 bacpy(&bt_sk(chan->sk)->dst, conn->dst);
1372 __l2cap_chan_add(conn, chan);
1375 release_sock(parent);
/* Called when the underlying link is up: trigger security on LE,
 * create the LE ATT child channel if needed, then advance every channel
 * on the connection according to its type and state. */
1378 static void l2cap_conn_ready(struct l2cap_conn *conn)
1380 struct l2cap_chan *chan;
1381 struct hci_conn *hcon = conn->hcon;
1383 BT_DBG("conn %p", conn);
1385 /* For outgoing pairing which doesn't necessarily have an
1386  * associated socket (e.g. mgmt_pair_device).
1388 if (hcon->out && hcon->type == LE_LINK)
1389 smp_conn_security(hcon, hcon->pending_sec_level);
1391 mutex_lock(&conn->chan_lock);
1393 if (hcon->type == LE_LINK)
1394 l2cap_le_conn_ready(conn);
1396 list_for_each_entry(chan, &conn->chan_l, list) {
1398 l2cap_chan_lock(chan);
/* A2MP fixed channels manage their own readiness. */
1400 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1401 l2cap_chan_unlock(chan);
1405 if (hcon->type == LE_LINK) {
1406 if (smp_conn_security(hcon, chan->sec_level))
1407 l2cap_chan_ready(chan);
/* Non-connection-oriented channels are ready as soon as the
 * link is — flip them straight to BT_CONNECTED. */
1409 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1410 struct sock *sk = chan->sk;
1411 __clear_chan_timer(chan);
1413 __l2cap_state_change(chan, BT_CONNECTED);
1414 sk->sk_state_change(sk);
1417 } else if (chan->state == BT_CONNECT) {
1418 l2cap_do_start(chan);
1421 l2cap_chan_unlock(chan);
1424 mutex_unlock(&conn->chan_lock);
1427 /* Notify sockets that we cannot guaranty reliability anymore */
1428 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1430 struct l2cap_chan *chan;
1432 BT_DBG("conn %p", conn);
1434 mutex_lock(&conn->chan_lock);
/* Only channels that demanded reliability get the error. */
1436 list_for_each_entry(chan, &conn->chan_l, list) {
1437 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1438 l2cap_chan_set_err(chan, err);
1441 mutex_unlock(&conn->chan_lock);
/* Information-request timer expired: give up waiting for the remote's
 * feature mask and proceed with connection setup using what we have.
 */
1444 static void l2cap_info_timeout(struct work_struct *work)
1446	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
	/* Mark the feature exchange as finished so it is not retried */
1449	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1450	conn->info_ident = 0;
1452	l2cap_conn_start(conn);
1457  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1458  * callback is called during registration. The ->remove callback is called
1459  * during unregistration.
1460  * An l2cap_user object can either be unregistered explicitly or implicitly
1461  * when the underlying l2cap_conn object is deleted. This guarantees that
1462  * l2cap->hcon, l2cap->hchan, .. are valid as long as the remove callback
1463  * hasn't been called. External modules must own a reference to the l2cap_conn
1464  * object if they intend to call l2cap_unregister_user(). The l2cap_conn
1465  * object might get destroyed at any time if they don't.
/* Register an external l2cap_user on a connection. Returns 0 on success;
 * fails if the user is already registered or the conn has been torn down.
 */
1468 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1470	struct hci_dev *hdev = conn->hcon->hdev;
1473	/* We need to check whether l2cap_conn is registered. If it is not, we
1474	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1475	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1476	 * relies on the parent hci_conn object to be locked. This itself relies
1477	 * on the hci_dev object to be locked. So we must lock the hci device
	/* Non-NULL list pointers mean this user is already on a list */
1482	if (user->list.next || user->list.prev) {
1487	/* conn->hchan is NULL after l2cap_conn_del() was called */
1493	ret = user->probe(conn, user);
1497	list_add(&user->list, &conn->users);
1501	hci_dev_unlock(hdev);
1504 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister an l2cap_user; invokes its ->remove callback.
 * Safe to call on a user that was never registered (list pointers NULL).
 */
1506 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1508	struct hci_dev *hdev = conn->hcon->hdev;
	/* NULL list pointers mean the user is not currently registered */
1512	if (!user->list.next || !user->list.prev)
1515	list_del(&user->list);
	/* Clear pointers so a future l2cap_register_user() sees it as free */
1516	user->list.next = NULL;
1517	user->list.prev = NULL;
1518	user->remove(conn, user);
1521	hci_dev_unlock(hdev);
1523 EXPORT_SYMBOL(l2cap_unregister_user);
/* Implicit unregistration path: remove every registered user, calling each
 * ->remove callback, as part of connection teardown.
 */
1525 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1527	struct l2cap_user *user;
1529	while (!list_empty(&conn->users)) {
1530		user = list_first_entry(&conn->users, struct l2cap_user, list);
1531		list_del(&user->list);
		/* Reset list pointers so the user can be re-registered later */
1532		user->list.next = NULL;
1533		user->list.prev = NULL;
1534		user->remove(conn, user);
/* Tear down an L2CAP connection: close every channel with @err, drop the
 * HCI channel, cancel pending timers and release the conn reference.
 * Caller is expected to hold the hci_conn/hci_dev locks (see
 * l2cap_register_user() comment).
 */
1538 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1540	struct l2cap_conn *conn = hcon->l2cap_data;
1541	struct l2cap_chan *chan, *l;
1546	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
	/* Drop any partially reassembled frame */
1548	kfree_skb(conn->rx_skb);
1550	l2cap_unregister_all_users(conn);
1552	mutex_lock(&conn->chan_lock);
	/* _safe variant: l2cap_chan_del() unlinks chan from conn->chan_l */
1555	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so ops->close() can run after chan is unlinked */
1556		l2cap_chan_hold(chan);
1557		l2cap_chan_lock(chan);
1559		l2cap_chan_del(chan, err);
1561		l2cap_chan_unlock(chan);
1563		chan->ops->close(chan);
1564		l2cap_chan_put(chan);
1567	mutex_unlock(&conn->chan_lock);
1569	hci_chan_del(conn->hchan);
	/* Cancel timers that may still reference conn */
1571	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1572		cancel_delayed_work_sync(&conn->info_timer);
1574	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1575		cancel_delayed_work_sync(&conn->security_timer);
1576		smp_chan_destroy(conn);
1579	hcon->l2cap_data = NULL;
	/* Release the reference taken in l2cap_conn_add() */
1581	l2cap_conn_put(conn);
/* SMP security-procedure timer expired on an LE link: destroy the pending
 * SMP context and tear the connection down with ETIMEDOUT.
 */
1584 static void security_timeout(struct work_struct *work)
1586	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1587					       security_timer.work);
1589	BT_DBG("conn %p", conn);
	/* test_and_clear avoids racing with l2cap_conn_del() doing the same */
1591	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1592		smp_chan_destroy(conn);
1593		l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialize the l2cap_conn for an hci_conn, creating the
 * backing hci_chan. Returns the new conn (or the existing one / NULL on
 * failure -- error paths elided in this view; confirm against full source).
 */
1597 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1599	struct l2cap_conn *conn = hcon->l2cap_data;
1600	struct hci_chan *hchan;
1605	hchan = hci_chan_create(hcon);
1609	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
		/* Allocation failed: undo the hci_chan we just created */
1611		hci_chan_del(hchan);
1615	kref_init(&conn->ref);
1616	hcon->l2cap_data = conn;
	/* Reference released in l2cap_conn_free() */
1618	hci_conn_get(conn->hcon);
1619	conn->hchan = hchan;
1621	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
	/* MTU comes from the controller; LE may advertise its own value */
1623	switch (hcon->type) {
1625		if (hcon->hdev->le_mtu) {
1626			conn->mtu = hcon->hdev->le_mtu;
1631		conn->mtu = hcon->hdev->acl_mtu;
1635	conn->src = &hcon->hdev->bdaddr;
1636	conn->dst = &hcon->dst;
1638	conn->feat_mask = 0;
1640	if (hcon->type == ACL_LINK)
1641		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1642					    &hcon->hdev->dev_flags);
1644	spin_lock_init(&conn->lock);
1645	mutex_init(&conn->chan_lock);
1647	INIT_LIST_HEAD(&conn->chan_l);
1648	INIT_LIST_HEAD(&conn->users);
	/* LE uses the SMP security timer; BR/EDR uses the info-req timer */
1650	if (hcon->type == LE_LINK)
1651		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1653		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1655	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference and free the conn. */
1660 static void l2cap_conn_free(struct kref *ref)
1662	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1664	hci_conn_put(conn->hcon);
/* Take a reference on the connection. Paired with l2cap_conn_put(). */
1668 void l2cap_conn_get(struct l2cap_conn *conn)
1670	kref_get(&conn->ref);
1672 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; frees the conn via l2cap_conn_free() on last put. */
1674 void l2cap_conn_put(struct l2cap_conn *conn)
1676	kref_put(&conn->ref, l2cap_conn_free);
1678 EXPORT_SYMBOL(l2cap_conn_put);
1680 /* ---- Socket interface ---- */
1682 /* Find socket with psm and source / destination bdaddr.
1683  * Returns closest match.
 * Exact src+dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) match seen during the walk is returned.
1685 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1689	struct l2cap_chan *c, *c1 = NULL;
1691	read_lock(&chan_list_lock);
1693	list_for_each_entry(c, &chan_list, global_l) {
1694		struct sock *sk = c->sk;
		/* state == 0 means "any state" */
1696		if (state && c->state != state)
1699		if (c->psm == psm) {
1700			int src_match, dst_match;
1701			int src_any, dst_any;
1704			src_match = !bacmp(&bt_sk(sk)->src, src);
1705			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			/* Perfect match: stop searching */
1706			if (src_match && dst_match) {
1707				read_unlock(&chan_list_lock);
			/* Remember a wildcard candidate in case nothing better shows up */
1712			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1713			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1714			if ((src_match && dst_any) || (src_any && dst_match) ||
1715			    (src_any && dst_any))
1720	read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan to @dst (by PSM or fixed
 * CID). Validates PSM/mode/state, creates or reuses the HCI link, attaches
 * the channel to the connection and starts the L2CAP handshake.
 * Returns 0 on success or a negative errno (error paths elided in this
 * view; confirm against full source).
 */
1725 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1726		       bdaddr_t *dst, u8 dst_type)
1728	struct sock *sk = chan->sk;
1729	bdaddr_t *src = &bt_sk(sk)->src;
1730	struct l2cap_conn *conn;
1731	struct hci_conn *hcon;
1732	struct hci_dev *hdev;
1736	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1737	       dst_type, __le16_to_cpu(psm));
1739	hdev = hci_get_route(dst, src);
1741		return -EHOSTUNREACH;
1745	l2cap_chan_lock(chan);
1747	/* PSM must be odd and lsb of upper byte must be 0 */
1748	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1749	    chan->chan_type != L2CAP_CHAN_RAW) {
	/* Connection-oriented channels need a PSM or a fixed CID */
1754	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1759	switch (chan->mode) {
1760	case L2CAP_MODE_BASIC:
1762	case L2CAP_MODE_ERTM:
1763	case L2CAP_MODE_STREAMING:
1772	switch (chan->state) {
1776		/* Already connecting */
1781		/* Already connected */
1795	/* Set destination address and psm */
1797	bacpy(&bt_sk(sk)->dst, dst);
1803	auth_type = l2cap_get_auth_type(chan);
	/* LE vs BR/EDR link selection based on destination address type */
1805	if (bdaddr_type_is_le(dst_type))
1806		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1807				   chan->sec_level, auth_type);
1809		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1810				   chan->sec_level, auth_type);
1813		err = PTR_ERR(hcon);
1817	conn = l2cap_conn_add(hcon);
1819		hci_conn_drop(hcon);
	/* Refuse a fixed CID that is already in use on this connection */
1824	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1825		hci_conn_drop(hcon);
1830	/* Update source addr of the socket */
1831	bacpy(src, conn->src);
	/* l2cap_chan_add takes conn->chan_lock; drop chan lock to respect
	 * lock ordering, then re-take it
	 */
1833	l2cap_chan_unlock(chan);
1834	l2cap_chan_add(conn, chan);
1835	l2cap_chan_lock(chan);
1837	/* l2cap_chan_add takes its own ref so we can drop this one */
1838	hci_conn_drop(hcon);
1840	l2cap_state_change(chan, BT_CONNECT);
1841	__set_chan_timer(chan, sk->sk_sndtimeo);
	/* Link may already be up (e.g. second channel on same ACL) */
1843	if (hcon->state == BT_CONNECTED) {
1844		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1845			__clear_chan_timer(chan);
1846			if (l2cap_chan_check_security(chan))
1847				l2cap_state_change(chan, BT_CONNECTED);
1849			l2cap_do_start(chan);
1855	l2cap_chan_unlock(chan);
1856	hci_dev_unlock(hdev);
/* Sleep (interruptibly) until all outstanding ERTM frames are acked, the
 * channel detaches from its connection, a signal arrives, or the socket
 * reports an error. Returns 0 or a negative errno.
 */
1861 int __l2cap_wait_ack(struct sock *sk)
1863	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1864	DECLARE_WAITQUEUE(wait, current);
1868	add_wait_queue(sk_sleep(sk), &wait);
1869	set_current_state(TASK_INTERRUPTIBLE);
1870	while (chan->unacked_frames > 0 && chan->conn) {
		/* Bail out on a pending signal with the right errno */
1874		if (signal_pending(current)) {
1875			err = sock_intr_errno(timeo);
1880		timeo = schedule_timeout(timeo);
		/* Re-arm state before re-checking the loop condition */
1882		set_current_state(TASK_INTERRUPTIBLE);
1884		err = sock_error(sk);
1888	set_current_state(TASK_RUNNING);
1889	remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer expired (no response to a poll): feed the
 * MONITOR_TO event into the TX state machine.
 */
1893 static void l2cap_monitor_timeout(struct work_struct *work)
1895	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1896					       monitor_timer.work);
1898	BT_DBG("chan %p", chan);
1900	l2cap_chan_lock(chan);
	/* Early-exit path: drop the timer's channel reference and return */
1903	l2cap_chan_unlock(chan);
1904	l2cap_chan_put(chan);
1908	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1910	l2cap_chan_unlock(chan);
1911	l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed the RETRANS_TO event into the
 * TX state machine. Mirrors l2cap_monitor_timeout().
 */
1914 static void l2cap_retrans_timeout(struct work_struct *work)
1916	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1917					       retrans_timer.work);
1919	BT_DBG("chan %p", chan);
1921	l2cap_chan_lock(chan);
	/* Early-exit path: drop the timer's channel reference and return */
1924	l2cap_chan_unlock(chan);
1925	l2cap_chan_put(chan);
1929	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1930	l2cap_chan_unlock(chan);
1931	l2cap_chan_put(chan);
/* Streaming mode TX: drain @skbs plus anything already queued, stamping
 * each I-frame with the next txseq and optional FCS. No retransmission
 * state is kept in this mode.
 */
1934 static void l2cap_streaming_send(struct l2cap_chan *chan,
1935				 struct sk_buff_head *skbs)
1937	struct sk_buff *skb;
1938	struct l2cap_ctrl *control;
1940	BT_DBG("chan %p, skbs %p", chan, skbs);
	/* Don't transmit while an AMP channel move is in progress */
1942	if (__chan_is_moving(chan))
1945	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1947	while (!skb_queue_empty(&chan->tx_q)) {
1949		skb = skb_dequeue(&chan->tx_q);
1951		bt_cb(skb)->control.retries = 1;
1952		control = &bt_cb(skb)->control;
		/* Streaming mode never acks, so reqseq is always 0 */
1954		control->reqseq = 0;
1955		control->txseq = chan->next_tx_seq;
1957		__pack_control(chan, control, skb);
1959		if (chan->fcs == L2CAP_FCS_CRC16) {
1960			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1961			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1964		l2cap_do_send(chan, skb);
1966		BT_DBG("Sent txseq %u", control->txseq);
1968		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1969		chan->frames_sent++;
/* ERTM TX: transmit queued I-frames while the remote window has room and
 * the TX state machine is in XMIT. Each frame is cloned before sending so
 * the original stays queued for potential retransmission. Returns the
 * number of frames sent (visible from the final BT_DBG).
 */
1973 static int l2cap_ertm_send(struct l2cap_chan *chan)
1975	struct sk_buff *skb, *tx_skb;
1976	struct l2cap_ctrl *control;
1979	BT_DBG("chan %p", chan);
1981	if (chan->state != BT_CONNECTED)
	/* Remote signalled busy (RNR): hold transmission */
1984	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1987	if (__chan_is_moving(chan))
1990	while (chan->tx_send_head &&
1991	       chan->unacked_frames < chan->remote_tx_win &&
1992	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1994		skb = chan->tx_send_head;
1996		bt_cb(skb)->control.retries = 1;
1997		control = &bt_cb(skb)->control;
		/* Piggy-back a pending F-bit on this frame */
1999		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2002		control->reqseq = chan->buffer_seq;
2003		chan->last_acked_seq = chan->buffer_seq;
2004		control->txseq = chan->next_tx_seq;
2006		__pack_control(chan, control, skb);
2008		if (chan->fcs == L2CAP_FCS_CRC16) {
2009			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2010			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2013		/* Clone after data has been modified. Data is assumed to be
2014		   read-only (for locking purposes) on cloned sk_buffs.
2016		tx_skb = skb_clone(skb, GFP_KERNEL);
2021		__set_retrans_timer(chan);
2023		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2024		chan->unacked_frames++;
2025		chan->frames_sent++;
		/* Advance tx_send_head; NULL once the queue is exhausted */
2028		if (skb_queue_is_last(&chan->tx_q, skb))
2029			chan->tx_send_head = NULL;
2031			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2033		l2cap_do_send(chan, tx_skb);
2034		BT_DBG("Sent txseq %u", control->txseq);
2037	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2038	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number currently on chan->retrans_list,
 * enforcing the max_tx retry limit and re-stamping control fields and FCS
 * on each resent frame.
 */
2043 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2045	struct l2cap_ctrl control;
2046	struct sk_buff *skb;
2047	struct sk_buff *tx_skb;
2050	BT_DBG("chan %p", chan);
2052	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2055	if (__chan_is_moving(chan))
2058	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2059		seq = l2cap_seq_list_pop(&chan->retrans_list);
2061		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2063			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2068		bt_cb(skb)->control.retries++;
2069		control = bt_cb(skb)->control;
		/* max_tx == 0 means unlimited retries */
2071		if (chan->max_tx != 0 &&
2072		    bt_cb(skb)->control.retries > chan->max_tx) {
2073			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2074			l2cap_send_disconn_req(chan, ECONNRESET);
2075			l2cap_seq_list_clear(&chan->retrans_list);
2079		control.reqseq = chan->buffer_seq;
2080		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2085		if (skb_cloned(skb)) {
2086			/* Cloned sk_buffs are read-only, so we need a
			 * writable copy to update the header in place
2089			tx_skb = skb_copy(skb, GFP_KERNEL);
2091			tx_skb = skb_clone(skb, GFP_KERNEL);
			/* Allocation failed: abandon this resend pass */
2095			l2cap_seq_list_clear(&chan->retrans_list);
2099		/* Update skb contents */
2100		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2101			put_unaligned_le32(__pack_extended_control(&control),
2102					   tx_skb->data + L2CAP_HDR_SIZE);
2104			put_unaligned_le16(__pack_enhanced_control(&control),
2105					   tx_skb->data + L2CAP_HDR_SIZE);
		/* FCS covers the rewritten control field, so recompute it */
2108		if (chan->fcs == L2CAP_FCS_CRC16) {
2109			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2110			put_unaligned_le16(fcs, skb_put(tx_skb,
2114		l2cap_do_send(chan, tx_skb);
2116		BT_DBG("Resent txseq %d", control.txseq);
2118		chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame requested by an SREJ (control->reqseq). */
2122 static void l2cap_retransmit(struct l2cap_chan *chan,
2123			     struct l2cap_ctrl *control)
2125	BT_DBG("chan %p, control %p", chan, control);
2127	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2128	l2cap_ertm_resend(chan);
/* REJ handling: queue every unacked frame from control->reqseq up to (but
 * not including) tx_send_head for retransmission, then resend.
 */
2131 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2132				 struct l2cap_ctrl *control)
2134	struct sk_buff *skb;
2136	BT_DBG("chan %p, control %p", chan, control);
	/* A poll demands an F-bit on our next response */
2139		set_bit(CONN_SEND_FBIT, &chan->conn_state);
	/* Start from a clean retransmission list */
2141	l2cap_seq_list_clear(&chan->retrans_list);
2143	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2146	if (chan->unacked_frames) {
		/* First walk: find the frame with txseq == reqseq */
2147		skb_queue_walk(&chan->tx_q, skb) {
2148			if (bt_cb(skb)->control.txseq == control->reqseq ||
2149			    skb == chan->tx_send_head)
		/* Second walk: append everything already sent after it */
2153		skb_queue_walk_from(&chan->tx_q, skb) {
2154			if (skb == chan->tx_send_head)
2157			l2cap_seq_list_append(&chan->retrans_list,
2158					      bt_cb(skb)->control.txseq);
2161		l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, piggy-back
 * acks on outgoing I-frames when possible, send an explicit RR when the
 * ack window is 3/4 full, otherwise (re)arm the ack timer.
 */
2165 static void l2cap_send_ack(struct l2cap_chan *chan)
2167	struct l2cap_ctrl control;
2168	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2169					 chan->last_acked_seq);
2172	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2173	       chan, chan->last_acked_seq, chan->buffer_seq);
2175	memset(&control, 0, sizeof(control));
	/* Locally busy: tell the remote to stop with an RNR */
2178	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2179	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2180		__clear_ack_timer(chan);
2181		control.super = L2CAP_SUPER_RNR;
2182		control.reqseq = chan->buffer_seq;
2183		l2cap_send_sframe(chan, &control);
2185		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2186			l2cap_ertm_send(chan);
2187			/* If any i-frames were sent, they included an ack */
2188			if (chan->buffer_seq == chan->last_acked_seq)
2192		/* Ack now if the window is 3/4ths full.
2193		 * Calculate without mul or div
		 * threshold = 3/4 * ack_win  via  (w + 2w) >> 2
2195		threshold = chan->ack_win;
2196		threshold += threshold << 1;
2199		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2202		if (frames_to_ack >= threshold) {
2203			__clear_ack_timer(chan);
2204			control.super = L2CAP_SUPER_RR;
2205			control.reqseq = chan->buffer_seq;
2206			l2cap_send_sframe(chan, &control);
		/* Below threshold: defer the ack via the ack timer */
2211			__set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb, chaining extra fragment
 * skbs (each at most conn->mtu bytes) onto skb's frag_list as needed.
 * Returns 0 on success or a negative errno.
 */
2215 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2216					 struct msghdr *msg, int len,
2217					 int count, struct sk_buff *skb)
2219	struct l2cap_conn *conn = chan->conn;
2220	struct sk_buff **frag;
	/* First @count bytes go directly into the head skb */
2223	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2229	/* Continuation fragments (no L2CAP header) */
2230	frag = &skb_shinfo(skb)->frag_list;
2232		struct sk_buff *tmp;
2234		count = min_t(unsigned int, conn->mtu, len);
2236		tmp = chan->ops->alloc_skb(chan, count,
2237					   msg->msg_flags & MSG_DONTWAIT);
2239			return PTR_ERR(tmp);
2243		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2246		(*frag)->priority = skb->priority;
		/* Account fragment bytes in the head skb's totals */
2251		skb->len += (*frag)->len;
2252		skb->data_len += (*frag)->len;
2254		frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by the
 * 2-byte PSM, then the payload from @msg. Returns the skb or ERR_PTR.
 */
2260 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2261						 struct msghdr *msg, size_t len,
2264	struct l2cap_conn *conn = chan->conn;
2265	struct sk_buff *skb;
2266	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2267	struct l2cap_hdr *lh;
2269	BT_DBG("chan %p len %zu priority %u", chan, len, priority);
	/* Head skb carries at most one HCI MTU; rest goes to frag_list */
2271	count = min_t(unsigned int, (conn->mtu - hlen), len);
2273	skb = chan->ops->alloc_skb(chan, count + hlen,
2274				   msg->msg_flags & MSG_DONTWAIT);
2278	skb->priority = priority;
2280	/* Create L2CAP header */
2281	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2282	lh->cid = cpu_to_le16(chan->dcid);
2283	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2284	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2286	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2287	if (unlikely(err < 0)) {
2289		return ERR_PTR(err);
/* Build a basic-mode B-frame PDU: plain L2CAP header plus payload from
 * @msg. Returns the skb or ERR_PTR.
 */
2294 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2295					      struct msghdr *msg, size_t len,
2298	struct l2cap_conn *conn = chan->conn;
2299	struct sk_buff *skb;
2301	struct l2cap_hdr *lh;
2303	BT_DBG("chan %p len %zu", chan, len);
	/* Head skb capped at the HCI MTU; remainder fragments via frag_list */
2305	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2307	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2308				   msg->msg_flags & MSG_DONTWAIT);
2312	skb->priority = priority;
2314	/* Create L2CAP header */
2315	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2316	lh->cid = cpu_to_le16(chan->dcid);
2317	lh->len = cpu_to_le16(len);
2319	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2320	if (unlikely(err < 0)) {
2322		return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder control
 * field (filled in at send time), optional SDU-length field for the first
 * segment, payload, with room reserved for an FCS. Returns skb or ERR_PTR.
 */
2327 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2328					       struct msghdr *msg, size_t len,
2331	struct l2cap_conn *conn = chan->conn;
2332	struct sk_buff *skb;
2333	int err, count, hlen;
2334	struct l2cap_hdr *lh;
2336	BT_DBG("chan %p len %zu", chan, len);
2339		return ERR_PTR(-ENOTCONN);
	/* Header size depends on enhanced vs extended control field */
2341	hlen = __ertm_hdr_size(chan);
	/* First segment of a segmented SDU also carries the SDU length */
2344		hlen += L2CAP_SDULEN_SIZE;
2346	if (chan->fcs == L2CAP_FCS_CRC16)
2347		hlen += L2CAP_FCS_SIZE;
2349	count = min_t(unsigned int, (conn->mtu - hlen), len);
2351	skb = chan->ops->alloc_skb(chan, count + hlen,
2352				   msg->msg_flags & MSG_DONTWAIT);
2356	/* Create L2CAP header */
2357	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2358	lh->cid = cpu_to_le16(chan->dcid);
2359	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2361	/* Control header is populated later */
2362	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2363		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2365		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2368		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2370	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2371	if (unlikely(err < 0)) {
2373		return ERR_PTR(err);
	/* Stash per-frame control metadata for the TX state machine */
2376	bt_cb(skb)->control.fcs = chan->fcs;
2377	bt_cb(skb)->control.retries = 0;
/* Segment an outgoing SDU into I-frame PDUs sized to fit a single HCI
 * fragment, tagging each with the proper SAR value (UNSEGMENTED, or
 * START/CONTINUE/END). PDUs are queued on @seg_queue. Returns 0 or a
 * negative errno.
 */
2381 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2382			     struct sk_buff_head *seg_queue,
2383			     struct msghdr *msg, size_t len)
2385	struct sk_buff *skb;
2390	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2392	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2393	 * so fragmented skbs are not used.  The HCI layer's handling
2394	 * of fragmented skbs is not compatible with ERTM's queueing.
2397	/* PDU size is derived from the HCI MTU */
2398	pdu_len = chan->conn->mtu;
2400	/* Constrain PDU size for BR/EDR connections */
2402		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2404	/* Adjust for largest possible L2CAP overhead. */
2406		pdu_len -= L2CAP_FCS_SIZE;
2408	pdu_len -= __ertm_hdr_size(chan);
2410	/* Remote device may have requested smaller PDUs */
2411	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
	/* Whole SDU fits in one PDU: no segmentation needed */
2413	if (len <= pdu_len) {
2414		sar = L2CAP_SAR_UNSEGMENTED;
2418		sar = L2CAP_SAR_START;
		/* First segment also carries the 2-byte SDU length field */
2420		pdu_len -= L2CAP_SDULEN_SIZE;
2424		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
			/* On failure, discard everything queued so far */
2427			__skb_queue_purge(seg_queue);
2428			return PTR_ERR(skb);
2431		bt_cb(skb)->control.sar = sar;
2432		__skb_queue_tail(seg_queue, skb);
		/* Subsequent segments have no SDU length field */
2437			pdu_len += L2CAP_SDULEN_SIZE;
2440		if (len <= pdu_len) {
2441			sar = L2CAP_SAR_END;
2444			sar = L2CAP_SAR_CONTINUE;
/* Top-level send entry point: dispatch @msg according to the channel type
 * and mode (connectionless, basic, or ERTM/streaming with segmentation).
 * Returns bytes sent or a negative errno.
 */
2451 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2454	struct sk_buff *skb;
2456	struct sk_buff_head seg_queue;
2458	/* Connectionless channel */
2459	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2460		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2462			return PTR_ERR(skb);
2464		l2cap_do_send(chan, skb);
2468	switch (chan->mode) {
2469	case L2CAP_MODE_BASIC:
2470		/* Check outgoing MTU */
2471		if (len > chan->omtu)
2474		/* Create a basic PDU */
2475		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2477			return PTR_ERR(skb);
2479		l2cap_do_send(chan, skb);
2483	case L2CAP_MODE_ERTM:
2484	case L2CAP_MODE_STREAMING:
2485		/* Check outgoing MTU */
2486		if (len > chan->omtu) {
2491		__skb_queue_head_init(&seg_queue);
2493		/* Do segmentation before calling in to the state machine,
2494		 * since it's possible to block while waiting for memory
2497		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2499		/* The channel could have been closed while segmenting,
2500		 * check that it is still connected.
2502		if (chan->state != BT_CONNECTED) {
2503			__skb_queue_purge(&seg_queue);
2510		if (chan->mode == L2CAP_MODE_ERTM)
2511			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2513			l2cap_streaming_send(chan, &seg_queue);
2517		/* If the skbs were not queued for sending, they'll still be in
2518		 * seg_queue and need to be purged.
2520		__skb_queue_purge(&seg_queue);
	/* Unknown mode: note this logs chan->mode, not chan->state */
2524		BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ for every sequence number between expected_tx_seq and
 * @txseq that is not already buffered in srej_q, recording each on
 * srej_list.
 */
2531 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2533	struct l2cap_ctrl control;
2536	BT_DBG("chan %p, txseq %u", chan, txseq);
2538	memset(&control, 0, sizeof(control));
2540	control.super = L2CAP_SUPER_SREJ;
2542	for (seq = chan->expected_tx_seq; seq != txseq;
2543	     seq = __next_seq(chan, seq)) {
		/* Already-received (buffered) frames don't need an SREJ */
2544		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2545			control.reqseq = seq;
2546			l2cap_send_sframe(chan, &control);
2547			l2cap_seq_list_append(&chan->srej_list, seq);
2551	chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested sequence number
 * (the tail of srej_list), if any.
 */
2554 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2556	struct l2cap_ctrl control;
2558	BT_DBG("chan %p", chan);
	/* Nothing outstanding to re-request */
2560	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2563	memset(&control, 0, sizeof(control));
2565	control.super = L2CAP_SUPER_SREJ;
2566	control.reqseq = chan->srej_list.tail;
2567	l2cap_send_sframe(chan, &control);
/* Re-send SREJs for all outstanding sequence numbers up to @txseq,
 * rotating each popped entry back onto the list so it stays tracked.
 */
2570 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2572	struct l2cap_ctrl control;
2576	BT_DBG("chan %p, txseq %u", chan, txseq);
2578	memset(&control, 0, sizeof(control));
2580	control.super = L2CAP_SUPER_SREJ;
2582	/* Capture initial list head to allow only one pass through the list. */
2583	initial_head = chan->srej_list.head;
2586		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the target txseq or when the list is exhausted */
2587		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2590		control.reqseq = seq;
2591		l2cap_send_sframe(chan, &control);
		/* Re-append: the frame is still missing, keep tracking it */
2592		l2cap_seq_list_append(&chan->srej_list, seq);
2593	} while (chan->srej_list.head != initial_head);
/* Process an incoming ack (reqseq): free every acked frame from the TX
 * queue, update expected_ack_seq, and stop the retransmission timer when
 * nothing remains unacked.
 */
2596 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2598	struct sk_buff *acked_skb;
2601	BT_DBG("chan %p, reqseq %u", chan, reqseq);
	/* Nothing outstanding, or this ack repeats what we already know */
2603	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2606	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2607	       chan->expected_ack_seq, chan->unacked_frames);
2609	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2610	     ackseq = __next_seq(chan, ackseq)) {
2612		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
			/* Acked: frame no longer needed for retransmission */
2614			skb_unlink(acked_skb, &chan->tx_q);
2615			kfree_skb(acked_skb);
2616			chan->unacked_frames--;
2620	chan->expected_ack_seq = reqseq;
2622	if (chan->unacked_frames == 0)
2623		__clear_retrans_timer(chan);
2625	BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort SREJ_SENT receive state: discard buffered out-of-order frames,
 * clear the SREJ bookkeeping, and fall back to plain RECV.
 */
2628 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2630	BT_DBG("chan %p", chan);
2632	chan->expected_tx_seq = chan->buffer_seq;
2633	l2cap_seq_list_clear(&chan->srej_list);
2634	skb_queue_purge(&chan->srej_q);
2635	chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: handle data requests, local-busy
 * transitions, acks, and poll events. Poll-type events transition the
 * machine to WAIT_F.
 */
2638 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2639				struct l2cap_ctrl *control,
2640				struct sk_buff_head *skbs, u8 event)
2642	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2646	case L2CAP_EV_DATA_REQUEST:
		/* New data: queue it and transmit immediately */
2647		if (chan->tx_send_head == NULL)
2648			chan->tx_send_head = skb_peek(skbs);
2650		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2651		l2cap_ertm_send(chan);
2653	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2654		BT_DBG("Enter LOCAL_BUSY");
2655		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2657		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2658			/* The SREJ_SENT state must be aborted if we are to
2659			 * enter the LOCAL_BUSY state.
2661			l2cap_abort_rx_srej_sent(chan);
		/* l2cap_send_ack() emits the RNR while locally busy */
2664		l2cap_send_ack(chan);
2667	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2668		BT_DBG("Exit LOCAL_BUSY");
2669		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2671		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2672			struct l2cap_ctrl local_control;
			/* We sent RNR earlier; poll with RR to resume */
2674			memset(&local_control, 0, sizeof(local_control));
2675			local_control.sframe = 1;
2676			local_control.super = L2CAP_SUPER_RR;
2677			local_control.poll = 1;
2678			local_control.reqseq = chan->buffer_seq;
2679			l2cap_send_sframe(chan, &local_control);
2681			chan->retry_count = 1;
2682			__set_monitor_timer(chan);
2683			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2686	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2687		l2cap_process_reqseq(chan, control->reqseq);
2689	case L2CAP_EV_EXPLICIT_POLL:
		/* Send a poll and wait for the F-bit response in WAIT_F */
2690		l2cap_send_rr_or_rnr(chan, 1);
2691		chan->retry_count = 1;
2692		__set_monitor_timer(chan);
2693		__clear_ack_timer(chan);
2694		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2696	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the remote and enter WAIT_F */
2697		l2cap_send_rr_or_rnr(chan, 1);
2698		chan->retry_count = 1;
2699		__set_monitor_timer(chan);
2700		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2702	case L2CAP_EV_RECV_FBIT:
2703		/* Nothing to process */
/* ERTM TX state machine, WAIT_F state: a poll is outstanding, so new data
 * is queued but not sent. An F-bit response returns the machine to XMIT;
 * monitor timeouts re-poll up to max_tx times before disconnecting.
 */
2710 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2711				  struct l2cap_ctrl *control,
2712				  struct sk_buff_head *skbs, u8 event)
2714	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2718	case L2CAP_EV_DATA_REQUEST:
2719		if (chan->tx_send_head == NULL)
2720			chan->tx_send_head = skb_peek(skbs);
2721		/* Queue data, but don't send. */
2722		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2724	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2725		BT_DBG("Enter LOCAL_BUSY");
2726		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2728		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2729			/* The SREJ_SENT state must be aborted if we are to
2730			 * enter the LOCAL_BUSY state.
2732			l2cap_abort_rx_srej_sent(chan);
		/* l2cap_send_ack() emits the RNR while locally busy */
2735		l2cap_send_ack(chan);
2738	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2739		BT_DBG("Exit LOCAL_BUSY");
2740		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2742		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2743			struct l2cap_ctrl local_control;
			/* We sent RNR earlier; poll with RR to resume */
2744			memset(&local_control, 0, sizeof(local_control));
2745			local_control.sframe = 1;
2746			local_control.super = L2CAP_SUPER_RR;
2747			local_control.poll = 1;
2748			local_control.reqseq = chan->buffer_seq;
2749			l2cap_send_sframe(chan, &local_control);
2751			chan->retry_count = 1;
2752			__set_monitor_timer(chan);
2753			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2756	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2757		l2cap_process_reqseq(chan, control->reqseq);
2761	case L2CAP_EV_RECV_FBIT:
		/* F-bit answers our poll: back to normal transmission */
2762		if (control && control->final) {
2763			__clear_monitor_timer(chan);
2764			if (chan->unacked_frames > 0)
2765				__set_retrans_timer(chan);
2766			chan->retry_count = 0;
2767			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* Fixed printk format: was "0x2.2%x", which printed a
			 * literal "0x2.2" followed by unpadded hex.
			 */
2768			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2771	case L2CAP_EV_EXPLICIT_POLL:
		/* A poll is already outstanding; ignore */
2774	case L2CAP_EV_MONITOR_TO:
		/* No response to the poll yet: re-poll until max_tx exceeded
		 * (max_tx == 0 means retry forever)
		 */
2775		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2776			l2cap_send_rr_or_rnr(chan, 1);
2777			__set_monitor_timer(chan);
2778			chan->retry_count++;
2780			l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch an event (and optional control/skb list) to the handler for
 * the current ERTM TX state.
 */
2788 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2789		     struct sk_buff_head *skbs, u8 event)
2791	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2792	       chan, control, skbs, event, chan->tx_state);
2794	switch (chan->tx_state) {
2795	case L2CAP_TX_STATE_XMIT:
2796		l2cap_tx_state_xmit(chan, control, skbs, event);
2798	case L2CAP_TX_STATE_WAIT_F:
2799		l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received reqseq/F-bit to the TX state machine. */
2807 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2808			     struct l2cap_ctrl *control)
2810	BT_DBG("chan %p, control %p", chan, control);
2811	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only a received F-bit to the TX state machine. */
2814 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2815				  struct l2cap_ctrl *control)
2817	BT_DBG("chan %p, control %p", chan, control);
2818	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2821 /* Copy frame to all raw sockets on that connection */
2822 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2824	struct sk_buff *nskb;
2825	struct l2cap_chan *chan;
2827	BT_DBG("conn %p", conn);
2829	mutex_lock(&conn->chan_lock);
2831	list_for_each_entry(chan, &conn->chan_l, list) {
2832		struct sock *sk = chan->sk;
		/* Only raw (sniffer-style) channels get a copy */
2833		if (chan->chan_type != L2CAP_CHAN_RAW)
2836		/* Don't send frame to the socket it came from */
2839		nskb = skb_clone(skb, GFP_KERNEL);
		/* If the channel refuses the clone, free it ourselves */
2843		if (chan->ops->recv(chan, nskb))
2847	mutex_unlock(&conn->chan_lock);
2850 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID depends on
 * link type), command header, then @dlen bytes of @data, fragmenting past
 * the connection MTU via frag_list. Returns skb or NULL on failure.
 */
2851 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2852				       u8 ident, u16 dlen, void *data)
2854	struct sk_buff *skb, **frag;
2855	struct l2cap_cmd_hdr *cmd;
2856	struct l2cap_hdr *lh;
2859	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2860	       conn, code, ident, dlen);
	/* MTU must at least fit the two headers */
2862	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2865	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2866	count = min_t(unsigned int, conn->mtu, len);
2868	skb = bt_skb_alloc(count, GFP_KERNEL);
2872	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2873	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	/* LE and BR/EDR use different fixed signalling CIDs */
2875	if (conn->hcon->type == LE_LINK)
2876		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2878		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2880	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2883	cmd->len = cpu_to_le16(dlen);
	/* Copy as much payload as fits in the head skb */
2886		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2887		memcpy(skb_put(skb, count), data, count);
2893	/* Continuation fragments (no L2CAP header) */
2894	frag = &skb_shinfo(skb)->frag_list;
2896		count = min_t(unsigned int, conn->mtu, len);
2898		*frag = bt_skb_alloc(count, GFP_KERNEL);
2902		memcpy(skb_put(*frag, count), data, count);
2907		frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its type/len and decode
 * the value by size (1/2/4 bytes inline, larger returned as a pointer).
 */
2917 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2920	struct l2cap_conf_opt *opt = *ptr;
2923	len = L2CAP_CONF_OPT_SIZE + opt->len;
2931		*val = *((u8 *) opt->val);
2935		*val = get_unaligned_le16(opt->val);
2939		*val = get_unaligned_le32(opt->val);
	/* Larger options: hand back a pointer to the raw bytes */
2943		*val = (unsigned long) opt->val;
2947	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, encoding @val by @len
 * (1/2/4 bytes inline, larger treated as a pointer to copy), then advance
 * *ptr past the option.
 */
2951 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2953	struct l2cap_conf_opt *opt = *ptr;
2955	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2962		*((u8 *) opt->val) = val;
2966		put_unaligned_le16(val, opt->val);
2970		put_unaligned_le32(val, opt->val);
	/* Larger options: @val is actually a pointer to the payload */
2974		memcpy(opt->val, (void *) val, len);
2978	*ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option built from the channel's
 * local QoS parameters; field defaults depend on the channel mode.
 */
2981 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2983	struct l2cap_conf_efs efs;
2985	switch (chan->mode) {
2986	case L2CAP_MODE_ERTM:
2987		efs.id = chan->local_id;
2988		efs.stype = chan->local_stype;
2989		efs.msdu = cpu_to_le16(chan->local_msdu);
2990		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2991		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2992		efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2995	case L2CAP_MODE_STREAMING:
		/* Streaming mode always advertises best-effort service */
2997		efs.stype = L2CAP_SERV_BESTEFFORT;
2998		efs.msdu = cpu_to_le16(chan->local_msdu);
2999		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3008	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3009			   (unsigned long) &efs);
/* Delayed-work handler for the ERTM acknowledgement timer.  Computes how
 * many received frames are still unacknowledged (sequence-number offset
 * between buffer_seq and last_acked_seq) and, if any, sends an RR/RNR to
 * ack them.  Drops the channel reference taken when the timer was armed.
 * NOTE(review): the frames_to_ack declaration and the conditional around
 * l2cap_send_rr_or_rnr() are elided from this listing.
 */
3012 static void l2cap_ack_timeout(struct work_struct *work)
3014 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3018 BT_DBG("chan %p", chan);
3020 l2cap_chan_lock(chan);
3022 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3023 chan->last_acked_seq);
3026 l2cap_send_rr_or_rnr(chan, 0);
3028 l2cap_chan_unlock(chan);
/* Balance the reference held for the pending timer. */
3029 l2cap_chan_put(chan);
/* Initialise per-channel ERTM/streaming state: reset all sequence
 * counters, clear reassembly state, init the transmit queue and move
 * tracking, then (ERTM only) set up the state machines, timers and the
 * SREJ/retransmission sequence lists.  Returns 0 on success or the error
 * from l2cap_seq_list_init(); on failure of the second list the first is
 * freed again.  NOTE(review): several framing lines (returns, error
 * checks) are elided from this listing.
 */
3032 int l2cap_ertm_init(struct l2cap_chan *chan)
3036 chan->next_tx_seq = 0;
3037 chan->expected_tx_seq = 0;
3038 chan->expected_ack_seq = 0;
3039 chan->unacked_frames = 0;
3040 chan->buffer_seq = 0;
3041 chan->frames_sent = 0;
3042 chan->last_acked_seq = 0;
3044 chan->sdu_last_frag = NULL;
3047 skb_queue_head_init(&chan->tx_q);
/* 0 = BR/EDR controller; channel starts out not moved to an AMP. */
3049 chan->local_amp_id = 0;
3051 chan->move_state = L2CAP_MOVE_STABLE;
3052 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM machinery below. */
3054 if (chan->mode != L2CAP_MODE_ERTM)
3057 chan->rx_state = L2CAP_RX_STATE_RECV;
3058 chan->tx_state = L2CAP_TX_STATE_XMIT;
3060 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3061 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3062 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3064 skb_queue_head_init(&chan->srej_q);
3066 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3070 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the srej_list if the retrans_list could not be allocated. */
3072 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to actually use: keep the requested ERTM or
 * streaming mode only if the remote's feature mask advertises support,
 * otherwise fall back to basic mode.  NOTE(review): the switch framing
 * and the "return mode" path inside the supported case are elided.
 */
3077 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3080 case L2CAP_MODE_STREAMING:
3081 case L2CAP_MODE_ERTM:
3082 if (l2cap_mode_supported(mode, remote_feat_mask))
3086 return L2CAP_MODE_BASIC;
3090 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3092 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3095 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3097 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the retransmission and monitor timeouts of an RFC option.  For
 * channels on an AMP controller the timeouts are derived from the
 * controller's best-effort flush timeout (converted to ms, scaled, and
 * clamped to 16 bits); otherwise the spec default constants are used.
 * NOTE(review): the clamp assignment for ertm_to > 0xffff and the else
 * branch framing are elided from this listing.
 */
3100 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3101 struct l2cap_conf_rfc *rfc)
3103 if (chan->local_amp_id && chan->hs_hcon) {
3104 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3106 /* Class 1 devices have must have ERTM timeouts
3107 * exceeding the Link Supervision Timeout. The
3108 * default Link Supervision Timeout for AMP
3109 * controllers is 10 seconds.
3111 * Class 1 devices use 0xffffffff for their
3112 * best-effort flush timeout, so the clamping logic
3113 * will result in a timeout that meets the above
3114 * requirement. ERTM timeouts are 16-bit values, so
3115 * the maximum timeout is 65.535 seconds.
3118 /* Convert timeout to milliseconds and round */
3119 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3121 /* This is the recommended formula for class 2 devices
3122 * that start ERTM timers when packets are sent to the
3125 ertm_to = 3 * ertm_to + 500;
3127 if (ertm_to > 0xffff)
3130 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3131 rfc->monitor_timeout = rfc->retrans_timeout;
/* Non-AMP path: use the spec default ERTM timeouts. */
3133 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3134 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the transmit window configuration.  If the requested window
 * exceeds the default and extended window sizes are supported on this
 * connection, switch to the extended control field; otherwise clamp the
 * window to the standard maximum.  The ack window mirrors the chosen tx
 * window.  NOTE(review): the else framing between the two branches is
 * elided from this listing.
 */
3138 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3140 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3141 __l2cap_ews_supported(chan->conn)) {
3142 /* use extended control field */
3143 set_bit(FLAG_EXT_CTRL, &chan->flags);
3144 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3146 chan->tx_win = min_t(u16, chan->tx_win,
3147 L2CAP_DEFAULT_TX_WINDOW);
3148 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3150 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for 'chan' into 'data'.
 * Emits MTU (when non-default), then mode-specific options: an RFC
 * option for basic/ERTM/streaming, plus EFS, EWS and FCS options where
 * the connection's feature mask and channel flags call for them.
 * On the first request the mode may be downgraded via
 * l2cap_select_mode() based on the remote feature mask.
 * NOTE(review): this listing is elided — 'size' declaration, 'done'
 * label, break statements and the final return are not visible.
 */
3153 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3155 struct l2cap_conf_req *req = data;
3156 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3157 void *ptr = req->data;
3160 BT_DBG("chan %p", chan);
/* Mode selection only happens on the very first config exchange. */
3162 if (chan->num_conf_req || chan->num_conf_rsp)
3165 switch (chan->mode) {
3166 case L2CAP_MODE_STREAMING:
3167 case L2CAP_MODE_ERTM:
3168 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3171 if (__l2cap_efs_supported(chan->conn))
3172 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote supports, if needed. */
3176 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3181 if (chan->imtu != L2CAP_DEFAULT_MTU)
3182 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3184 switch (chan->mode) {
3185 case L2CAP_MODE_BASIC:
/* Only send an explicit basic-mode RFC if the remote knows ERTM
 * or streaming at all; otherwise basic is implied.
 */
3186 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3187 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3190 rfc.mode = L2CAP_MODE_BASIC;
3192 rfc.max_transmit = 0;
3193 rfc.retrans_timeout = 0;
3194 rfc.monitor_timeout = 0;
3195 rfc.max_pdu_size = 0;
3197 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3198 (unsigned long) &rfc);
3201 case L2CAP_MODE_ERTM:
3202 rfc.mode = L2CAP_MODE_ERTM;
3203 rfc.max_transmit = chan->max_tx;
3205 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is bounded by the connection MTU minus header overhead. */
3207 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3208 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3210 rfc.max_pdu_size = cpu_to_le16(size);
3212 l2cap_txwin_setup(chan);
3214 rfc.txwin_size = min_t(u16, chan->tx_win,
3215 L2CAP_DEFAULT_TX_WINDOW);
3217 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3218 (unsigned long) &rfc);
3220 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3221 l2cap_add_opt_efs(&ptr, chan);
3223 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3224 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to drop the FCS when allowed (remote asked, or already off). */
3227 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3228 if (chan->fcs == L2CAP_FCS_NONE ||
3229 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3230 chan->fcs = L2CAP_FCS_NONE;
3231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3236 case L2CAP_MODE_STREAMING:
3237 l2cap_txwin_setup(chan);
3238 rfc.mode = L2CAP_MODE_STREAMING;
3240 rfc.max_transmit = 0;
3241 rfc.retrans_timeout = 0;
3242 rfc.monitor_timeout = 0;
3244 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3245 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3247 rfc.max_pdu_size = cpu_to_le16(size);
3249 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3250 (unsigned long) &rfc);
3252 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3253 l2cap_add_opt_efs(&ptr, chan);
3255 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3256 if (chan->fcs == L2CAP_FCS_NONE ||
3257 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3258 chan->fcs = L2CAP_FCS_NONE;
3259 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3265 req->dcid = cpu_to_le16(chan->dcid);
3266 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req /
 * chan->conf_len) and build our Configuration Response into 'data'.
 * Unknown non-hint options are echoed back with L2CAP_CONF_UNKNOWN;
 * mode mismatches either trigger renegotiation or -ECONNREFUSED;
 * accepted options (MTU, RFC, EFS, EWS) are applied to the channel and
 * acknowledged in the response.  Returns the response length via the
 * tail (elided here) or a negative errno.
 * NOTE(review): this listing is elided — declarations ('val', 'size',
 * remote_efs), break statements, 'done'/'endtoshort' style labels and
 * several framing lines are not visible.
 */
3271 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3273 struct l2cap_conf_rsp *rsp = data;
3274 void *ptr = rsp->data;
3275 void *req = chan->conf_req;
3276 int len = chan->conf_len;
3277 int type, hint, olen;
3279 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3280 struct l2cap_conf_efs efs;
3282 u16 mtu = L2CAP_DEFAULT_MTU;
3283 u16 result = L2CAP_CONF_SUCCESS;
3286 BT_DBG("chan %p", chan);
/* First pass: walk every option the peer sent. */
3288 while (len >= L2CAP_CONF_OPT_SIZE) {
3289 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; others must be understood. */
3291 hint = type & L2CAP_CONF_HINT;
3292 type &= L2CAP_CONF_MASK;
3295 case L2CAP_CONF_MTU:
3299 case L2CAP_CONF_FLUSH_TO:
3300 chan->flush_to = val;
3303 case L2CAP_CONF_QOS:
3306 case L2CAP_CONF_RFC:
3307 if (olen == sizeof(rfc))
3308 memcpy(&rfc, (void *) val, olen);
3311 case L2CAP_CONF_FCS:
3312 if (val == L2CAP_FCS_NONE)
3313 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3316 case L2CAP_CONF_EFS:
3318 if (olen == sizeof(efs))
3319 memcpy(&efs, (void *) val, olen);
3322 case L2CAP_CONF_EWS:
/* Extended window size is only legal with high-speed support. */
3323 if (!chan->conn->hs_enabled)
3324 return -ECONNREFUSED;
3326 set_bit(FLAG_EXT_CTRL, &chan->flags);
3327 set_bit(CONF_EWS_RECV, &chan->conf_state);
3328 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3329 chan->remote_tx_win = val;
/* Unknown mandatory option: reject and list its type in the rsp. */
3336 result = L2CAP_CONF_UNKNOWN;
3337 *((u8 *) ptr++) = type;
/* Mode negotiation only on the first request/response round. */
3342 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3345 switch (chan->mode) {
3346 case L2CAP_MODE_STREAMING:
3347 case L2CAP_MODE_ERTM:
3348 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3349 chan->mode = l2cap_select_mode(rfc.mode,
3350 chan->conn->feat_mask);
3355 if (__l2cap_efs_supported(chan->conn))
3356 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3358 return -ECONNREFUSED;
3361 if (chan->mode != rfc.mode)
3362 return -ECONNREFUSED;
/* Peer proposed a different mode: counter-propose ours once. */
3368 if (chan->mode != rfc.mode) {
3369 result = L2CAP_CONF_UNACCEPT;
3370 rfc.mode = chan->mode;
3372 if (chan->num_conf_rsp == 1)
3373 return -ECONNREFUSED;
3375 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3376 (unsigned long) &rfc);
3379 if (result == L2CAP_CONF_SUCCESS) {
3380 /* Configure output options and let the other side know
3381 * which ones we don't like. */
3383 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3384 result = L2CAP_CONF_UNACCEPT;
3387 set_bit(CONF_MTU_DONE, &chan->conf_state);
3389 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service types must be compatible with our local side. */
3392 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3393 efs.stype != L2CAP_SERV_NOTRAFIC &&
3394 efs.stype != chan->local_stype) {
3396 result = L2CAP_CONF_UNACCEPT;
3398 if (chan->num_conf_req >= 1)
3399 return -ECONNREFUSED;
3401 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3403 (unsigned long) &efs);
3405 /* Send PENDING Conf Rsp */
3406 result = L2CAP_CONF_PENDING;
3407 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3412 case L2CAP_MODE_BASIC:
3413 chan->fcs = L2CAP_FCS_NONE;
3414 set_bit(CONF_MODE_DONE, &chan->conf_state);
3417 case L2CAP_MODE_ERTM:
3418 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3419 chan->remote_tx_win = rfc.txwin_size;
3421 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3423 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what our connection MTU allows. */
3425 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3426 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3427 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3428 rfc.max_pdu_size = cpu_to_le16(size);
3429 chan->remote_mps = size;
3431 __l2cap_set_ertm_timeouts(chan, &rfc);
3433 set_bit(CONF_MODE_DONE, &chan->conf_state);
3435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3436 sizeof(rfc), (unsigned long) &rfc);
3438 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3439 chan->remote_id = efs.id;
3440 chan->remote_stype = efs.stype;
3441 chan->remote_msdu = le16_to_cpu(efs.msdu);
3442 chan->remote_flush_to =
3443 le32_to_cpu(efs.flush_to);
3444 chan->remote_acc_lat =
3445 le32_to_cpu(efs.acc_lat);
3446 chan->remote_sdu_itime =
3447 le32_to_cpu(efs.sdu_itime);
3448 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3450 (unsigned long) &efs);
3454 case L2CAP_MODE_STREAMING:
3455 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3456 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3457 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3458 rfc.max_pdu_size = cpu_to_le16(size);
3459 chan->remote_mps = size;
3461 set_bit(CONF_MODE_DONE, &chan->conf_state);
3463 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3464 (unsigned long) &rfc);
3469 result = L2CAP_CONF_UNACCEPT;
3471 memset(&rfc, 0, sizeof(rfc));
3472 rfc.mode = chan->mode;
3475 if (result == L2CAP_CONF_SUCCESS)
3476 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
/* Note: our response's scid field carries the peer's channel id. */
3478 rsp->scid = cpu_to_le16(chan->dcid);
3479 rsp->result = cpu_to_le16(result);
3480 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configuration Response from the peer and build the follow-up
 * Configuration Request into 'data'.  Rejected values (e.g. a too-small
 * MTU) are replaced with acceptable ones and re-proposed; accepted RFC /
 * EFS parameters are committed to the channel when *result indicates
 * success or pending.  Returns the new request length (tail elided) or
 * a negative errno on an unrecoverable mismatch.
 * NOTE(review): declarations ('type', 'olen', 'val'), break statements
 * and several framing lines are elided from this listing.
 */
3485 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3486 void *data, u16 *result)
3488 struct l2cap_conf_req *req = data;
3489 void *ptr = req->data;
3492 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3493 struct l2cap_conf_efs efs;
3495 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3497 while (len >= L2CAP_CONF_OPT_SIZE) {
3498 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3501 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: unaccept, counter
 * with the smallest legal value.
 */
3502 if (val < L2CAP_DEFAULT_MIN_MTU) {
3503 *result = L2CAP_CONF_UNACCEPT;
3504 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3510 case L2CAP_CONF_FLUSH_TO:
3511 chan->flush_to = val;
3512 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3516 case L2CAP_CONF_RFC:
3517 if (olen == sizeof(rfc))
3518 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not change mode mid-negotiation. */
3520 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3521 rfc.mode != chan->mode)
3522 return -ECONNREFUSED;
3526 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3527 sizeof(rfc), (unsigned long) &rfc);
3530 case L2CAP_CONF_EWS:
3531 chan->ack_win = min_t(u16, val, chan->ack_win);
3532 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3536 case L2CAP_CONF_EFS:
3537 if (olen == sizeof(efs))
3538 memcpy(&efs, (void *)val, olen);
3540 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3541 efs.stype != L2CAP_SERV_NOTRAFIC &&
3542 efs.stype != chan->local_stype)
3543 return -ECONNREFUSED;
3545 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3546 (unsigned long) &efs);
3549 case L2CAP_CONF_FCS:
3550 if (*result == L2CAP_CONF_PENDING)
3551 if (val == L2CAP_FCS_NONE)
3552 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot silently become ERTM/streaming. */
3558 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3559 return -ECONNREFUSED;
3561 chan->mode = rfc.mode;
3563 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3565 case L2CAP_MODE_ERTM:
3566 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3567 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3568 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3569 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3570 chan->ack_win = min_t(u16, chan->ack_win,
3573 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3574 chan->local_msdu = le16_to_cpu(efs.msdu);
3575 chan->local_sdu_itime =
3576 le32_to_cpu(efs.sdu_itime);
3577 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3578 chan->local_flush_to =
3579 le32_to_cpu(efs.flush_to);
3583 case L2CAP_MODE_STREAMING:
3584 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3588 req->dcid = cpu_to_le16(chan->dcid);
3589 req->flags = __constant_cpu_to_le16(0);
/* Build a minimal Configuration Response (no options) with the given
 * result and flags.  The scid field carries the peer's channel id
 * (our dcid).  NOTE(review): the returned-length tail of the function
 * is elided from this listing.
 */
3594 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3595 u16 result, u16 flags)
3597 struct l2cap_conf_rsp *rsp = data;
3598 void *ptr = rsp->data;
3600 BT_DBG("chan %p", chan);
3602 rsp->scid = cpu_to_le16(chan->dcid);
3603 rsp->result = cpu_to_le16(result);
3604 rsp->flags = cpu_to_le16(flags);
/* Complete a connection response that was deferred (e.g. pending
 * authorization): send the success Connect/Create-Channel Response and,
 * if we have not yet done so, kick off configuration by sending our
 * first Configuration Request.  NOTE(review): 'buf' and 'rsp_code'
 * declarations and the AMP/BR-EDR selection condition are elided from
 * this listing.
 */
3609 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3611 struct l2cap_conn_rsp rsp;
3612 struct l2cap_conn *conn = chan->conn;
3616 rsp.scid = cpu_to_le16(chan->dcid);
3617 rsp.dcid = cpu_to_le16(chan->scid);
3618 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3619 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3622 rsp_code = L2CAP_CREATE_CHAN_RSP;
3624 rsp_code = L2CAP_CONN_RSP;
3626 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3628 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller sends the config request. */
3630 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3633 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3634 l2cap_build_conf_req(chan, buf), buf);
3635 chan->num_conf_req++;
/* Extract the effective ERTM/streaming parameters from a successful
 * Configuration Response.  Sane spec defaults are pre-loaded so a remote
 * that omits the RFC or EWS option still yields usable timeouts, MPS and
 * window sizes.  NOTE(review): declarations ('type', 'olen', 'val'), the
 * EWS value capture, 'done' label and switch framing are elided from
 * this listing.
 */
3638 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3642 /* Use sane default values in case a misbehaving remote device
3643 * did not send an RFC or extended window size option.
3645 u16 txwin_ext = chan->ack_win;
3646 struct l2cap_conf_rfc rfc = {
3648 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3649 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3650 .max_pdu_size = cpu_to_le16(chan->imtu),
3651 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3654 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming channels carry RFC parameters. */
3656 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3659 while (len >= L2CAP_CONF_OPT_SIZE) {
3660 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3663 case L2CAP_CONF_RFC:
3664 if (olen == sizeof(rfc))
3665 memcpy(&rfc, (void *)val, olen);
3667 case L2CAP_CONF_EWS:
3674 case L2CAP_MODE_ERTM:
3675 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3676 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3677 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control field uses the EWS-derived window, otherwise
 * the (clamped) RFC window applies.
 */
3678 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3679 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3681 chan->ack_win = min_t(u16, chan->ack_win,
3684 case L2CAP_MODE_STREAMING:
3685 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  Only "command not understood"
 * rejects matching our outstanding information request are interesting:
 * they end feature discovery early so pending connections can proceed.
 * NOTE(review): the early returns after the guard conditions and the
 * final return are elided from this listing.
 */
3689 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3690 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3693 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated packets before touching rej->reason. */
3695 if (cmd_len < sizeof(*rej))
3698 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3701 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3702 cmd->ident == conn->info_ident) {
3703 cancel_delayed_work(&conn->info_timer);
3705 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3706 conn->info_ident = 0;
3708 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (also used for AMP
 * Create-Channel via rsp_code/amp_id).  Looks up a listening channel for
 * the PSM, checks link security, allocates the new channel, and decides
 * between success, pending (authentication/authorization or AMP bring-up)
 * and the various reject results.  Sends the response, optionally starts
 * feature-mask discovery, and kicks off configuration on success.
 * Returns the new channel (or NULL on the elided error paths).
 * NOTE(review): this listing is heavily elided — error labels
 * (response/sendresp), lock/unlock pairing, the parent socket lookup and
 * several else-arms are not visible.
 */
3714 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3715 struct l2cap_cmd_hdr *cmd,
3716 u8 *data, u8 rsp_code, u8 amp_id)
3718 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3719 struct l2cap_conn_rsp rsp;
3720 struct l2cap_chan *chan = NULL, *pchan;
3721 struct sock *parent, *sk = NULL;
3722 int result, status = L2CAP_CS_NO_INFO;
3724 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3725 __le16 psm = req->psm;
3727 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3729 /* Check if we have socket listening on psm */
3730 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3732 result = L2CAP_CR_BAD_PSM;
3738 mutex_lock(&conn->chan_lock);
3741 /* Check if the ACL is secure enough (if not SDP) */
3742 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3743 !hci_conn_check_link_mode(conn->hcon)) {
3744 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3745 result = L2CAP_CR_SEC_BLOCK;
3749 result = L2CAP_CR_NO_MEM;
3751 /* Check if we already have channel with that dcid */
3752 if (__l2cap_get_chan_by_dcid(conn, scid))
3755 chan = pchan->ops->new_connection(pchan);
3761 /* For certain devices (ex: HID mouse), support for authentication,
3762 * pairing and bonding is optional. For such devices, inorder to avoid
3763 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3764 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3766 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3768 bacpy(&bt_sk(sk)->src, conn->src);
3769 bacpy(&bt_sk(sk)->dst, conn->dst);
/* Remember which controller this channel lives on (0 = BR/EDR). */
3772 chan->local_amp_id = amp_id;
3774 __l2cap_chan_add(conn, chan);
3778 __set_chan_timer(chan, sk->sk_sndtimeo);
3780 chan->ident = cmd->ident;
3782 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3783 if (l2cap_chan_check_security(chan)) {
3784 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
/* Userspace wants to authorize: report pending. */
3785 __l2cap_state_change(chan, BT_CONNECT2);
3786 result = L2CAP_CR_PEND;
3787 status = L2CAP_CS_AUTHOR_PEND;
3788 chan->ops->defer(chan);
3790 /* Force pending result for AMP controllers.
3791 * The connection will succeed after the
3792 * physical link is up.
3795 __l2cap_state_change(chan, BT_CONNECT2);
3796 result = L2CAP_CR_PEND;
3798 __l2cap_state_change(chan, BT_CONFIG);
3799 result = L2CAP_CR_SUCCESS;
3801 status = L2CAP_CS_NO_INFO;
/* Security not yet satisfied: pending on authentication. */
3804 __l2cap_state_change(chan, BT_CONNECT2);
3805 result = L2CAP_CR_PEND;
3806 status = L2CAP_CS_AUTHEN_PEND;
/* Feature discovery still running: pending, no extra info. */
3809 __l2cap_state_change(chan, BT_CONNECT2);
3810 result = L2CAP_CR_PEND;
3811 status = L2CAP_CS_NO_INFO;
3815 release_sock(parent);
3816 mutex_unlock(&conn->chan_lock);
3819 rsp.scid = cpu_to_le16(scid);
3820 rsp.dcid = cpu_to_le16(dcid);
3821 rsp.result = cpu_to_le16(result);
3822 rsp.status = cpu_to_le16(status);
3823 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* First contact while features unknown: start an info request. */
3825 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3826 struct l2cap_info_req info;
3827 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3829 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3830 conn->info_ident = l2cap_get_ident(conn);
3832 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3834 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3835 sizeof(info), &info);
3838 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3839 result == L2CAP_CR_SUCCESS) {
3841 set_bit(CONF_REQ_SENT, &chan->conf_state);
3842 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3843 l2cap_build_conf_req(chan, buf), buf);
3844 chan->num_conf_req++;
/* Signalling handler for a BR/EDR Connection Request: validate the
 * command length, notify the management interface of the connected
 * device once, and delegate the actual channel setup to l2cap_connect().
 * NOTE(review): the -EPROTO return for short commands, hci_dev_lock()
 * and the final return are elided from this listing.
 */
3850 static int l2cap_connect_req(struct l2cap_conn *conn,
3851 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3853 struct hci_dev *hdev = conn->hcon->hdev;
3854 struct hci_conn *hcon = conn->hcon;
3856 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report the device as connected to mgmt only once per ACL. */
3860 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3861 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3862 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3863 hcon->dst_type, 0, NULL, 0,
3865 hci_dev_unlock(hdev);
3867 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response / Create-Channel Response.  Finds the
 * channel by scid (or, while still pending, by the request ident), then:
 * success moves it to BT_CONFIG and sends the first config request;
 * pending just marks CONF_CONNECT_PEND; any other result tears the
 * channel down with ECONNREFUSED.
 * NOTE(review): 'req' buffer declaration, 'err' handling, the
 * not-found unlock paths and break statements are elided from this
 * listing.
 */
3871 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3872 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3875 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3876 u16 scid, dcid, result, status;
3877 struct l2cap_chan *chan;
3881 if (cmd_len < sizeof(*rsp))
3884 scid = __le16_to_cpu(rsp->scid);
3885 dcid = __le16_to_cpu(rsp->dcid);
3886 result = __le16_to_cpu(rsp->result);
3887 status = __le16_to_cpu(rsp->status);
3889 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3890 dcid, scid, result, status);
3892 mutex_lock(&conn->chan_lock);
/* A response with scid 0 can still be matched by command ident. */
3895 chan = __l2cap_get_chan_by_scid(conn, scid);
3901 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3910 l2cap_chan_lock(chan);
3913 case L2CAP_CR_SUCCESS:
3914 l2cap_state_change(chan, BT_CONFIG);
3917 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Avoid sending a duplicate config request. */
3919 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3922 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3923 l2cap_build_conf_req(chan, req), req);
3924 chan->num_conf_req++;
3928 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any non-success, non-pending result kills the channel. */
3932 l2cap_chan_del(chan, ECONNREFUSED);
3936 l2cap_chan_unlock(chan);
3939 mutex_unlock(&conn->chan_lock);
3944 static inline void set_default_fcs(struct l2cap_chan *chan)
3946 /* FCS is enabled only in ERTM or streaming mode, if one or both
3949 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3950 chan->fcs = L2CAP_FCS_NONE;
3951 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3952 chan->fcs = L2CAP_FCS_CRC16;
/* Send the final SUCCESS Configuration Response for an EFS negotiation
 * that had been left pending: clear the local-pending flag, mark output
 * configuration done, and transmit a success response with the given
 * flags under the supplied command ident.
 */
3955 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3956 u8 ident, u16 flags)
3958 struct l2cap_conn *conn = chan->conn;
3960 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3963 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3964 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3966 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3967 l2cap_build_conf_rsp(chan, data,
3968 L2CAP_CONF_SUCCESS, flags), data);
3971 static inline int l2cap_config_req(struct l2cap_conn *conn,
3972 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3975 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3978 struct l2cap_chan *chan;
3981 if (cmd_len < sizeof(*req))
3984 dcid = __le16_to_cpu(req->dcid);
3985 flags = __le16_to_cpu(req->flags);
3987 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3989 chan = l2cap_get_chan_by_scid(conn, dcid);
3993 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3994 struct l2cap_cmd_rej_cid rej;
3996 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3997 rej.scid = cpu_to_le16(chan->scid);
3998 rej.dcid = cpu_to_le16(chan->dcid);
4000 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
4005 /* Reject if config buffer is too small. */
4006 len = cmd_len - sizeof(*req);
4007 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4008 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4009 l2cap_build_conf_rsp(chan, rsp,
4010 L2CAP_CONF_REJECT, flags), rsp);
4015 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4016 chan->conf_len += len;
4018 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4019 /* Incomplete config. Send empty response. */
4020 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4021 l2cap_build_conf_rsp(chan, rsp,
4022 L2CAP_CONF_SUCCESS, flags), rsp);
4026 /* Complete config. */
4027 len = l2cap_parse_conf_req(chan, rsp);
4029 l2cap_send_disconn_req(chan, ECONNRESET);
4033 chan->ident = cmd->ident;
4034 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4035 chan->num_conf_rsp++;
4037 /* Reset config buffer. */
4040 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4043 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4044 set_default_fcs(chan);
4046 if (chan->mode == L2CAP_MODE_ERTM ||
4047 chan->mode == L2CAP_MODE_STREAMING)
4048 err = l2cap_ertm_init(chan);
4051 l2cap_send_disconn_req(chan, -err);
4053 l2cap_chan_ready(chan);
4058 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4060 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4061 l2cap_build_conf_req(chan, buf), buf);
4062 chan->num_conf_req++;
4065 /* Got Conf Rsp PENDING from remote side and asume we sent
4066 Conf Rsp PENDING in the code above */
4067 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4068 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4070 /* check compatibility */
4072 /* Send rsp for BR/EDR channel */
4074 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4076 chan->ident = cmd->ident;
4080 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response.  SUCCESS commits the RFC
 * parameters; PENDING may re-parse and reply (BR/EDR) or create an AMP
 * logical link; UNACCEPT retries with a corrected request a bounded
 * number of times; anything else disconnects.  When both directions are
 * done the channel is initialised (FCS, ERTM) and marked ready.
 * NOTE(review): 'err'/'buf'/'req' declarations, goto 'done'/'failure'
 * labels and several else-arms are elided from this listing.
 */
4084 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4085 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4088 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4089 u16 scid, flags, result;
4090 struct l2cap_chan *chan;
4091 int len = cmd_len - sizeof(*rsp);
4094 if (cmd_len < sizeof(*rsp))
4097 scid = __le16_to_cpu(rsp->scid);
4098 flags = __le16_to_cpu(rsp->flags);
4099 result = __le16_to_cpu(rsp->result);
4101 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4104 chan = l2cap_get_chan_by_scid(conn, scid);
4109 case L2CAP_CONF_SUCCESS:
4110 l2cap_conf_rfc_get(chan, rsp->data, len);
4111 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4114 case L2CAP_CONF_PENDING:
4115 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4117 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4120 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4123 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR replies immediately; AMP waits for the logical link. */
4127 if (!chan->hs_hcon) {
4128 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4131 if (l2cap_check_efs(chan)) {
4132 amp_create_logical_link(chan);
4133 chan->ident = cmd->ident;
4139 case L2CAP_CONF_UNACCEPT:
/* Bounded retry: re-negotiate with corrected options. */
4140 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4143 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4144 l2cap_send_disconn_req(chan, ECONNRESET);
4148 /* throw out any old stored conf requests */
4149 result = L2CAP_CONF_SUCCESS;
4150 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4153 l2cap_send_disconn_req(chan, ECONNRESET);
4157 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4158 L2CAP_CONF_REQ, len, req);
4159 chan->num_conf_req++;
4160 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: error out and disconnect. */
4166 l2cap_chan_set_err(chan, ECONNRESET);
4168 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4169 l2cap_send_disconn_req(chan, ECONNRESET);
4173 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4176 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4178 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4179 set_default_fcs(chan);
4181 if (chan->mode == L2CAP_MODE_ERTM ||
4182 chan->mode == L2CAP_MODE_STREAMING)
4183 err = l2cap_ertm_init(chan);
4186 l2cap_send_disconn_req(chan, -err);
4188 l2cap_chan_ready(chan);
4192 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: look up the channel by the
 * peer's dcid (our scid), acknowledge with a Disconnection Response,
 * shut the socket down and delete the channel.  A temporary reference is
 * held so ops->close() can run safely after the channel lock is dropped.
 * NOTE(review): 'sk' extraction/locking and some framing lines are
 * elided from this listing.
 */
4196 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4197 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4200 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4201 struct l2cap_disconn_rsp rsp;
4203 struct l2cap_chan *chan;
4206 if (cmd_len != sizeof(*req))
4209 scid = __le16_to_cpu(req->scid);
4210 dcid = __le16_to_cpu(req->dcid);
4212 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4214 mutex_lock(&conn->chan_lock);
/* The request's dcid names OUR channel endpoint. */
4216 chan = __l2cap_get_chan_by_scid(conn, dcid);
4218 mutex_unlock(&conn->chan_lock);
4222 l2cap_chan_lock(chan);
4226 rsp.dcid = cpu_to_le16(chan->scid);
4227 rsp.scid = cpu_to_le16(chan->dcid);
4228 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4231 sk->sk_shutdown = SHUTDOWN_MASK;
/* Hold a ref across del/unlock so close() sees a live channel. */
4234 l2cap_chan_hold(chan);
4235 l2cap_chan_del(chan, ECONNRESET);
4237 l2cap_chan_unlock(chan);
4239 chan->ops->close(chan);
4240 l2cap_chan_put(chan);
4242 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response: find our channel by scid
 * and finish the teardown we initiated (delete with no error, then
 * close).  Mirrors l2cap_disconnect_req()'s hold/del/close/put pattern.
 * NOTE(review): scid/dcid declarations and some framing lines are elided
 * from this listing.
 */
4247 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4248 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4251 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4253 struct l2cap_chan *chan;
4255 if (cmd_len != sizeof(*rsp))
4258 scid = __le16_to_cpu(rsp->scid);
4259 dcid = __le16_to_cpu(rsp->dcid);
4261 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4263 mutex_lock(&conn->chan_lock);
4265 chan = __l2cap_get_chan_by_scid(conn, scid);
4267 mutex_unlock(&conn->chan_lock);
4271 l2cap_chan_lock(chan);
/* Hold a ref across del/unlock so close() sees a live channel. */
4273 l2cap_chan_hold(chan);
4274 l2cap_chan_del(chan, 0);
4276 l2cap_chan_unlock(chan);
4278 chan->ops->close(chan);
4279 l2cap_chan_put(chan);
4281 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request.  Answers the feature mask
 * (advertising ERTM/streaming and, with high-speed enabled, extended
 * flow spec + window) and the fixed channel map (toggling the A2MP bit
 * to match hs_enabled); any other type gets a NOTSUPP response.
 * NOTE(review): 'buf' declarations and the final return are elided from
 * this listing.
 */
4286 static inline int l2cap_information_req(struct l2cap_conn *conn,
4287 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4290 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4293 if (cmd_len != sizeof(*req))
4296 type = __le16_to_cpu(req->type);
4298 BT_DBG("type 0x%4.4x", type);
4300 if (type == L2CAP_IT_FEAT_MASK) {
4302 u32 feat_mask = l2cap_feat_mask;
4303 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4304 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4305 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4307 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended features depend on high-speed (AMP) support. */
4309 if (conn->hs_enabled)
4310 feat_mask |= L2CAP_FEAT_EXT_FLOW
4311 | L2CAP_FEAT_EXT_WINDOW;
4313 put_unaligned_le32(feat_mask, rsp->data);
4314 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4316 } else if (type == L2CAP_IT_FIXED_CHAN) {
4318 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when AMP is usable. */
4320 if (conn->hs_enabled)
4321 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4323 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4325 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4326 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4327 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4328 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4331 struct l2cap_info_rsp rsp;
4332 rsp.type = cpu_to_le16(type);
4333 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4334 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an Information Response.  Validates it against our outstanding
 * request ident, stops the info timer, and either records the remote
 * feature mask (chaining a fixed-channel query when advertised) or the
 * fixed channel map, then lets pending connections proceed via
 * l2cap_conn_start().  NOTE(review): type/result declarations, switch
 * framing and returns are elided from this listing.
 */
4341 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4342 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4345 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4348 if (cmd_len < sizeof(*rsp))
4351 type = __le16_to_cpu(rsp->type);
4352 result = __le16_to_cpu(rsp->result);
4354 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4356 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4357 if (cmd->ident != conn->info_ident ||
4358 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4361 cancel_delayed_work(&conn->info_timer);
/* On failure, give up on discovery and start pending channels. */
4363 if (result != L2CAP_IR_SUCCESS) {
4364 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4365 conn->info_ident = 0;
4367 l2cap_conn_start(conn);
4373 case L2CAP_IT_FEAT_MASK:
4374 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Fixed channels advertised: chain a second info request. */
4376 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4377 struct l2cap_info_req req;
4378 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4380 conn->info_ident = l2cap_get_ident(conn);
4382 l2cap_send_cmd(conn, conn->info_ident,
4383 L2CAP_INFO_REQ, sizeof(req), &req);
4385 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4386 conn->info_ident = 0;
4388 l2cap_conn_start(conn);
4392 case L2CAP_IT_FIXED_CHAN:
4393 conn->fixed_chan_mask = rsp->data[0];
4394 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4395 conn->info_ident = 0;
4397 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP).
 * amp_id 0 (HCI_BREDR_ID) degenerates into a normal BR/EDR connect; a
 * non-zero amp_id must name an AMP controller that is up, otherwise a
 * CREATE_CHAN_RSP with L2CAP_CR_BAD_AMP is returned.
 * NOTE(review): short lines (returns, braces, error labels) are elided
 * in this listing.
 */
4404 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4405 struct l2cap_cmd_hdr *cmd,
4406 u16 cmd_len, void *data)
4408 struct l2cap_create_chan_req *req = data;
4409 struct l2cap_create_chan_rsp rsp;
4410 struct l2cap_chan *chan;
4411 struct hci_dev *hdev;
/* Exact length check — request has a fixed layout */
4414 if (cmd_len != sizeof(*req))
/* High-speed (AMP) must be enabled on this connection */
4417 if (!conn->hs_enabled)
4420 psm = le16_to_cpu(req->psm);
4421 scid = le16_to_cpu(req->scid);
4423 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4425 /* For controller id 0 make BR/EDR connection */
4426 if (req->amp_id == HCI_BREDR_ID) {
4427 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4432 /* Validate AMP controller id */
4433 hdev = hci_dev_get(req->amp_id);
4437 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4442 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4445 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4446 struct hci_conn *hs_hcon;
/* Look up the high-speed AMP link to the same peer */
4448 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4454 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4456 mgr->bredr_chan = chan;
4457 chan->hs_hcon = hs_hcon;
/* AMP link provides its own integrity checking; no L2CAP FCS needed */
4458 chan->fcs = L2CAP_FCS_NONE;
4459 conn->mtu = hdev->block_mtu;
/* Error path: reject with BAD_AMP */
4468 rsp.scid = cpu_to_le16(scid);
4469 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4470 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4472 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send an L2CAP Move Channel Request for @chan toward controller
 * @dest_amp_id, remembering the ident for matching the response and
 * arming the channel move timer.
 */
4478 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4480 struct l2cap_move_chan_req req;
4483 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
/* Allocate a signalling ident and stash it so the rsp can be matched */
4485 ident = l2cap_get_ident(chan->conn);
4486 chan->ident = ident;
/* icid is our source CID for this channel */
4488 req.icid = cpu_to_le16(chan->scid);
4489 req.dest_amp_id = dest_amp_id;
4491 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
/* Guard the move with a timeout */
4494 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an L2CAP Move Channel Response with @result, echoing the ident of
 * the request we are answering (stored in chan->ident).
 */
4497 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4499 struct l2cap_move_chan_rsp rsp;
4501 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* icid from the responder's view is the peer's CID (our dcid) */
4503 rsp.icid = cpu_to_le16(chan->dcid);
4504 rsp.result = cpu_to_le16(result);
4506 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send an L2CAP Move Channel Confirm with @result (confirmed/unconfirmed)
 * under a fresh ident, and arm the move timer to bound the wait for the
 * confirm response.
 */
4510 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4512 struct l2cap_move_chan_cfm cfm;
4514 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* New ident: the confirm starts its own request/response exchange */
4516 chan->ident = l2cap_get_ident(chan->conn);
4518 cfm.icid = cpu_to_le16(chan->scid);
4519 cfm.result = cpu_to_le16(result);
4521 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4524 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare @icid when no
 * channel object could be located (spec still requires the confirm).
 */
4527 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4529 struct l2cap_move_chan_cfm cfm;
4531 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4533 cfm.icid = cpu_to_le16(icid);
4534 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4536 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirm Response for @icid, echoing the confirm's
 * @ident. This is the final message of the move-channel handshake.
 */
4540 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4543 struct l2cap_move_chan_cfm_rsp rsp;
4545 BT_DBG("icid 0x%4.4x", icid);
4547 rsp.icid = cpu_to_le16(icid);
4548 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link. The
 * actual link teardown is not implemented yet (placeholder below).
 */
4551 static void __release_logical_link(struct l2cap_chan *chan)
4553 chan->hs_hchan = NULL;
4554 chan->hs_hcon = NULL;
4556 /* Placeholder - release the logical link */
/* React to a failed logical-link setup. If the channel never reached
 * BT_CONNECTED the create failed — disconnect. Otherwise this was a
 * channel move: the responder rejects the move, the initiator either
 * cleans up (move still pending/successful on the wire) or sends an
 * UNCONFIRMED confirm. Short lines (break, braces) elided in listing.
 */
4559 static void l2cap_logical_fail(struct l2cap_chan *chan)
4561 /* Logical link setup failed */
4562 if (chan->state != BT_CONNECTED) {
4563 /* Create channel failure, disconnect */
4564 l2cap_send_disconn_req(chan, ECONNRESET);
4568 switch (chan->move_role) {
4569 case L2CAP_MOVE_ROLE_RESPONDER:
4570 l2cap_move_done(chan);
4571 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4573 case L2CAP_MOVE_ROLE_INITIATOR:
4574 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4575 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4576 /* Remote has only sent pending or
4577 * success responses, clean up
4579 l2cap_move_done(chan);
4582 /* Other amp move states imply that the move
4583 * has already aborted
4585 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the logical link @hchan is up: attach
 * the link, answer the pending EFS config, and if config was already
 * finished on the input side, init ERTM and mark the channel ready
 * (or disconnect on ERTM init failure).
 */
4590 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4591 struct hci_chan *hchan)
4593 struct l2cap_conf_rsp rsp;
4595 chan->hs_hchan = hchan;
4596 chan->hs_hcon->l2cap_data = chan->conn;
/* Deferred config response now that the link exists */
4598 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4600 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4603 set_default_fcs(chan);
4605 err = l2cap_ertm_init(chan);
/* err is negative here; pass a positive errno to the disconnect */
4607 l2cap_send_disconn_req(chan, -err);
4609 l2cap_chan_ready(chan);
/* Advance the move state machine after the logical link @hchan comes up.
 * Depending on move_state/role we either wait for the peer's success
 * response, defer for local busy, send the confirm (initiator), or send
 * the success response (responder). Unexpected states release the link.
 * Short lines (break, braces) elided in this listing.
 */
4613 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4614 struct hci_chan *hchan)
4616 chan->hs_hcon = hchan->conn;
4617 chan->hs_hcon->l2cap_data = chan->conn;
4619 BT_DBG("move_state %d", chan->move_state);
4621 switch (chan->move_state) {
4622 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4623 /* Move confirm will be sent after a success
4624 * response is received
4626 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4628 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4629 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4630 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4631 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4632 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4633 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4634 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4635 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4636 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4640 /* Move was not in expected state, free the channel */
4641 __release_logical_link(chan);
4643 chan->move_state = L2CAP_MOVE_STABLE;
4647 /* Call with chan locked */
/* Logical link confirmation entry point: on failure, run the failure
 * path and release the link; on success, route to either the create
 * finisher (channel not yet connected and destined for an AMP, i.e.
 * local_amp_id != 0) or the move finisher.
 */
4648 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4651 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
/* Failure path (condition line elided in listing) */
4654 l2cap_logical_fail(chan);
4655 __release_logical_link(chan);
4659 if (chan->state != BT_CONNECTED) {
4660 /* Ignore logical link if channel is on BR/EDR */
4661 if (chan->local_amp_id)
4662 l2cap_logical_finish_create(chan, hchan);
4664 l2cap_logical_finish_move(chan, hchan);
/* Begin moving @chan as initiator. From BR/EDR toward AMP (only when the
 * channel policy prefers AMP): wait for physical link setup. From AMP
 * back to BR/EDR: set up the move and send a move request with
 * dest_amp_id 0 immediately.
 */
4668 void l2cap_move_start(struct l2cap_chan *chan)
4670 BT_DBG("chan %p", chan);
4672 if (chan->local_amp_id == HCI_BREDR_ID) {
/* Only initiate BR/EDR->AMP when policy asks for AMP */
4673 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4675 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4676 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4677 /* Placeholder - start physical link setup */
4679 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4680 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4682 l2cap_move_setup(chan);
/* dest_amp_id 0 == move back to BR/EDR */
4683 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical-link setup resolves with
 * @result. Outgoing channels (BT_CONNECT) either proceed with an AMP
 * create-channel request or fall back to plain BR/EDR connect. Incoming
 * channels answer the pending CREATE_CHAN_RSP and, on success, move to
 * BT_CONFIG and kick off configuration.
 */
4687 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4688 u8 local_amp_id, u8 remote_amp_id)
4690 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4691 local_amp_id, remote_amp_id);
/* AMP link handles integrity; no L2CAP FCS */
4693 chan->fcs = L2CAP_FCS_NONE;
4695 /* Outgoing channel on AMP */
4696 if (chan->state == BT_CONNECT) {
4697 if (result == L2CAP_CR_SUCCESS) {
4698 chan->local_amp_id = local_amp_id;
4699 l2cap_send_create_chan_req(chan, remote_amp_id);
4701 /* Revert to BR/EDR connect */
4702 l2cap_send_conn_req(chan);
4708 /* Incoming channel on AMP */
4709 if (__l2cap_no_conn_pending(chan)) {
4710 struct l2cap_conn_rsp rsp;
4712 rsp.scid = cpu_to_le16(chan->dcid);
4713 rsp.dcid = cpu_to_le16(chan->scid);
4715 if (result == L2CAP_CR_SUCCESS) {
4716 /* Send successful response */
4717 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4718 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4720 /* Send negative response */
4721 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4722 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4725 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4728 if (result == L2CAP_CR_SUCCESS) {
/* Created: enter configuration phase and send our conf req */
4729 __l2cap_state_change(chan, BT_CONFIG);
4730 set_bit(CONF_REQ_SENT, &chan->conf_state);
4731 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4733 l2cap_build_conf_req(chan, buf), buf);
4734 chan->num_conf_req++;
/* Initiator side: prepare the channel for moving, record the target
 * controller id, and send the move request to the peer.
 */
4739 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4742 l2cap_move_setup(chan);
4743 chan->move_id = local_amp_id;
4744 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4746 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: once the physical link is resolved, answer the move
 * request. hchan acquisition is still a placeholder (hchan stays NULL);
 * with a connected link we respond success and finish via
 * l2cap_logical_cfm, otherwise we wait for the link or reject the move.
 * NOTE(review): with the placeholder unfilled, the hchan->state deref
 * path is unreachable/unsafe until the lookup is implemented — confirm
 * against the full file.
 */
4749 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4751 struct hci_chan *hchan = NULL;
4753 /* Placeholder - get hci_chan for logical link */
4756 if (hchan->state == BT_CONNECTED) {
4757 /* Logical link is ready to go */
4758 chan->hs_hcon = hchan->conn;
4759 chan->hs_hcon->l2cap_data = chan->conn;
4760 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4761 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4763 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4765 /* Wait for logical link to be ready */
4766 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4769 /* Logical link not available */
4770 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move: as responder, send the appropriate negative
 * response (BAD_ID for -EINVAL, otherwise NOT_ALLOWED); then reset the
 * move role/state and resume ERTM transmission.
 */
4774 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4776 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4778 if (result == -EINVAL)
4779 rsp_result = L2CAP_MR_BAD_ID;
4781 rsp_result = L2CAP_MR_NOT_ALLOWED;
4783 l2cap_send_move_chan_rsp(chan, rsp_result);
4786 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4787 chan->move_state = L2CAP_MOVE_STABLE;
4789 /* Restart data transmission */
4790 l2cap_ertm_send(chan);
4793 /* Invoke with locked chan */
/* Physical-link confirmation dispatcher. Disconnecting/closed channels
 * are ignored (unlock and bail). Channels not yet connected continue the
 * create path; connected channels continue or cancel the move depending
 * on @result and the channel's move role.
 */
4794 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4796 u8 local_amp_id = chan->local_amp_id;
4797 u8 remote_amp_id = chan->remote_amp_id;
4799 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4800 chan, result, local_amp_id, remote_amp_id);
4802 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4803 l2cap_chan_unlock(chan);
4807 if (chan->state != BT_CONNECTED) {
4808 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4809 } else if (result != L2CAP_MR_SUCCESS) {
4810 l2cap_do_move_cancel(chan, result);
4812 switch (chan->move_role) {
4813 case L2CAP_MOVE_ROLE_INITIATOR:
4814 l2cap_do_move_initiate(chan, local_amp_id,
4817 case L2CAP_MOVE_ROLE_RESPONDER:
4818 l2cap_do_move_respond(chan, result);
/* Unknown role: treat as cancel (default label elided in listing) */
4821 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request. Validates the request
 * (length, HS enabled, channel exists, dynamic CID, policy and mode,
 * valid destination controller), resolves move collisions by bd_addr
 * comparison, then becomes responder: either completes/pends a move to
 * BR/EDR directly or starts AMP physical-link acceptance (placeholder).
 * Responds with the computed result in all cases once a channel exists.
 */
4827 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4828 struct l2cap_cmd_hdr *cmd,
4829 u16 cmd_len, void *data)
4831 struct l2cap_move_chan_req *req = data;
4832 struct l2cap_move_chan_rsp rsp;
4833 struct l2cap_chan *chan;
4835 u16 result = L2CAP_MR_NOT_ALLOWED;
4837 if (cmd_len != sizeof(*req))
4840 icid = le16_to_cpu(req->icid);
4842 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4844 if (!conn->hs_enabled)
4847 chan = l2cap_get_chan_by_dcid(conn, icid);
/* No such channel: reply NOT_ALLOWED directly on the conn */
4849 rsp.icid = cpu_to_le16(icid);
4850 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4851 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4856 chan->ident = cmd->ident;
/* Only dynamic, ERTM/streaming channels not pinned to BR/EDR may move */
4858 if (chan->scid < L2CAP_CID_DYN_START ||
4859 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4860 (chan->mode != L2CAP_MODE_ERTM &&
4861 chan->mode != L2CAP_MODE_STREAMING)) {
4862 result = L2CAP_MR_NOT_ALLOWED;
4863 goto send_move_response;
4866 if (chan->local_amp_id == req->dest_amp_id) {
4867 result = L2CAP_MR_SAME_ID;
4868 goto send_move_response;
4871 if (req->dest_amp_id) {
4872 struct hci_dev *hdev;
4873 hdev = hci_dev_get(req->dest_amp_id);
4874 if (!hdev || hdev->dev_type != HCI_AMP ||
4875 !test_bit(HCI_UP, &hdev->flags)) {
4879 result = L2CAP_MR_BAD_ID;
4880 goto send_move_response;
4885 /* Detect a move collision. Only send a collision response
4886 * if this side has "lost", otherwise proceed with the move.
4887 * The winner has the larger bd_addr.
4889 if ((__chan_is_moving(chan) ||
4890 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4891 bacmp(conn->src, conn->dst) > 0) {
4892 result = L2CAP_MR_COLLISION;
4893 goto send_move_response;
4896 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4897 l2cap_move_setup(chan);
4898 chan->move_id = req->dest_amp_id;
4901 if (!req->dest_amp_id) {
4902 /* Moving to BR/EDR */
4903 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4904 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4905 result = L2CAP_MR_PEND;
4907 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4908 result = L2CAP_MR_SUCCESS;
4911 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4912 /* Placeholder - uncomment when amp functions are available */
4913 /*amp_accept_physical(chan, req->dest_amp_id);*/
4914 result = L2CAP_MR_PEND;
/* send_move_response label elided in listing */
4918 l2cap_send_move_chan_rsp(chan, result);
4920 l2cap_chan_unlock(chan);
/* Continue the initiator's move after a SUCCESS or PEND move response.
 * If the channel can't be found, still send an UNCONFIRMED confirm for
 * the icid. Otherwise re-arm timers and step the move state machine:
 * waiting states advance toward sending the confirm, possibly after the
 * logical link (placeholder lookup) is connected; any other state aborts
 * the move with an UNCONFIRMED confirm.
 */
4925 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4927 struct l2cap_chan *chan;
4928 struct hci_chan *hchan = NULL;
4930 chan = l2cap_get_chan_by_scid(conn, icid);
/* Channel gone: confirm unconfirmed by icid alone */
4932 l2cap_send_move_chan_cfm_icid(conn, icid);
4936 __clear_chan_timer(chan);
/* A PEND response extends the wait with the ERTX timeout */
4937 if (result == L2CAP_MR_PEND)
4938 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4940 switch (chan->move_state) {
4941 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4942 /* Move confirm will be sent when logical link
4945 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4947 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4948 if (result == L2CAP_MR_PEND) {
4950 } else if (test_bit(CONN_LOCAL_BUSY,
4951 &chan->conn_state)) {
4952 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4954 /* Logical link is up or moving to BR/EDR,
4957 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4958 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4961 case L2CAP_MOVE_WAIT_RSP:
4963 if (result == L2CAP_MR_SUCCESS) {
4964 /* Remote is ready, send confirm immediately
4965 * after logical link is ready
4967 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4969 /* Both logical link and move success
4970 * are required to confirm
4972 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4975 /* Placeholder - get hci_chan for logical link */
4977 /* Logical link not available */
4978 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4982 /* If the logical link is not yet connected, do not
4983 * send confirmation.
4985 if (hchan->state != BT_CONNECTED)
4988 /* Logical link is already ready to go */
4990 chan->hs_hcon = hchan->conn;
4991 chan->hs_hcon->l2cap_data = chan->conn;
4993 if (result == L2CAP_MR_SUCCESS) {
4994 /* Can confirm now */
4995 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4997 /* Now only need move success
5000 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5003 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5006 /* Any other amp move state means the move failed. */
5007 chan->move_id = chan->local_amp_id;
5008 l2cap_move_done(chan);
5009 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5012 l2cap_chan_unlock(chan);
/* Handle a failed (non-success, non-pend) move response. If the channel
 * can't be found by ident, confirm unconfirmed by icid. A COLLISION
 * result flips this initiator into the responder role; any other failure
 * cancels the move. Finish with an UNCONFIRMED confirm.
 */
5015 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5018 struct l2cap_chan *chan;
5020 chan = l2cap_get_chan_by_ident(conn, ident);
5022 /* Could not locate channel, icid is best guess */
5023 l2cap_send_move_chan_cfm_icid(conn, icid);
5027 __clear_chan_timer(chan);
5029 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5030 if (result == L2CAP_MR_COLLISION) {
/* Lost the collision: let the peer drive the move */
5031 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5033 /* Cleanup - cancel move */
5034 chan->move_id = chan->local_amp_id;
5035 l2cap_move_done(chan);
5039 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5041 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Move Channel Response: validate length,
 * decode icid/result, then branch to the continue path (SUCCESS/PEND)
 * or the failure path (anything else).
 */
5044 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5045 struct l2cap_cmd_hdr *cmd,
5046 u16 cmd_len, void *data)
5048 struct l2cap_move_chan_rsp *rsp = data;
5051 if (cmd_len != sizeof(*rsp))
5054 icid = le16_to_cpu(rsp->icid);
5055 result = le16_to_cpu(rsp->result);
5057 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5059 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5060 l2cap_move_continue(conn, icid, result);
5062 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming L2CAP Move Channel Confirm (responder side).
 * A confirm must always be answered with a confirm-response, even for an
 * unknown icid. On CONFIRMED the move commits (local_amp_id takes the
 * new id; the logical link is released if we moved back to BR/EDR);
 * otherwise the move reverts. Either way the move is finished.
 */
5067 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5068 struct l2cap_cmd_hdr *cmd,
5069 u16 cmd_len, void *data)
5071 struct l2cap_move_chan_cfm *cfm = data;
5072 struct l2cap_chan *chan;
5075 if (cmd_len != sizeof(*cfm))
5078 icid = le16_to_cpu(cfm->icid);
5079 result = le16_to_cpu(cfm->result);
5081 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5083 chan = l2cap_get_chan_by_dcid(conn, icid);
5085 /* Spec requires a response even if the icid was not found */
5086 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5090 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5091 if (result == L2CAP_MC_CONFIRMED) {
5092 chan->local_amp_id = chan->move_id;
/* Moved back to BR/EDR (id 0): drop the HS logical link */
5093 if (!chan->local_amp_id)
5094 __release_logical_link(chan);
/* UNCONFIRMED branch (else elided): revert move_id */
5096 chan->move_id = chan->local_amp_id;
5099 l2cap_move_done(chan);
5102 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5104 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirm Response (initiator side,
 * final handshake message): stop the move timer and, if we were waiting
 * for exactly this, commit the move — releasing the logical link when
 * the channel ends up on BR/EDR with a link still attached.
 */
5109 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5110 struct l2cap_cmd_hdr *cmd,
5111 u16 cmd_len, void *data)
5113 struct l2cap_move_chan_cfm_rsp *rsp = data;
5114 struct l2cap_chan *chan;
5117 if (cmd_len != sizeof(*rsp))
5120 icid = le16_to_cpu(rsp->icid);
5122 BT_DBG("icid 0x%4.4x", icid);
5124 chan = l2cap_get_chan_by_scid(conn, icid);
5128 __clear_chan_timer(chan);
5130 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5131 chan->local_amp_id = chan->move_id;
5133 if (!chan->local_amp_id && chan->hs_hchan)
5134 __release_logical_link(chan);
5136 l2cap_move_done(chan);
5139 l2cap_chan_unlock(chan);
/* Validate LE connection parameters per the spec's allowed ranges:
 * interval 6..3200 with min <= max, supervision timeout multiplier
 * 10..3200, timeout strictly greater than the effective interval, and
 * slave latency within both the absolute cap (499) and what the timeout
 * allows. Return lines are elided in this listing.
 */
5144 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5149 if (min > max || min < 6 || max > 3200)
5152 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout must exceed max interval (both scaled by 8 here) */
5155 if (max >= to_multiplier * 8)
5158 max_latency = (to_multiplier * 8 / max) - 1;
5159 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request. Only valid when we
 * are master. Validates the proposed parameters, replies with
 * accepted/rejected, and on acceptance asks the controller to apply the
 * new parameters via hci_le_conn_update().
 */
5165 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5166 struct l2cap_cmd_hdr *cmd,
5169 struct hci_conn *hcon = conn->hcon;
5170 struct l2cap_conn_param_update_req *req;
5171 struct l2cap_conn_param_update_rsp rsp;
5172 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may grant parameter updates */
5175 if (!(hcon->link_mode & HCI_LM_MASTER))
5178 cmd_len = __le16_to_cpu(cmd->len);
5179 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5182 req = (struct l2cap_conn_param_update_req *) data;
5183 min = __le16_to_cpu(req->min);
5184 max = __le16_to_cpu(req->max);
5185 latency = __le16_to_cpu(req->latency);
5186 to_multiplier = __le16_to_cpu(req->to_multiplier);
5188 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5189 min, max, latency, to_multiplier);
5191 memset(&rsp, 0, sizeof(rsp));
5193 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5195 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5197 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5199 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Apply only when validation passed (guard elided in listing) */
5203 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signalling command to its handler by opcode.
 * Echo requests are answered inline by reflecting the payload. Unknown
 * opcodes are logged; the caller turns the error into a command reject.
 * break statements are elided in this listing.
 */
5208 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5209 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5214 switch (cmd->code) {
5215 case L2CAP_COMMAND_REJ:
5216 l2cap_command_rej(conn, cmd, cmd_len, data);
5219 case L2CAP_CONN_REQ:
5220 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* CONN_RSP and CREATE_CHAN_RSP share the same response handler */
5223 case L2CAP_CONN_RSP:
5224 case L2CAP_CREATE_CHAN_RSP:
5225 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5228 case L2CAP_CONF_REQ:
5229 err = l2cap_config_req(conn, cmd, cmd_len, data);
5232 case L2CAP_CONF_RSP:
5233 l2cap_config_rsp(conn, cmd, cmd_len, data);
5236 case L2CAP_DISCONN_REQ:
5237 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5240 case L2CAP_DISCONN_RSP:
5241 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5244 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back */
5245 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5248 case L2CAP_ECHO_RSP:
5251 case L2CAP_INFO_REQ:
5252 err = l2cap_information_req(conn, cmd, cmd_len, data);
5255 case L2CAP_INFO_RSP:
5256 l2cap_information_rsp(conn, cmd, cmd_len, data);
5259 case L2CAP_CREATE_CHAN_REQ:
5260 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5263 case L2CAP_MOVE_CHAN_REQ:
5264 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5267 case L2CAP_MOVE_CHAN_RSP:
5268 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5271 case L2CAP_MOVE_CHAN_CFM:
5272 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5275 case L2CAP_MOVE_CHAN_CFM_RSP:
5276 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5280 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signalling command. Only the connection parameter
 * update request is actively handled; command reject and param update
 * response are accepted silently; anything else is an error the caller
 * turns into a command reject.
 */
5288 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5289 struct l2cap_cmd_hdr *cmd, u8 *data)
5291 switch (cmd->code) {
5292 case L2CAP_COMMAND_REJ:
5295 case L2CAP_CONN_PARAM_UPDATE_REQ:
5296 return l2cap_conn_param_update_req(conn, cmd, data);
5298 case L2CAP_CONN_PARAM_UPDATE_RSP:
5302 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Map a handler error code to the wire-format reason for a command
 * reject. The switch/case labels are elided in this listing; the visible
 * returns show the three possible reasons, with NOT_UNDERSTOOD as the
 * fall-through default.
 */
5307 static __le16 l2cap_err_to_reason(int err)
5311 return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
5313 return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED);
5317 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
/* Process the LE signalling channel: exactly one command per PDU.
 * Validates link type, header size, declared length vs skb length and a
 * non-zero ident, dispatches via l2cap_le_sig_cmd(), and on error sends
 * a command reject with a mapped reason.
 */
5321 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5322 struct sk_buff *skb)
5324 struct hci_conn *hcon = conn->hcon;
5325 struct l2cap_cmd_hdr *cmd;
5329 if (hcon->type != LE_LINK)
5332 if (skb->len < L2CAP_CMD_HDR_SIZE)
5335 cmd = (void *) skb->data;
5336 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5338 len = le16_to_cpu(cmd->len);
5340 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Declared length must cover the remaining data; ident 0 is reserved */
5342 if (len != skb->len || !cmd->ident) {
5343 BT_DBG("corrupted command");
5347 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5349 struct l2cap_cmd_rej_unk rej;
5351 BT_ERR("Wrong link type (%d)", err);
5353 rej.reason = l2cap_err_to_reason(err);
5354 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process the BR/EDR signalling channel: a PDU may carry several
 * commands back to back. Raw sockets get a copy first, then each command
 * header is copied out, validated (length within remaining data,
 * non-zero ident) and dispatched; handler errors produce a command
 * reject with a mapped reason.
 */
5362 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5363 struct sk_buff *skb)
5365 struct hci_conn *hcon = conn->hcon;
5366 u8 *data = skb->data;
5368 struct l2cap_cmd_hdr cmd;
/* Deliver a copy to raw (HCI sniffing) sockets */
5371 l2cap_raw_recv(conn, skb);
5373 if (hcon->type != ACL_LINK)
/* Walk all commands packed into this C-frame */
5376 while (len >= L2CAP_CMD_HDR_SIZE) {
5378 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5379 data += L2CAP_CMD_HDR_SIZE;
5380 len -= L2CAP_CMD_HDR_SIZE;
5382 cmd_len = le16_to_cpu(cmd.len);
5384 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5387 if (cmd_len > len || !cmd.ident) {
5388 BT_DBG("corrupted command");
5392 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5394 struct l2cap_cmd_rej_unk rej;
5396 BT_ERR("Wrong link type (%d)", err);
5398 rej.reason = l2cap_err_to_reason(err);
5399 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS trailing an ERTM/streaming frame. The received
 * FCS is read from just past the trimmed payload and compared with a
 * CRC computed over header (enhanced or extended control) plus payload.
 * Channels without CRC16 pass trivially. Return lines elided in listing.
 */
5411 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5413 u16 our_fcs, rcv_fcs;
/* Header size depends on whether extended control fields are in use */
5416 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5417 hdr_size = L2CAP_EXT_HDR_SIZE;
5419 hdr_size = L2CAP_ENH_HDR_SIZE;
5421 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from beyond the new tail */
5422 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5423 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* skb->data already points past the header, hence the back-offset */
5424 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5426 if (our_fcs != rcv_fcs)
/* Send a frame carrying the F-bit in response to a poll: RNR when we are
 * locally busy, otherwise try to piggyback on pending I-frames (via
 * l2cap_ertm_send, which consumes CONN_SEND_FBIT), falling back to an
 * explicit RR if nothing carried the F-bit.
 */
5432 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5434 struct l2cap_ctrl control;
5436 BT_DBG("chan %p", chan);
5438 memset(&control, 0, sizeof(control));
5441 control.reqseq = chan->buffer_seq;
/* Whatever goes out next must carry the final bit */
5442 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5444 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5445 control.super = L2CAP_SUPER_RNR;
5446 l2cap_send_sframe(chan, &control);
/* Remote is no longer busy and frames are outstanding: restart timer */
5449 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5450 chan->unacked_frames > 0)
5451 __set_retrans_timer(chan);
5453 /* Send pending iframes */
5454 l2cap_ertm_send(chan);
5456 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5457 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5458 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5461 control.super = L2CAP_SUPER_RR;
5462 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list, tracking the list tail through
 * @*last_frag so appends are O(1), and keeping skb's aggregate length
 * accounting (len, data_len, truesize) consistent.
 */
5466 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5467 struct sk_buff **last_frag)
5469 /* skb->len reflects data in skb as well as all fragments
5470 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list */
5472 if (!skb_has_frag_list(skb))
5473 skb_shinfo(skb)->frag_list = new_frag;
5475 new_frag->next = NULL;
/* Link after the current tail and advance the tail pointer */
5477 (*last_frag)->next = new_frag;
5478 *last_frag = new_frag;
5480 skb->len += new_frag->len;
5481 skb->data_len += new_frag->len;
5482 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM I-frames according to the SAR bits:
 * UNSEGMENTED frames go straight to the channel's recv callback; START
 * pulls the SDU length and begins accumulation; CONTINUE appends; END
 * (fall-through section) appends, checks the total length and delivers.
 * The error path frees the partial SDU and resets reassembly state.
 * Many guard/cleanup lines are elided in this listing.
 */
5485 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5486 struct l2cap_ctrl *control)
5490 switch (control->sar) {
5491 case L2CAP_SAR_UNSEGMENTED:
5495 err = chan->ops->recv(chan, skb);
5498 case L2CAP_SAR_START:
/* First two bytes of a START frame carry the total SDU length */
5502 chan->sdu_len = get_unaligned_le16(skb->data);
5503 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU: reject */
5505 if (chan->sdu_len > chan->imtu) {
5510 if (skb->len >= chan->sdu_len)
5514 chan->sdu_last_frag = skb;
5520 case L2CAP_SAR_CONTINUE:
5524 append_skb_frag(chan->sdu, skb,
5525 &chan->sdu_last_frag);
/* Overflow past the declared SDU length is an error */
5528 if (chan->sdu->len >= chan->sdu_len)
/* END handling: final fragment, then length check and delivery */
5538 append_skb_frag(chan->sdu, skb,
5539 &chan->sdu_last_frag);
5542 if (chan->sdu->len != chan->sdu_len)
5545 err = chan->ops->recv(chan, chan->sdu);
5548 /* Reassembly complete */
5550 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset state */
5558 kfree_skb(chan->sdu);
5560 chan->sdu_last_frag = NULL;
/* Resegmentation hook after a channel move; body is elided in this
 * listing (stub in the original — TODO confirm against the full file).
 */
5567 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition into the ERTM tx state machine. No-op for
 * non-ERTM channels.
 */
5573 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5577 if (chan->mode != L2CAP_MODE_ERTM)
5580 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5581 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver consecutively-sequenced frames starting
 * at buffer_seq to reassembly until a gap (or local busy) stops us. When
 * the queue empties, return to the normal RECV state and send an ack.
 */
5584 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5587 /* Pass sequential frames to l2cap_reassemble_sdu()
5588 * until a gap is encountered.
5591 BT_DBG("chan %p", chan);
5593 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5594 struct sk_buff *skb;
5595 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5596 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5598 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
/* Gap found (break elided): stop draining */
5603 skb_unlink(skb, &chan->srej_q);
5604 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5605 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5610 if (skb_queue_empty(&chan->srej_q)) {
5611 chan->rx_state = L2CAP_RX_STATE_RECV;
5612 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq (equal to next_tx_seq
 * means the peer is rejecting a frame we never sent — disconnect),
 * enforce the retransmission limit, then retransmit the requested frame.
 * A poll SREJ also triggers an F-bit response; a final SREJ must match
 * the saved reqseq unless SREJ-ACT was already pending.
 */
5618 static void l2cap_handle_srej(struct l2cap_chan *chan,
5619 struct l2cap_ctrl *control)
5621 struct sk_buff *skb;
5623 BT_DBG("chan %p, control %p", chan, control);
5625 if (control->reqseq == chan->next_tx_seq) {
5626 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5627 l2cap_send_disconn_req(chan, ECONNRESET);
5631 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
/* Requested frame already acked/freed: nothing to resend */
5634 BT_DBG("Seq %d not available for retransmission",
5639 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5640 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5641 l2cap_send_disconn_req(chan, ECONNRESET);
5645 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5647 if (control->poll) {
5648 l2cap_pass_to_tx(chan, control);
5650 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5651 l2cap_retransmit(chan, control);
5652 l2cap_ertm_send(chan);
5654 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5655 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5656 chan->srej_save_reqseq = control->reqseq;
/* Non-poll SREJ path */
5659 l2cap_pass_to_tx_fbit(chan, control);
5661 if (control->final) {
5662 if (chan->srej_save_reqseq != control->reqseq ||
5663 !test_and_clear_bit(CONN_SREJ_ACT,
5665 l2cap_retransmit(chan, control);
5667 l2cap_retransmit(chan, control);
5668 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5669 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5670 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq as in SREJ handling,
 * enforce the retry limit, then retransmit all unacked frames from
 * reqseq onward. A final REJ only retransmits if REJ-ACT wasn't already
 * set (avoids duplicate retransmission after a poll/final exchange).
 */
5676 static void l2cap_handle_rej(struct l2cap_chan *chan,
5677 struct l2cap_ctrl *control)
5679 struct sk_buff *skb;
5681 BT_DBG("chan %p, control %p", chan, control);
5683 if (control->reqseq == chan->next_tx_seq) {
5684 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5685 l2cap_send_disconn_req(chan, ECONNRESET);
5689 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5691 if (chan->max_tx && skb &&
5692 bt_cb(skb)->control.retries >= chan->max_tx) {
5693 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5694 l2cap_send_disconn_req(chan, ECONNRESET);
5698 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5700 l2cap_pass_to_tx(chan, control);
5702 if (control->final) {
5703 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5704 l2cap_retransmit_all(chan, control);
5706 l2cap_retransmit_all(chan, control);
5707 l2cap_ertm_send(chan);
5708 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5709 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window:
 * expected, expected-under-SREJ, duplicate, unexpected (gap), or
 * invalid. In SREJ_SENT state a frame can match the SREJ list head, be a
 * duplicate already queued, or be outside the window. The "double poll"
 * wraparound case is treated as ignorable only when tx_win is at most
 * half the sequence space (see the long comment below).
 */
5713 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5715 BT_DBG("chan %p, txseq %d", chan, txseq);
5717 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5718 chan->expected_tx_seq);
5720 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5721 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5723 /* See notes below regarding "double poll" and
5726 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5727 BT_DBG("Invalid/Ignore - after SREJ");
5728 return L2CAP_TXSEQ_INVALID_IGNORE;
5730 BT_DBG("Invalid - in window after SREJ sent");
5731 return L2CAP_TXSEQ_INVALID;
5735 if (chan->srej_list.head == txseq) {
5736 BT_DBG("Expected SREJ");
5737 return L2CAP_TXSEQ_EXPECTED_SREJ;
5740 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5741 BT_DBG("Duplicate SREJ - txseq already stored");
5742 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5745 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5746 BT_DBG("Unexpected SREJ - not requested");
5747 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5751 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it lands outside the window */
5752 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5754 BT_DBG("Invalid - txseq outside tx window");
5755 return L2CAP_TXSEQ_INVALID;
5758 return L2CAP_TXSEQ_EXPECTED;
/* txseq earlier than expected == retransmission of acked data */
5762 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5763 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5764 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5765 return L2CAP_TXSEQ_DUPLICATE;
5768 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5769 /* A source of invalid packets is a "double poll" condition,
5770 * where delays cause us to send multiple poll packets. If
5771 * the remote stack receives and processes both polls,
5772 * sequence numbers can wrap around in such a way that a
5773 * resent frame has a sequence number that looks like new data
5774 * with a sequence gap. This would trigger an erroneous SREJ
5777 * Fortunately, this is impossible with a tx window that's
5778 * less than half of the maximum sequence number, which allows
5779 * invalid frames to be safely ignored.
5781 * With tx window sizes greater than half of the tx window
5782 * maximum, the frame is invalid and cannot be ignored. This
5783 * causes a disconnect.
5786 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5787 BT_DBG("Invalid/Ignore - txseq outside tx window");
5788 return L2CAP_TXSEQ_INVALID_IGNORE;
5790 BT_DBG("Invalid - txseq outside tx window");
5791 return L2CAP_TXSEQ_INVALID;
5794 BT_DBG("Unexpected - txseq indicates missing frames");
5795 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive-side state machine handler for the RECV (normal) state.
 * Dispatches on @event (I-frame / RR / RNR / REJ / SREJ) and, for
 * I-frames, on the classification of control->txseq.  Returns 0 or a
 * negative errno (from reassembly or peer-triggered disconnect paths).
 * NOTE(review): this listing is elided — braces, breaks and some
 * control-flow lines are missing; confirm against the full source.
 */
5799 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5800 struct l2cap_ctrl *control,
5801 struct sk_buff *skb, u8 event)
/* Tracks whether skb was queued somewhere (e.g. srej_q) so the
 * common exit path below knows not to free it. */
5804 bool skb_in_use = false;
5806 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5810 case L2CAP_EV_RECV_IFRAME:
5811 switch (l2cap_classify_txseq(chan, control->txseq)) {
5812 case L2CAP_TXSEQ_EXPECTED:
/* In-order I-frame: update tx-side ack state first. */
5813 l2cap_pass_to_tx(chan, control);
/* While locally busy, expected frames are dropped; they will be
 * recovered when local busy is cleared. */
5815 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5816 BT_DBG("Busy, discarding expected seq %d",
5821 chan->expected_tx_seq = __next_seq(chan,
5824 chan->buffer_seq = chan->expected_tx_seq;
5827 err = l2cap_reassemble_sdu(chan, skb, control);
/* F-bit set: a poll was answered — if no REJ was outstanding,
 * retransmit and resume sending. */
5831 if (control->final) {
5832 if (!test_and_clear_bit(CONN_REJ_ACT,
5833 &chan->conn_state)) {
5835 l2cap_retransmit_all(chan, control);
5836 l2cap_ertm_send(chan);
5840 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5841 l2cap_send_ack(chan);
5843 case L2CAP_TXSEQ_UNEXPECTED:
5844 l2cap_pass_to_tx(chan, control);
5846 /* Can't issue SREJ frames in the local busy state.
5847 * Drop this frame, it will be seen as missing
5848 * when local busy is exited.
5850 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5851 BT_DBG("Busy, discarding unexpected seq %d",
5856 /* There was a gap in the sequence, so an SREJ
5857 * must be sent for each missing frame. The
5858 * current frame is stored for later use.
5860 skb_queue_tail(&chan->srej_q, skb);
5862 BT_DBG("Queued %p (queue len %d)", skb,
5863 skb_queue_len(&chan->srej_q));
5865 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5866 l2cap_seq_list_clear(&chan->srej_list);
5867 l2cap_send_srej(chan, control->txseq);
/* A gap was seen: switch to selective-reject recovery mode. */
5869 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5871 case L2CAP_TXSEQ_DUPLICATE:
/* Duplicate frames still carry valid ack info for the tx side. */
5872 l2cap_pass_to_tx(chan, control);
5874 case L2CAP_TXSEQ_INVALID_IGNORE:
5876 case L2CAP_TXSEQ_INVALID:
/* Unrecoverable sequence error — tear the channel down. */
5878 l2cap_send_disconn_req(chan, ECONNRESET);
5882 case L2CAP_EV_RECV_RR:
5883 l2cap_pass_to_tx(chan, control);
5884 if (control->final) {
5885 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Only retransmit on an F-bit RR when no REJ was already acted
 * on and the channel is not mid-move (AMP channel move). */
5887 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5888 !__chan_is_moving(chan)) {
5890 l2cap_retransmit_all(chan, control);
5893 l2cap_ertm_send(chan);
5894 } else if (control->poll) {
/* P-bit RR: peer is polling — answer with F-bit set. */
5895 l2cap_send_i_or_rr_or_rnr(chan);
5897 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5898 &chan->conn_state) &&
5899 chan->unacked_frames)
5900 __set_retrans_timer(chan);
5902 l2cap_ertm_send(chan);
5905 case L2CAP_EV_RECV_RNR:
/* Peer signalled receiver-not-ready: stop (re)transmitting. */
5906 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5907 l2cap_pass_to_tx(chan, control);
5908 if (control && control->poll) {
5909 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5910 l2cap_send_rr_or_rnr(chan, 0);
5912 __clear_retrans_timer(chan);
5913 l2cap_seq_list_clear(&chan->retrans_list);
5915 case L2CAP_EV_RECV_REJ:
5916 l2cap_handle_rej(chan, control);
5918 case L2CAP_EV_RECV_SREJ:
5919 l2cap_handle_srej(chan, control);
/* Common exit: free the skb unless it was queued above. */
5925 if (skb && !skb_in_use) {
5926 BT_DBG("Freeing %p", skb);
/* ERTM receive-side handler for the SREJ_SENT state: selective-reject
 * recovery is in progress, so incoming I-frames are buffered on srej_q
 * until the missing frames arrive, then reassembled in order.
 * Returns 0 or a negative errno.
 * NOTE(review): elided listing — missing braces/breaks; confirm
 * control flow against the full source.
 */
5933 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5934 struct l2cap_ctrl *control,
5935 struct sk_buff *skb, u8 event)
5938 u16 txseq = control->txseq;
/* True once skb is queued on srej_q, so the exit path keeps it. */
5939 bool skb_in_use = false;
5941 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5945 case L2CAP_EV_RECV_IFRAME:
5946 switch (l2cap_classify_txseq(chan, txseq)) {
5947 case L2CAP_TXSEQ_EXPECTED:
5948 /* Keep frame for reassembly later */
5949 l2cap_pass_to_tx(chan, control);
5950 skb_queue_tail(&chan->srej_q, skb);
5952 BT_DBG("Queued %p (queue len %d)", skb,
5953 skb_queue_len(&chan->srej_q));
5955 chan->expected_tx_seq = __next_seq(chan, txseq);
5957 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* A previously SREJ'd frame arrived: drop its entry from the
 * pending-SREJ list, queue it, and try to drain srej_q. */
5958 l2cap_seq_list_pop(&chan->srej_list);
5960 l2cap_pass_to_tx(chan, control);
5961 skb_queue_tail(&chan->srej_q, skb);
5963 BT_DBG("Queued %p (queue len %d)", skb,
5964 skb_queue_len(&chan->srej_q));
5966 err = l2cap_rx_queued_iframes(chan);
5971 case L2CAP_TXSEQ_UNEXPECTED:
5972 /* Got a frame that can't be reassembled yet.
5973 * Save it for later, and send SREJs to cover
5974 * the missing frames.
5976 skb_queue_tail(&chan->srej_q, skb);
5978 BT_DBG("Queued %p (queue len %d)", skb,
5979 skb_queue_len(&chan->srej_q));
5981 l2cap_pass_to_tx(chan, control);
5982 l2cap_send_srej(chan, control->txseq);
5984 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5985 /* This frame was requested with an SREJ, but
5986 * some expected retransmitted frames are
5987 * missing. Request retransmission of missing
5990 skb_queue_tail(&chan->srej_q, skb);
5992 BT_DBG("Queued %p (queue len %d)", skb,
5993 skb_queue_len(&chan->srej_q));
5995 l2cap_pass_to_tx(chan, control);
5996 l2cap_send_srej_list(chan, control->txseq);
5998 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5999 /* We've already queued this frame. Drop this copy. */
6000 l2cap_pass_to_tx(chan, control);
6002 case L2CAP_TXSEQ_DUPLICATE:
6003 /* Expecting a later sequence number, so this frame
6004 * was already received. Ignore it completely.
6007 case L2CAP_TXSEQ_INVALID_IGNORE:
6009 case L2CAP_TXSEQ_INVALID:
/* Unrecoverable sequence error — disconnect the channel. */
6011 l2cap_send_disconn_req(chan, ECONNRESET);
6015 case L2CAP_EV_RECV_RR:
6016 l2cap_pass_to_tx(chan, control);
6017 if (control->final) {
6018 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6020 if (!test_and_clear_bit(CONN_REJ_ACT,
6021 &chan->conn_state)) {
6023 l2cap_retransmit_all(chan, control);
6026 l2cap_ertm_send(chan);
6027 } else if (control->poll) {
6028 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6029 &chan->conn_state) &&
6030 chan->unacked_frames) {
6031 __set_retrans_timer(chan);
/* Poll while in SREJ recovery: answer with an F-bit SREJ for
 * the tail of the missing-frame list. */
6034 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6035 l2cap_send_srej_tail(chan);
6037 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6038 &chan->conn_state) &&
6039 chan->unacked_frames)
6040 __set_retrans_timer(chan);
6042 l2cap_send_ack(chan);
6045 case L2CAP_EV_RECV_RNR:
6046 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6047 l2cap_pass_to_tx(chan, control);
6048 if (control->poll) {
6049 l2cap_send_srej_tail(chan);
/* Non-poll RNR: acknowledge with a plain RR built locally. */
6051 struct l2cap_ctrl rr_control;
6052 memset(&rr_control, 0, sizeof(rr_control));
6053 rr_control.sframe = 1;
6054 rr_control.super = L2CAP_SUPER_RR;
6055 rr_control.reqseq = chan->buffer_seq;
6056 l2cap_send_sframe(chan, &rr_control);
6060 case L2CAP_EV_RECV_REJ:
6061 l2cap_handle_rej(chan, control);
6063 case L2CAP_EV_RECV_SREJ:
6064 l2cap_handle_srej(chan, control);
/* Common exit: free the skb unless it was queued above. */
6068 if (skb && !skb_in_use) {
6069 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return the rx state machine to RECV
 * and refresh conn->mtu from the new link — block_mtu when a
 * high-speed (AMP) hcon is in use, acl_mtu otherwise.  Finishes by
 * resegmenting pending outbound data for the new MTU.
 * NOTE(review): the selecting condition (presumably "if (chan->hs_hcon)")
 * is elided from this listing — confirm against the full source.
 */
6076 static int l2cap_finish_move(struct l2cap_chan *chan)
6078 BT_DBG("chan %p", chan);
6080 chan->rx_state = L2CAP_RX_STATE_RECV;
6083 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6085 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6087 return l2cap_resegment(chan);
/* Receive-side handler for the WAIT_P state (channel move: waiting for
 * a P-bit frame from the peer).  Processes the peer's reqseq, rewinds
 * the tx queue to the first unacked frame, finishes the move, then
 * responds with the F-bit set and reprocesses the event in RECV state.
 * NOTE(review): elided listing — the P-bit gate and error checks
 * around l2cap_finish_move() are not shown; confirm in full source.
 */
6090 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6091 struct l2cap_ctrl *control,
6092 struct sk_buff *skb, u8 event)
6096 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6102 l2cap_process_reqseq(chan, control->reqseq);
6104 if (!skb_queue_empty(&chan->tx_q))
6105 chan->tx_send_head = skb_peek(&chan->tx_q);
6107 chan->tx_send_head = NULL;
6109 /* Rewind next_tx_seq to the point expected
6112 chan->next_tx_seq = control->reqseq;
6113 chan->unacked_frames = 0;
6115 err = l2cap_finish_move(chan);
6119 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6120 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frames still carry data: re-run them through the RECV handler
 * (with skb for I-frames, NULL otherwise — see elided line 6123). */
6122 if (event == L2CAP_EV_RECV_IFRAME)
6125 return l2cap_rx_state_recv(chan, control, NULL, event);
/* Receive-side handler for the WAIT_F state (channel move: waiting for
 * an F-bit response).  Non-final frames are ignored here; on F-bit,
 * clears remote-busy, returns to RECV state, rewinds the tx queue to
 * the peer's reqseq, refreshes the MTU for the new link, resegments,
 * and finally reprocesses the frame through the RECV handler.
 * NOTE(review): elided listing — the hs_hcon/acl MTU selection branch
 * and error handling are not fully shown; confirm in full source.
 */
6128 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6129 struct l2cap_ctrl *control,
6130 struct sk_buff *skb, u8 event)
6134 if (!control->final)
6137 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6139 chan->rx_state = L2CAP_RX_STATE_RECV;
6140 l2cap_process_reqseq(chan, control->reqseq);
6142 if (!skb_queue_empty(&chan->tx_q))
6143 chan->tx_send_head = skb_peek(&chan->tx_q);
6145 chan->tx_send_head = NULL;
6147 /* Rewind next_tx_seq to the point expected
6150 chan->next_tx_seq = control->reqseq;
6151 chan->unacked_frames = 0;
6154 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6156 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6158 err = l2cap_resegment(chan);
6161 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Validate a peer-supplied reqseq: it must acknowledge a frame we have
 * actually sent but not yet seen acked, i.e. its modular offset from
 * next_tx_seq must not exceed the number of unacked frames.
 */
6166 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6168 /* Make sure reqseq is for a packet that has been sent but not acked */
6171 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6172 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: validates the frame's reqseq,
 * then routes it to the handler for the current rx_state.  An invalid
 * reqseq is a protocol violation and tears the channel down with
 * ECONNRESET.  Returns 0 or a negative errno from the state handler.
 */
6175 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6176 struct sk_buff *skb, u8 event)
6180 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6181 control, skb, event, chan->rx_state);
6183 if (__valid_reqseq(chan, control->reqseq)) {
6184 switch (chan->rx_state) {
6185 case L2CAP_RX_STATE_RECV:
6186 err = l2cap_rx_state_recv(chan, control, skb, event);
6188 case L2CAP_RX_STATE_SREJ_SENT:
6189 err = l2cap_rx_state_srej_sent(chan, control, skb,
6192 case L2CAP_RX_STATE_WAIT_P:
6193 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6195 case L2CAP_RX_STATE_WAIT_F:
6196 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6203 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6204 control->reqseq, chan->next_tx_seq,
6205 chan->expected_ack_seq);
6206 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive path: only in-sequence (EXPECTED) I-frames
 * are reassembled; anything else is dropped and any partially built
 * SDU is discarded (streaming mode has no retransmission).  The ack
 * bookkeeping (last_acked_seq / expected_tx_seq) always advances to
 * follow the received txseq.
 * NOTE(review): elided listing — the else branch and the cleanup of
 * chan->sdu/sdu_last_frag are only partially shown.
 */
6212 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6213 struct sk_buff *skb)
6217 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6220 if (l2cap_classify_txseq(chan, control->txseq) ==
6221 L2CAP_TXSEQ_EXPECTED) {
6222 l2cap_pass_to_tx(chan, control);
6224 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6225 __next_seq(chan, chan->buffer_seq));
6227 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6229 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: abandon any partial SDU in progress. */
6232 kfree_skb(chan->sdu);
6235 chan->sdu_last_frag = NULL;
6239 BT_DBG("Freeing %p", skb);
6244 chan->last_acked_seq = control->txseq;
6245 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Parse and validate a received ERTM/streaming data frame, then feed
 * it to the appropriate rx path.  Performs FCS check, payload-length
 * validation against MPS, and F/P-bit validity checks; violations of
 * the protocol result in a disconnect request (ECONNRESET).
 * NOTE(review): elided listing — several length computations and the
 * "drop:" / goto paths are not visible here.
 */
6250 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6252 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6256 __unpack_control(chan, skb);
6261 * We can just drop the corrupted I-frame here.
6262 * Receiver will miss it and start proper recovery
6263 * procedures and ask for retransmission.
6265 if (l2cap_check_fcs(chan, skb))
/* Compute SDU payload length: strip the SDU-length field on a
 * SAR start fragment and the FCS trailer when CRC16 is in use. */
6268 if (!control->sframe && control->sar == L2CAP_SAR_START)
6269 len -= L2CAP_SDULEN_SIZE;
6271 if (chan->fcs == L2CAP_FCS_CRC16)
6272 len -= L2CAP_FCS_SIZE;
6274 if (len > chan->mps) {
6275 l2cap_send_disconn_req(chan, ECONNRESET);
6279 if (!control->sframe) {
6282 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6283 control->sar, control->reqseq, control->final,
6286 /* Validate F-bit - F=0 always valid, F=1 only
6287 * valid in TX WAIT_F
6289 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6292 if (chan->mode != L2CAP_MODE_STREAMING) {
6293 event = L2CAP_EV_RECV_IFRAME;
6294 err = l2cap_rx(chan, control, skb, event);
6296 err = l2cap_stream_rx(chan, control, skb);
6300 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame "super" field onto rx events. */
6302 const u8 rx_func_to_event[4] = {
6303 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6304 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6307 /* Only I-frames are expected in streaming mode */
6308 if (chan->mode == L2CAP_MODE_STREAMING)
6311 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6312 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; leftover bytes are a protocol error. */
6316 BT_ERR("Trailing bytes: %d in sframe", len);
6317 l2cap_send_disconn_req(chan, ECONNRESET);
6321 /* Validate F and P bits */
6322 if (control->final && (control->poll ||
6323 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6326 event = rx_func_to_event[control->super];
6327 if (l2cap_rx(chan, control, skb, event))
6328 l2cap_send_disconn_req(chan, ECONNRESET);
/* Deliver an inbound data packet to the channel identified by @cid.
 * Unknown CIDs (other than A2MP, which may create its channel on
 * demand) are dropped.  Basic mode delivers directly to the socket
 * layer (dropping on receive-buffer overflow, since basic-mode L2CAP
 * has no flow control); ERTM/streaming go through l2cap_data_rcv().
 * NOTE(review): elided listing — the drop/free paths between the
 * visible lines are not shown.
 */
6338 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6339 struct sk_buff *skb)
6341 struct l2cap_chan *chan;
6343 chan = l2cap_get_chan_by_scid(conn, cid);
6345 if (cid == L2CAP_CID_A2MP) {
/* A2MP fixed channel is created lazily on first packet. */
6346 chan = a2mp_channel_create(conn, skb);
6352 l2cap_chan_lock(chan);
6354 BT_DBG("unknown cid 0x%4.4x", cid);
6355 /* Drop packet and return */
6361 BT_DBG("chan %p, len %d", chan, skb->len);
6363 if (chan->state != BT_CONNECTED)
6366 switch (chan->mode) {
6367 case L2CAP_MODE_BASIC:
6368 /* If socket recv buffers overflows we drop data here
6369 * which is *bad* because L2CAP has to be reliable.
6370 * But we don't have any other choice. L2CAP doesn't
6371 * provide flow control mechanism. */
6373 if (chan->imtu < skb->len)
6376 if (!chan->ops->recv(chan, skb))
6380 case L2CAP_MODE_ERTM:
6381 case L2CAP_MODE_STREAMING:
6382 l2cap_data_rcv(chan, skb);
6386 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6394 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) packet: only valid on ACL
 * links; looks up a global channel listening on @psm and hands the
 * skb to it if the channel is bound/connected and the packet fits
 * its incoming MTU.  NOTE(review): the drop/free path lines are
 * elided from this listing.
 */
6397 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6398 struct sk_buff *skb)
6400 struct hci_conn *hcon = conn->hcon;
6401 struct l2cap_chan *chan;
6403 if (hcon->type != ACL_LINK)
6406 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6410 BT_DBG("chan %p, len %d", chan, skb->len);
6412 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6415 if (chan->imtu < skb->len)
6418 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel (LE) packet: only valid on LE links;
 * looks up the connected global ATT channel for this address pair and
 * delivers the skb if it fits the channel's incoming MTU.
 * NOTE(review): drop/free path lines are elided from this listing.
 */
6425 static void l2cap_att_channel(struct l2cap_conn *conn,
6426 struct sk_buff *skb)
6428 struct hci_conn *hcon = conn->hcon;
6429 struct l2cap_chan *chan;
6431 if (hcon->type != LE_LINK)
6434 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6435 conn->src, conn->dst);
6439 BT_DBG("chan %p, len %d", chan, skb->len);
6441 if (chan->imtu < skb->len)
6444 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete reassembled L2CAP frame by CID: signalling,
 * connectionless, ATT, LE signalling, SMP, or a dynamic data channel.
 * The basic L2CAP header is stripped here; the header's length field
 * must match the remaining skb length or the frame is discarded.
 * NOTE(review): elided listing — the length-mismatch drop path and
 * some case labels (e.g. ATT/SMP CIDs) are not fully visible.
 */
6451 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6453 struct l2cap_hdr *lh = (void *) skb->data;
6457 skb_pull(skb, L2CAP_HDR_SIZE);
6458 cid = __le16_to_cpu(lh->cid);
6459 len = __le16_to_cpu(lh->len);
6461 if (len != skb->len) {
6466 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6469 case L2CAP_CID_SIGNALING:
6470 l2cap_sig_channel(conn, skb);
6473 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM before the payload. */
6474 psm = get_unaligned((__le16 *) skb->data);
6475 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6476 l2cap_conless_channel(conn, psm, skb);
6480 l2cap_att_channel(conn, skb);
6483 case L2CAP_CID_LE_SIGNALING:
6484 l2cap_le_sig_channel(conn, skb);
/* SMP failure is fatal for the whole connection. */
6488 if (smp_sig_channel(conn, skb))
6489 l2cap_conn_del(conn->hcon, EACCES);
6493 l2cap_data_channel(conn, cid, skb);
6498 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection
 * from @bdaddr.  Scans all listening channels; an exact local-address
 * match (lm1) takes precedence over wildcard BDADDR_ANY listeners
 * (lm2).  Returns the HCI link-mode flags (ACCEPT, optionally MASTER
 * when the channel allows role switch).
 * NOTE(review): the line that sets "exact" is elided from this
 * listing — confirm against the full source.
 */
6500 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6502 int exact = 0, lm1 = 0, lm2 = 0;
6503 struct l2cap_chan *c;
6505 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6507 /* Find listening sockets and check their link_mode */
6508 read_lock(&chan_list_lock);
6509 list_for_each_entry(c, &chan_list, global_l) {
6510 struct sock *sk = c->sk;
6512 if (c->state != BT_LISTEN)
6515 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6516 lm1 |= HCI_LM_ACCEPT;
6517 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6518 lm1 |= HCI_LM_MASTER;
6520 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6521 lm2 |= HCI_LM_ACCEPT;
6522 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6523 lm2 |= HCI_LM_MASTER;
6526 read_unlock(&chan_list_lock);
6528 return exact ? lm1 : lm2;
/* HCI callback: a connection attempt completed.  On success, create
 * (or look up) the l2cap_conn for the hcon and mark it ready; on
 * failure, tear down any L2CAP state with the mapped errno.
 * NOTE(review): the status test between these lines is elided.
 */
6531 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6533 struct l2cap_conn *conn;
6535 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6538 conn = l2cap_conn_add(hcon);
6540 l2cap_conn_ready(conn);
6542 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason to use for a pending disconnect.
 * Falls back to HCI_ERROR_REMOTE_USER_TERM when no l2cap_conn exists
 * (the guard condition line is elided from this listing).
 */
6546 int l2cap_disconn_ind(struct hci_conn *hcon)
6548 struct l2cap_conn *conn = hcon->l2cap_data;
6550 BT_DBG("hcon %p", hcon);
6553 return HCI_ERROR_REMOTE_USER_TERM;
6554 return conn->disc_reason;
/* HCI callback: the link went down — tear down all L2CAP state for
 * this hcon, translating the HCI reason code to an errno. */
6557 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6559 BT_DBG("hcon %p reason %d", hcon, reason);
6561 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a timeout (MEDIUM security) or closes the
 * channel outright (HIGH security); gaining encryption cancels the
 * pending timer for MEDIUM-security channels.
 */
6564 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6566 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6569 if (encrypt == 0x00) {
6570 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6571 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6572 } else if (chan->sec_level == BT_SECURITY_HIGH)
6573 l2cap_chan_close(chan, ECONNREFUSED);
6575 if (chan->sec_level == BT_SECURITY_MEDIUM)
6576 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption status changed for @hcon.
 * For LE links this feeds SMP key distribution; for ACL links it
 * walks every channel on the connection and advances its state
 * machine — resuming pending connects, answering deferred incoming
 * connections (CONNECT2), or closing channels on security failure.
 * NOTE(review): elided listing — several conditions, continues and
 * braces between the visible lines are missing; treat the flow below
 * as indicative and confirm against the full source.
 */
6580 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6582 struct l2cap_conn *conn = hcon->l2cap_data;
6583 struct l2cap_chan *chan;
6588 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6590 if (hcon->type == LE_LINK) {
/* LE: successful encryption triggers SMP key distribution and
 * cancels the SMP security timer. */
6591 if (!status && encrypt)
6592 smp_distribute_keys(conn, 0);
6593 cancel_delayed_work(&conn->security_timer);
6596 mutex_lock(&conn->chan_lock);
6598 list_for_each_entry(chan, &conn->chan_l, list) {
6599 l2cap_chan_lock(chan);
6601 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6602 state_to_string(chan->state));
/* A2MP fixed channels are unaffected by link security. */
6604 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6605 l2cap_chan_unlock(chan);
6609 if (chan->scid == L2CAP_CID_ATT) {
6610 if (!status && encrypt) {
6611 chan->sec_level = hcon->sec_level;
6612 l2cap_chan_ready(chan);
6615 l2cap_chan_unlock(chan);
6619 if (!__l2cap_no_conn_pending(chan)) {
6620 l2cap_chan_unlock(chan);
6624 if (!status && (chan->state == BT_CONNECTED ||
6625 chan->state == BT_CONFIG)) {
6626 struct sock *sk = chan->sk;
/* Security upgrade finished: unblock the socket and re-check
 * encryption-dependent timers. */
6628 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6629 sk->sk_state_change(sk);
6631 l2cap_check_encryption(chan, encrypt);
6632 l2cap_chan_unlock(chan);
6636 if (chan->state == BT_CONNECT) {
6638 l2cap_start_connection(chan);
6640 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6642 } else if (chan->state == BT_CONNECT2) {
6643 struct sock *sk = chan->sk;
6644 struct l2cap_conn_rsp rsp;
/* Incoming connection held for security: either defer to
 * userspace (AUTHOR_PEND), accept (BT_CONFIG), or reject with
 * SEC_BLOCK depending on the security outcome. */
6650 if (test_bit(BT_SK_DEFER_SETUP,
6651 &bt_sk(sk)->flags)) {
6652 res = L2CAP_CR_PEND;
6653 stat = L2CAP_CS_AUTHOR_PEND;
6654 chan->ops->defer(chan);
6656 __l2cap_state_change(chan, BT_CONFIG);
6657 res = L2CAP_CR_SUCCESS;
6658 stat = L2CAP_CS_NO_INFO;
6661 __l2cap_state_change(chan, BT_DISCONN);
6662 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6663 res = L2CAP_CR_SEC_BLOCK;
6664 stat = L2CAP_CS_NO_INFO;
6669 rsp.scid = cpu_to_le16(chan->dcid);
6670 rsp.dcid = cpu_to_le16(chan->scid);
6671 rsp.result = cpu_to_le16(res);
6672 rsp.status = cpu_to_le16(stat);
6673 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On acceptance, kick off configuration immediately. */
6676 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6677 res == L2CAP_CR_SUCCESS) {
6679 set_bit(CONF_REQ_SENT, &chan->conf_state);
6680 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6682 l2cap_build_conf_req(chan, buf),
6684 chan->num_conf_req++;
6688 l2cap_chan_unlock(chan);
6691 mutex_unlock(&conn->chan_lock);
/* HCI entry point for inbound ACL data.  Handles L2CAP reassembly
 * across ACL fragments: a start fragment (ACL_START / _NO_FLUSH)
 * either completes immediately or allocates conn->rx_skb sized from
 * the L2CAP header length; continuation fragments are appended until
 * rx_len reaches zero, then the full frame goes to l2cap_recv_frame().
 * Malformed sequences mark the connection unreliable (ECOMM).
 * NOTE(review): elided listing — the switch header, several drop
 * labels and the kfree_skb of the input skb are not visible here.
 */
6696 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6698 struct l2cap_conn *conn = hcon->l2cap_data;
6699 struct l2cap_hdr *hdr;
6702 /* For AMP controller do not create l2cap conn */
6703 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6707 conn = l2cap_conn_add(hcon);
6712 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6716 case ACL_START_NO_FLUSH:
/* A new start while reassembly was in progress: discard the
 * partial frame and flag the connection. */
6719 BT_ERR("Unexpected start frame (len %d)", skb->len);
6720 kfree_skb(conn->rx_skb);
6721 conn->rx_skb = NULL;
6723 l2cap_conn_unreliable(conn, ECOMM);
6726 /* Start fragment always begin with Basic L2CAP header */
6727 if (skb->len < L2CAP_HDR_SIZE) {
6728 BT_ERR("Frame is too short (len %d)", skb->len);
6729 l2cap_conn_unreliable(conn, ECOMM);
6733 hdr = (struct l2cap_hdr *) skb->data;
6734 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6736 if (len == skb->len) {
6737 /* Complete frame received */
6738 l2cap_recv_frame(conn, skb);
6742 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6744 if (skb->len > len) {
6745 BT_ERR("Frame is too long (len %d, expected len %d)",
6747 l2cap_conn_unreliable(conn, ECOMM);
6751 /* Allocate skb for the complete frame (with header) */
6752 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6756 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6758 conn->rx_len = len - skb->len;
6762 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6764 if (!conn->rx_len) {
6765 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6766 l2cap_conn_unreliable(conn, ECOMM);
6770 if (skb->len > conn->rx_len) {
6771 BT_ERR("Fragment is too long (len %d, expected %d)",
6772 skb->len, conn->rx_len);
6773 kfree_skb(conn->rx_skb);
6774 conn->rx_skb = NULL;
6776 l2cap_conn_unreliable(conn, ECOMM);
6780 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6782 conn->rx_len -= skb->len;
6784 if (!conn->rx_len) {
6785 /* Complete frame received */
6786 l2cap_recv_frame(conn, conn->rx_skb);
6787 conn->rx_skb = NULL;
/* seq_file show callback for the "l2cap" debugfs entry: prints one
 * line per channel in the global chan_list (addresses, state, psm,
 * CIDs, MTUs, security level and mode) under the list read lock.
 */
6797 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6799 struct l2cap_chan *c;
6801 read_lock(&chan_list_lock);
6803 list_for_each_entry(c, &chan_list, global_l) {
6804 struct sock *sk = c->sk;
6806 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6807 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6808 c->state, __le16_to_cpu(c->psm),
6809 c->scid, c->dcid, c->imtu, c->omtu,
6810 c->sec_level, c->mode);
6813 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the seq_file single-show handler. */
6818 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6820 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based).
 * NOTE(review): the .read line is elided from this listing. */
6823 static const struct file_operations l2cap_debugfs_fops = {
6824 .open = l2cap_debugfs_open,
6826 .llseek = seq_lseek,
6827 .release = single_release,
/* Dentry for the debugfs file, created in l2cap_init(). */
6830 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the
 * read-only debugfs entry (failure to create it is only logged,
 * not fatal). */
6832 int __init l2cap_init(void)
6836 err = l2cap_init_sockets();
6841 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6842 NULL, &l2cap_debugfs_fops);
6844 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs entry and unregister sockets,
 * mirroring l2cap_init() in reverse order. */
6850 void l2cap_exit(void)
6852 debugfs_remove(l2cap_debugfs)
6853 l2cap_cleanup_sockets();
/* Runtime-tunable module parameter (0644: root-writable via sysfs)
 * to disable ERTM globally. */
6856 module_param(disable_ertm, bool, 0644);
6857 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");