2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
/* NOTE(review): this file is an extraction excerpt — the leading numbers on
 * each line are original source line numbers, and many interior lines
 * (braces, returns, else-arms) are missing from view. Comments below only
 * describe what the visible lines show. */

/* Locally supported L2CAP feature mask; fixed channels are always claimed. */
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap advertised in info responses; only the L2CAP
 * signalling channel bit is set statically here. */
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of all L2CAP channels, guarded by chan_list_lock (rwlock). */
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for the signalling helpers used before definition. */
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
/* NOTE(review): trailing "void *data);" of this prototype is elided here. */
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
/* Look up a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock; the match/return lines are elided here. */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
67 list_for_each_entry(c, &conn->chan_l, list) {

/* Look up a channel on this connection by source CID.
 * Caller must hold conn->chan_lock; the match/return lines are elided here. */
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
78 list_for_each_entry(c, &conn->chan_l, list) {

85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
91 mutex_lock(&conn->chan_lock);
92 c = __l2cap_get_chan_by_scid(conn, cid);
/* presumably the elided lines lock the found channel before returning it
 * (per the comment above) — confirm against the full source. */
95 mutex_unlock(&conn->chan_lock);

/* Look up a channel by the signalling-command identifier it is waiting on.
 * Caller must hold conn->chan_lock. */
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
102 struct l2cap_chan *c;
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)

/* Global lookup: any channel bound to this PSM (sport) and source address.
 * Caller must hold chan_list_lock. */
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
113 struct l2cap_chan *c;
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM for the given source address.
 * A caller-supplied PSM fails if already taken (elided error path); with no
 * PSM, an odd dynamic one is probed in 0x1001..0x10ff. Returns int status. */
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
126 write_lock(&chan_list_lock);
128 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd (LSB set), step 2 keeps them odd. */
141 for (p = 0x1001; p < 0x1100; p += 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 chan->psm = cpu_to_le16(p);
144 chan->sport = cpu_to_le16(p);
151 write_unlock(&chan_list_lock);

/* Record a fixed SCID on the channel; assignment line is elided here. */
155 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
157 write_lock(&chan_list_lock);
161 write_unlock(&chan_list_lock);

/* Pick the first free dynamic CID on this connection.
 * Caller must hold conn->chan_lock; exhaustion return is elided here. */
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
168 u16 cid = L2CAP_CID_DYN_START;
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition the channel state via the ops callback; caller holds the
 * relevant socket/channel lock (the locked wrapper below takes it). */
178 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
180 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
181 state_to_string(state));
184 chan->ops->state_change(chan, state);

/* Locked wrapper around __l2cap_state_change; the lock/unlock lines around
 * the call are elided in this excerpt. */
187 static void l2cap_state_change(struct l2cap_chan *chan, int state)
189 struct sock *sk = chan->sk;
192 __l2cap_state_change(chan, state);

/* Set sk_err on the backing socket (unlocked variant; body elided). */
196 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
198 struct sock *sk = chan->sk;

/* Locked wrapper for __l2cap_chan_set_err. */
203 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
205 struct sock *sk = chan->sk;
208 __l2cap_chan_set_err(chan, err);

/* Arm the ERTM retransmission timer unless the monitor timer is already
 * pending or no retransmission timeout is configured. */
212 static void __set_retrans_timer(struct l2cap_chan *chan)
214 if (!delayed_work_pending(&chan->monitor_timer) &&
215 chan->retrans_timeout) {
216 l2cap_set_timer(chan, &chan->retrans_timer,
217 msecs_to_jiffies(chan->retrans_timeout));

/* Switch from the retransmission timer to the monitor timer. */
221 static void __set_monitor_timer(struct l2cap_chan *chan)
223 __clear_retrans_timer(chan);
224 if (chan->monitor_timeout) {
225 l2cap_set_timer(chan, &chan->monitor_timer,
226 msecs_to_jiffies(chan->monitor_timeout));

/* Linear search of a queue for the skb carrying ERTM txseq == seq;
 * second parameter and the return lines are elided in this excerpt. */
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array for a sequence-number list.
 * Return statements (including the -ENOMEM path) are elided in this excerpt. */
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Power-of-two size makes (seq & mask) the array index. */
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

/* Release the backing array (kfree(NULL) is a no-op). */
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);

/* O(1) membership test; second parameter line is elided here. */
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;

/* Remove seq from the list: O(1) for the head, O(n) walk otherwise.
 * Returns the removed seq (final return elided) or CLEAR if absent. */
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* List became empty: reset both ends. */
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;

/* Pop the head element (delegates to the O(1) head path of remove). */
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);

/* Empty the list: clear every slot, then reset head and tail.
 * Early-returns immediately when already empty. */
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

/* O(1) append at the tail; duplicates are ignored (already-member check).
 * The else-arm between the empty-list case and the tail link is elided. */
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with a
 * state-dependent reason, then drops the reference taken when the timer
 * was armed. Lock order: conn->chan_lock, then the channel lock. */
361 static void l2cap_chan_timeout(struct work_struct *work)
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 struct l2cap_conn *conn = chan->conn;
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
/* default reason (else-arm) is elided in this excerpt */
381 l2cap_chan_close(chan, reason);
383 l2cap_chan_unlock(chan);
/* close callback runs after the channel lock is dropped */
385 chan->ops->close(chan);
386 mutex_unlock(&conn->chan_lock);
388 l2cap_chan_put(chan);

/* Allocate a new channel, register it on the global list, and initialise
 * its lock, timer, refcount and initial BT_OPEN state. */
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
409 atomic_set(&chan->refcnt, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);

/* Unregister from the global list and drop the creation reference. */
419 void l2cap_chan_destroy(struct l2cap_chan *chan)
421 write_lock(&chan_list_lock);
422 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock);
425 l2cap_chan_put(chan);
/* Reset a channel to spec-default ERTM parameters and low security. */
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
435 chan->sec_level = BT_SECURITY_LOW;
437 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);

/* Attach a channel to a connection: assign CIDs/MTUs per channel type,
 * seed default QoS values, take a channel ref, and link into conn->chan_l.
 * Caller must hold conn->chan_lock (see l2cap_chan_add below). */
440 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
442 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
443 __le16_to_cpu(chan->psm), chan->dcid);
445 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
449 switch (chan->chan_type) {
450 case L2CAP_CHAN_CONN_ORIENTED:
/* LE links use the fixed LE data CID in both directions. */
451 if (conn->hcon->type == LE_LINK) {
453 chan->omtu = L2CAP_DEFAULT_MTU;
454 chan->scid = L2CAP_CID_LE_DATA;
455 chan->dcid = L2CAP_CID_LE_DATA;
457 /* Alloc CID for connection-oriented socket */
458 chan->scid = l2cap_alloc_cid(conn);
459 chan->omtu = L2CAP_DEFAULT_MTU;
463 case L2CAP_CHAN_CONN_LESS:
464 /* Connectionless socket */
465 chan->scid = L2CAP_CID_CONN_LESS;
466 chan->dcid = L2CAP_CID_CONN_LESS;
467 chan->omtu = L2CAP_DEFAULT_MTU;
470 case L2CAP_CHAN_CONN_FIX_A2MP:
471 chan->scid = L2CAP_CID_A2MP;
472 chan->dcid = L2CAP_CID_A2MP;
473 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
474 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
/* default case (raw channel type) follows */
478 /* Raw socket can send/recv signalling messages only */
479 chan->scid = L2CAP_CID_SIGNALING;
480 chan->dcid = L2CAP_CID_SIGNALING;
481 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort service defaults for the (extended) flow spec. */
484 chan->local_id = L2CAP_BESTEFFORT_ID;
485 chan->local_stype = L2CAP_SERV_BESTEFFORT;
486 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
487 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
488 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
489 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
491 l2cap_chan_hold(chan);
493 list_add(&chan->list, &conn->chan_l);

/* Locked wrapper around __l2cap_chan_add. */
496 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
498 mutex_lock(&conn->chan_lock);
499 __l2cap_chan_add(conn, chan);
500 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop the channel timer, unlink,
 * drop refs, notify teardown, and purge per-mode queues/timers/lists. */
503 void l2cap_chan_del(struct l2cap_chan *chan, int err)
505 struct l2cap_conn *conn = chan->conn;
507 __clear_chan_timer(chan);
509 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
512 /* Delete from channel list */
513 list_del(&chan->list);
515 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an hci_conn reference. */
519 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
520 hci_conn_put(conn->hcon);
523 if (chan->ops->teardown)
524 chan->ops->teardown(chan, err);
/* Config never completed: nothing mode-specific to clean up (early out). */
526 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
530 case L2CAP_MODE_BASIC:
533 case L2CAP_MODE_ERTM:
534 __clear_retrans_timer(chan);
535 __clear_monitor_timer(chan);
536 __clear_ack_timer(chan);
538 skb_queue_purge(&chan->srej_q);
540 l2cap_seq_list_free(&chan->srej_list);
541 l2cap_seq_list_free(&chan->retrans_list);
/* fall through to streaming cleanup (tx queue purge) */
545 case L2CAP_MODE_STREAMING:
546 skb_queue_purge(&chan->tx_q);

/* State-machine driven close: send DISCONN_REQ or a CONN_RSP rejection
 * depending on state, otherwise just delete the channel. */
553 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
555 struct l2cap_conn *conn = chan->conn;
556 struct sock *sk = chan->sk;
558 BT_DBG("chan %p state %s sk %p", chan,
559 state_to_string(chan->state), sk);
561 switch (chan->state) {
/* BT_LISTEN case (label elided): only teardown is needed. */
563 if (chan->ops->teardown)
564 chan->ops->teardown(chan, 0);
/* Connected/config states on ACL: start disconnect handshake with timer. */
569 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
570 conn->hcon->type == ACL_LINK) {
571 __set_chan_timer(chan, sk->sk_sndtimeo);
572 l2cap_send_disconn_req(conn, chan, reason);
574 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: peer awaits our CONN_RSP — reject it explicitly. */
578 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
579 conn->hcon->type == ACL_LINK) {
580 struct l2cap_conn_rsp rsp;
583 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
584 result = L2CAP_CR_SEC_BLOCK;
586 result = L2CAP_CR_BAD_PSM;
587 l2cap_state_change(chan, BT_DISCONN);
/* scid/dcid are swapped in the response: ours is the peer's dcid. */
589 rsp.scid = cpu_to_le16(chan->dcid);
590 rsp.dcid = cpu_to_le16(chan->scid);
591 rsp.result = cpu_to_le16(result);
592 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
593 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
597 l2cap_chan_del(chan, reason);
/* BT_CONNECT / BT_DISCONN (labels elided): plain delete. */
602 l2cap_chan_del(chan, reason);
/* default: teardown only */
606 if (chan->ops->teardown)
607 chan->ops->teardown(chan, 0);
/* Map the channel type / PSM / security level to an HCI authentication
 * requirement. Raw channels use dedicated bonding, SDP never bonds, and
 * everything else uses general bonding. */
612 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
614 if (chan->chan_type == L2CAP_CHAN_RAW) {
615 switch (chan->sec_level) {
616 case BT_SECURITY_HIGH:
617 return HCI_AT_DEDICATED_BONDING_MITM;
618 case BT_SECURITY_MEDIUM:
619 return HCI_AT_DEDICATED_BONDING;
/* default case label elided */
621 return HCI_AT_NO_BONDING;
623 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
/* SDP is downgraded from LOW to the dedicated SDP security level. */
624 if (chan->sec_level == BT_SECURITY_LOW)
625 chan->sec_level = BT_SECURITY_SDP;
627 if (chan->sec_level == BT_SECURITY_HIGH)
628 return HCI_AT_NO_BONDING_MITM;
630 return HCI_AT_NO_BONDING;
/* else-arm (general case) — labels partially elided below */
632 switch (chan->sec_level) {
633 case BT_SECURITY_HIGH:
634 return HCI_AT_GENERAL_BONDING_MITM;
635 case BT_SECURITY_MEDIUM:
636 return HCI_AT_GENERAL_BONDING;
638 return HCI_AT_NO_BONDING;

643 /* Service level security */
644 int l2cap_chan_check_security(struct l2cap_chan *chan)
646 struct l2cap_conn *conn = chan->conn;
649 auth_type = l2cap_get_auth_type(chan);
651 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);

/* Allocate the next signalling-command identifier under conn->lock.
 * Wrap-around handling after 128 is elided in this excerpt. */
654 static u8 l2cap_get_ident(struct l2cap_conn *conn)
658 /* Get next available identificator.
659 * 1 - 128 are used by kernel.
660 * 129 - 199 are reserved.
661 * 200 - 254 are used by utilities like l2ping, etc.
664 spin_lock(&conn->lock);
666 if (++conn->tx_ident > 128)
671 spin_unlock(&conn->lock);
/* Build and transmit a signalling command on the connection's HCI channel.
 * Signalling frames are sent non-flushable when the controller supports it
 * and always at maximum priority with the radio forced active. */
676 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
678 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
681 BT_DBG("code 0x%2.2x", code);
/* NULL-skb early return is elided in this excerpt. */
686 if (lmp_no_flush_capable(conn->hcon->hdev))
687 flags = ACL_START_NO_FLUSH;
691 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
692 skb->priority = HCI_PRIO_MAX;
694 hci_send_acl(conn->hchan, skb, flags);

/* Transmit a data skb for the channel, honouring the channel's flushable
 * and force-active flags. */
697 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
699 struct hci_conn *hcon = chan->conn->hcon;
702 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
705 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
706 lmp_no_flush_capable(hcon->hdev))
707 flags = ACL_START_NO_FLUSH;
711 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
712 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into l2cap_ctrl. The frame-type
 * bit selects S-frame (poll/super) vs I-frame (sar/txseq) decoding; the
 * sframe flag assignments are elided in this excerpt. */
715 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
717 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
718 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
720 if (enh & L2CAP_CTRL_FRAME_TYPE) {
723 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
724 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
/* I-frame arm (else) */
731 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
732 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

/* Same decoding for the 32-bit extended control field. */
739 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
741 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
742 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
744 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
747 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
748 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
/* I-frame arm (else) */
755 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
756 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

/* Pull and decode the control field from the front of an skb, choosing
 * extended vs enhanced format from the channel's EXT_CTRL flag. */
763 static inline void __unpack_control(struct l2cap_chan *chan,
766 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
767 __unpack_extended_control(get_unaligned_le32(skb->data),
768 &bt_cb(skb)->control);
769 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
771 __unpack_enhanced_control(get_unaligned_le16(skb->data),
772 &bt_cb(skb)->control);
773 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);

/* Encode l2cap_ctrl into a 32-bit extended control word (inverse of
 * __unpack_extended_control). */
777 static u32 __pack_extended_control(struct l2cap_ctrl *control)
781 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
782 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
784 if (control->sframe) {
785 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
786 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
787 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
789 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
790 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;

/* Encode l2cap_ctrl into a 16-bit enhanced control word. */
796 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
800 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
801 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
803 if (control->sframe) {
804 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
805 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
806 packed |= L2CAP_CTRL_FRAME_TYPE;
808 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
809 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;

/* Write the packed control word into an skb just after the L2CAP header. */
815 static inline void __pack_control(struct l2cap_chan *chan,
816 struct l2cap_ctrl *control,
819 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
820 put_unaligned_le32(__pack_extended_control(control),
821 skb->data + L2CAP_HDR_SIZE);
823 put_unaligned_le16(__pack_enhanced_control(control),
824 skb->data + L2CAP_HDR_SIZE);

/* ERTM header size: extended (4-byte control) vs enhanced (2-byte). */
828 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
830 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
831 return L2CAP_EXT_HDR_SIZE;
833 return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU: L2CAP header + (enhanced|extended) control word,
 * plus a CRC16 FCS over the whole frame when FCS is enabled. */
836 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
840 struct l2cap_hdr *lh;
841 int hlen = __ertm_hdr_size(chan);
843 if (chan->fcs == L2CAP_FCS_CRC16)
844 hlen += L2CAP_FCS_SIZE;
846 skb = bt_skb_alloc(hlen, GFP_KERNEL);
849 return ERR_PTR(-ENOMEM);
851 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
/* length field excludes the basic L2CAP header itself */
852 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
853 lh->cid = cpu_to_le16(chan->dcid);
855 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
856 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
858 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
860 if (chan->fcs == L2CAP_FCS_CRC16) {
861 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
862 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
865 skb->priority = HCI_PRIO_MAX;

/* Send an S-frame described by *control, folding in a pending F-bit,
 * tracking RNR state, and acking up to reqseq for non-SREJ frames. */
869 static void l2cap_send_sframe(struct l2cap_chan *chan,
870 struct l2cap_ctrl *control)
875 BT_DBG("chan %p, control %p", chan, control);
877 if (!control->sframe)
/* Pending F-bit is consumed into this frame (condition partly elided). */
880 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
884 if (control->super == L2CAP_SUPER_RR)
885 clear_bit(CONN_RNR_SENT, &chan->conn_state);
886 else if (control->super == L2CAP_SUPER_RNR)
887 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* SREJ does not acknowledge, so it must not move last_acked_seq. */
889 if (control->super != L2CAP_SUPER_SREJ) {
890 chan->last_acked_seq = control->reqseq;
891 __clear_ack_timer(chan);
894 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
895 control->final, control->poll, control->super);
897 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
898 control_field = __pack_extended_control(control);
900 control_field = __pack_enhanced_control(control);
902 skb = l2cap_create_sframe_pdu(chan, control_field);
/* IS_ERR(skb) check is elided in this excerpt */
904 l2cap_do_send(chan, skb);

/* Send RR (ready) or RNR (local busy) carrying the current buffer_seq. */
907 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
909 struct l2cap_ctrl control;
911 BT_DBG("chan %p, poll %d", chan, poll);
913 memset(&control, 0, sizeof(control));
/* sframe/poll assignments are elided in this excerpt */
917 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
918 control.super = L2CAP_SUPER_RNR;
920 control.super = L2CAP_SUPER_RR;
922 control.reqseq = chan->buffer_seq;
923 l2cap_send_sframe(chan, &control);
/* True when no CONN_REQ is outstanding for this channel. */
926 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
928 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);

/* Send an L2CAP Connection Request, remembering the ident so the response
 * can be matched, and marking the connect as pending. */
931 static void l2cap_send_conn_req(struct l2cap_chan *chan)
933 struct l2cap_conn *conn = chan->conn;
934 struct l2cap_conn_req req;
936 req.scid = cpu_to_le16(chan->scid);
/* req.psm assignment is elided in this excerpt */
939 chan->ident = l2cap_get_ident(conn);
941 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
943 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);

/* Mark the channel fully connected: clear all config flags, stop the
 * channel timer, and notify the owner via ops->ready(). */
946 static void l2cap_chan_ready(struct l2cap_chan *chan)
948 /* This clears all conf flags, including CONF_NOT_COMPLETE */
949 chan->conf_state = 0;
950 __clear_chan_timer(chan);
952 chan->state = BT_CONNECTED;
954 chan->ops->ready(chan);

/* Kick off channel establishment: LE channels are ready immediately;
 * on BR/EDR, either send CONN_REQ (once the feature mask is known and
 * security passes) or first issue an information request. */
957 static void l2cap_do_start(struct l2cap_chan *chan)
959 struct l2cap_conn *conn = chan->conn;
961 if (conn->hcon->type == LE_LINK) {
962 l2cap_chan_ready(chan);
966 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight: wait for its completion. */
967 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
970 if (l2cap_chan_check_security(chan) &&
971 __l2cap_no_conn_pending(chan))
972 l2cap_send_conn_req(chan);
974 struct l2cap_info_req req;
975 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
977 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
978 conn->info_ident = l2cap_get_ident(conn);
980 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
982 l2cap_send_cmd(conn, conn->info_ident,
983 L2CAP_INFO_REQ, sizeof(req), &req);

/* Check an ERTM/streaming mode against both the remote feature mask and
 * our local one; other modes fall through (default return elided). */
987 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
989 u32 local_feat_mask = l2cap_feat_mask;
/* conditional enabling of ERTM/streaming (guard elided in excerpt) */
991 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
994 case L2CAP_MODE_ERTM:
995 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
996 case L2CAP_MODE_STREAMING:
997 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate disconnect: stop ERTM timers, and for normal channels send a
 * Disconnection Request before moving to BT_DISCONN with the given error.
 * A2MP fixed channels skip the request and change state directly. */
1003 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1005 struct sock *sk = chan->sk;
1006 struct l2cap_disconn_req req;
/* !conn early return is elided in this excerpt */
1011 if (chan->mode == L2CAP_MODE_ERTM) {
1012 __clear_retrans_timer(chan);
1013 __clear_monitor_timer(chan);
1014 __clear_ack_timer(chan);
1017 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1018 __l2cap_state_change(chan, BT_DISCONN);
1022 req.dcid = cpu_to_le16(chan->dcid);
1023 req.scid = cpu_to_le16(chan->scid);
1024 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1025 L2CAP_DISCONN_REQ, sizeof(req), &req);
/* state/err update under socket lock; lock calls elided in excerpt */
1028 __l2cap_state_change(chan, BT_DISCONN);
1029 __l2cap_chan_set_err(chan, err);
1033 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel once the feature exchange is done:
 * BT_CONNECT channels get a CONN_REQ (or are closed if the mode is
 * unsupported), BT_CONNECT2 channels get their pending CONN_RSP plus the
 * first configuration request on success. */
1034 static void l2cap_conn_start(struct l2cap_conn *conn)
1036 struct l2cap_chan *chan, *tmp;
1038 BT_DBG("conn %p", conn);
1040 mutex_lock(&conn->chan_lock);
1042 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1043 struct sock *sk = chan->sk;
1045 l2cap_chan_lock(chan);
1047 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1048 l2cap_chan_unlock(chan);
/* continue to next channel (elided) */
1052 if (chan->state == BT_CONNECT) {
1053 if (!l2cap_chan_check_security(chan) ||
1054 !__l2cap_no_conn_pending(chan)) {
1055 l2cap_chan_unlock(chan);
/* Mode unsupported by the peer and state-2 device: give up cleanly. */
1059 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1060 && test_bit(CONF_STATE2_DEVICE,
1061 &chan->conf_state)) {
1062 l2cap_chan_close(chan, ECONNRESET);
1063 l2cap_chan_unlock(chan);
1067 l2cap_send_conn_req(chan);
1069 } else if (chan->state == BT_CONNECT2) {
1070 struct l2cap_conn_rsp rsp;
/* Response carries our IDs from the peer's point of view. */
1072 rsp.scid = cpu_to_le16(chan->dcid);
1073 rsp.dcid = cpu_to_le16(chan->scid);
1075 if (l2cap_chan_check_security(chan)) {
/* lock_sock around the deferred-setup check is elided here */
1077 if (test_bit(BT_SK_DEFER_SETUP,
1078 &bt_sk(sk)->flags)) {
1079 struct sock *parent = bt_sk(sk)->parent;
1080 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1081 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* wake the listening parent so accept() can run */
1083 parent->sk_data_ready(parent, 0);
1086 __l2cap_state_change(chan, BT_CONFIG);
1087 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1088 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* security still pending: tell peer authentication is in progress */
1092 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1093 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1096 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* sizeof(rsp), &rsp arguments elided in this excerpt */
1099 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1100 rsp.result != L2CAP_CR_SUCCESS) {
1101 l2cap_chan_unlock(chan);
1105 set_bit(CONF_REQ_SENT, &chan->conf_state);
1106 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1107 l2cap_build_conf_req(chan, buf), buf);
1108 chan->num_conf_req++;
1111 l2cap_chan_unlock(chan);
1114 mutex_unlock(&conn->chan_lock);
1117 /* Find socket with cid and source/destination bdaddr.
1118 * Returns closest match, locked.
/* Exact src+dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) candidate c1 is remembered and returned (return elided). */
1120 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1124 struct l2cap_chan *c, *c1 = NULL;
1126 read_lock(&chan_list_lock);
1128 list_for_each_entry(c, &chan_list, global_l) {
1129 struct sock *sk = c->sk;
1131 if (state && c->state != state)
1134 if (c->scid == cid) {
1135 int src_match, dst_match;
1136 int src_any, dst_any;
1139 src_match = !bacmp(&bt_sk(sk)->src, src);
1140 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1141 if (src_match && dst_match) {
1142 read_unlock(&chan_list_lock);
/* return c (elided) */
1147 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1148 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1149 if ((src_match && dst_any) || (src_any && dst_match) ||
1150 (src_any && dst_any))
/* c1 = c (elided) */
1155 read_unlock(&chan_list_lock);

/* An LE link came up as slave: if a socket listens on the LE data CID,
 * spawn a child channel, bind it to the link addresses, queue it for
 * accept(), and mark it ready. Error paths are elided in this excerpt. */
1160 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1162 struct sock *parent, *sk;
1163 struct l2cap_chan *chan, *pchan;
1167 /* Check if we have socket listening on cid */
1168 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1169 conn->src, conn->dst);
1177 chan = pchan->ops->new_connection(pchan);
1183 hci_conn_hold(conn->hcon);
1185 bacpy(&bt_sk(sk)->src, conn->src);
1186 bacpy(&bt_sk(sk)->dst, conn->dst);
1188 bt_accept_enqueue(parent, sk);
1190 l2cap_chan_add(conn, chan);
1192 l2cap_chan_ready(chan);
1195 release_sock(parent);
/* HCI link is up: handle LE accept/security, then per channel either wait
 * for SMP (LE), mark non-connection-oriented channels connected, or start
 * the BR/EDR connect sequence for channels in BT_CONNECT. */
1198 static void l2cap_conn_ready(struct l2cap_conn *conn)
1200 struct l2cap_chan *chan;
1202 BT_DBG("conn %p", conn);
1204 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1205 l2cap_le_conn_ready(conn);
1207 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1208 smp_conn_security(conn, conn->hcon->pending_sec_level);
1210 mutex_lock(&conn->chan_lock);
1212 list_for_each_entry(chan, &conn->chan_l, list) {
1214 l2cap_chan_lock(chan);
/* A2MP fixed channels manage their own readiness. */
1216 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1217 l2cap_chan_unlock(chan);
1221 if (conn->hcon->type == LE_LINK) {
1222 if (smp_conn_security(conn, chan->sec_level))
1223 l2cap_chan_ready(chan);
1225 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1226 struct sock *sk = chan->sk;
1227 __clear_chan_timer(chan);
/* lock_sock/release_sock around this state change are elided */
1229 __l2cap_state_change(chan, BT_CONNECTED);
1230 sk->sk_state_change(sk);
1233 } else if (chan->state == BT_CONNECT)
1234 l2cap_do_start(chan);
1236 l2cap_chan_unlock(chan);
1239 mutex_unlock(&conn->chan_lock);

1242 /* Notify sockets that we cannot guaranty reliability anymore */
1243 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1245 struct l2cap_chan *chan;
1247 BT_DBG("conn %p", conn);
1249 mutex_lock(&conn->chan_lock);
1251 list_for_each_entry(chan, &conn->chan_l, list) {
/* only channels that demanded reliability get the error */
1252 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1253 __l2cap_chan_set_err(chan, err);
1256 mutex_unlock(&conn->chan_lock);

/* Info-request timer expired: give up on the feature exchange and proceed
 * with connection startup using whatever is known. */
1259 static void l2cap_info_timeout(struct work_struct *work)
1261 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1264 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1265 conn->info_ident = 0;
1267 l2cap_conn_start(conn);
/* Tear down an L2CAP connection: delete every channel (holding a temporary
 * ref across the close callback), drop the HCI channel, cancel pending
 * timers, and detach from the hci_conn. */
1270 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1272 struct l2cap_conn *conn = hcon->l2cap_data;
1273 struct l2cap_chan *chan, *l;
/* !conn early return is elided in this excerpt */
1278 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* partial reassembly buffer is discarded */
1280 kfree_skb(conn->rx_skb);
1282 mutex_lock(&conn->chan_lock);
1285 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* hold keeps chan alive through ops->close after chan_del drops refs */
1286 l2cap_chan_hold(chan);
1287 l2cap_chan_lock(chan);
1289 l2cap_chan_del(chan, err);
1291 l2cap_chan_unlock(chan);
1293 chan->ops->close(chan);
1294 l2cap_chan_put(chan);
1297 mutex_unlock(&conn->chan_lock);
1299 hci_chan_del(conn->hchan);
1301 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1302 cancel_delayed_work_sync(&conn->info_timer);
1304 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1305 cancel_delayed_work_sync(&conn->security_timer);
1306 smp_chan_destroy(conn);
1309 hcon->l2cap_data = NULL;
/* kfree(conn) is elided in this excerpt */

/* SMP security timer expired: destroy the SMP context and drop the link. */
1313 static void security_timeout(struct work_struct *work)
1315 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1316 security_timer.work);
1318 BT_DBG("conn %p", conn);
1320 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1321 smp_chan_destroy(conn);
1322 l2cap_conn_del(conn->hcon, ETIMEDOUT);

/* Create (or return the existing) l2cap_conn for an hci_conn: allocate an
 * HCI channel, pick the MTU by link type, and initialise locks, the channel
 * list and the appropriate (security or info) delayed work. */
1326 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1328 struct l2cap_conn *conn = hcon->l2cap_data;
1329 struct hci_chan *hchan;
/* existing-conn early return is elided in this excerpt */
1334 hchan = hci_chan_create(hcon);
1338 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* allocation failure releases the hci_chan */
1340 hci_chan_del(hchan);
1344 hcon->l2cap_data = conn;
1346 conn->hchan = hchan;
1348 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1350 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1351 conn->mtu = hcon->hdev->le_mtu;
1353 conn->mtu = hcon->hdev->acl_mtu;
1355 conn->src = &hcon->hdev->bdaddr;
1356 conn->dst = &hcon->dst;
1358 conn->feat_mask = 0;
1360 spin_lock_init(&conn->lock);
1361 mutex_init(&conn->chan_lock);
1363 INIT_LIST_HEAD(&conn->chan_l);
1365 if (hcon->type == LE_LINK)
1366 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1368 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1370 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1375 /* ---- Socket interface ---- */
1377 /* Find socket with psm and source / destination bdaddr.
1378 * Returns closest match.
/* Same matching policy as l2cap_global_chan_by_scid, keyed on PSM:
 * exact src+dst wins, else the best wildcard candidate (return elided). */
1380 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1384 struct l2cap_chan *c, *c1 = NULL;
1386 read_lock(&chan_list_lock);
1388 list_for_each_entry(c, &chan_list, global_l) {
1389 struct sock *sk = c->sk;
1391 if (state && c->state != state)
1394 if (c->psm == psm) {
1395 int src_match, dst_match;
1396 int src_any, dst_any;
1399 src_match = !bacmp(&bt_sk(sk)->src, src);
1400 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1401 if (src_match && dst_match) {
1402 read_unlock(&chan_list_lock);
/* return c (elided) */
1407 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1408 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1409 if ((src_match && dst_any) || (src_any && dst_match) ||
1410 (src_any && dst_any))
/* c1 = c (elided) */
1415 read_unlock(&chan_list_lock);

/* Outgoing connect: validate PSM/CID/mode/state, resolve the route, create
 * the ACL or LE link, attach the channel to the connection and either start
 * the L2CAP handshake or finish immediately if the link is already up.
 * Several error-label jumps and the final return are elided here. */
1420 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1421 bdaddr_t *dst, u8 dst_type)
1423 struct sock *sk = chan->sk;
1424 bdaddr_t *src = &bt_sk(sk)->src;
1425 struct l2cap_conn *conn;
1426 struct hci_conn *hcon;
1427 struct hci_dev *hdev;
1431 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1432 dst_type, __le16_to_cpu(chan->psm));
1434 hdev = hci_get_route(dst, src);
1436 return -EHOSTUNREACH;
/* hci_dev_lock is elided in this excerpt */
1440 l2cap_chan_lock(chan);
1442 /* PSM must be odd and lsb of upper byte must be 0 */
1443 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1444 chan->chan_type != L2CAP_CHAN_RAW) {
/* -EINVAL goto (elided) */
1449 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
/* Only basic/ERTM/streaming modes may connect; others rejected. */
1454 switch (chan->mode) {
1455 case L2CAP_MODE_BASIC:
1457 case L2CAP_MODE_ERTM:
1458 case L2CAP_MODE_STREAMING:
1467 switch (chan->state) {
1471 /* Already connecting */
1476 /* Already connected */
1490 /* Set destination address and psm */
1492 bacpy(&bt_sk(sk)->dst, dst);
/* chan->psm/dcid assignment lines are elided in this excerpt */
1498 auth_type = l2cap_get_auth_type(chan);
1500 if (chan->dcid == L2CAP_CID_LE_DATA)
1501 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1502 chan->sec_level, auth_type);
1504 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1505 chan->sec_level, auth_type);
1508 err = PTR_ERR(hcon);
1512 conn = l2cap_conn_add(hcon, 0);
/* LE links support only a single channel on the fixed CID. */
1519 if (hcon->type == LE_LINK) {
1522 if (!list_empty(&conn->chan_l)) {
1531 /* Update source addr of the socket */
1532 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, so drop the channel lock around it */
1534 l2cap_chan_unlock(chan);
1535 l2cap_chan_add(conn, chan);
1536 l2cap_chan_lock(chan);
1538 l2cap_state_change(chan, BT_CONNECT);
1539 __set_chan_timer(chan, sk->sk_sndtimeo);
1541 if (hcon->state == BT_CONNECTED) {
1542 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1543 __clear_chan_timer(chan);
1544 if (l2cap_chan_check_security(chan))
1545 l2cap_state_change(chan, BT_CONNECTED);
1547 l2cap_do_start(chan);
1553 l2cap_chan_unlock(chan);
1554 hci_dev_unlock(hdev);
/* Block (interruptibly) until all outstanding ERTM I-frames on the
 * socket's channel have been acked (unacked_frames == 0) or the
 * connection goes away.  Returns 0, a signal errno, or sk's error.
 */
1559 int __l2cap_wait_ack(struct sock *sk)
1561 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1562 DECLARE_WAITQUEUE(wait, current);
1566 add_wait_queue(sk_sleep(sk), &wait);
1567 set_current_state(TASK_INTERRUPTIBLE);
1568 while (chan->unacked_frames > 0 && chan->conn) {
/* Bail out early if a signal arrived while waiting. */
1572 if (signal_pending(current)) {
1573 err = sock_intr_errno(timeo);
1578 timeo = schedule_timeout(timeo);
1580 set_current_state(TASK_INTERRUPTIBLE);
1582 err = sock_error(sk);
1586 set_current_state(TASK_RUNNING);
1587 remove_wait_queue(sk_sleep(sk), &wait);
/* Delayed-work callback for the ERTM monitor timer.  Feeds a
 * MONITOR_TO event into the tx state machine under the channel lock,
 * then drops the reference the timer held on the channel.
 */
1591 static void l2cap_monitor_timeout(struct work_struct *work)
1593 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1594 monitor_timer.work);
1596 BT_DBG("chan %p", chan);
1598 l2cap_chan_lock(chan);
/* Early-out path (condition not visible in this sampled view):
 * unlock and drop the timer's channel reference without acting.
 */
1601 l2cap_chan_unlock(chan);
1602 l2cap_chan_put(chan);
1606 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1608 l2cap_chan_unlock(chan);
1609 l2cap_chan_put(chan);
/* Delayed-work callback for the ERTM retransmission timer.  Mirrors
 * l2cap_monitor_timeout() but injects L2CAP_EV_RETRANS_TO into the
 * tx state machine.
 */
1612 static void l2cap_retrans_timeout(struct work_struct *work)
1614 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1615 retrans_timer.work);
1617 BT_DBG("chan %p", chan);
1619 l2cap_chan_lock(chan);
/* Early-out path (condition not visible in this sampled view). */
1622 l2cap_chan_unlock(chan);
1623 l2cap_chan_put(chan);
1627 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1628 l2cap_chan_unlock(chan);
1629 l2cap_chan_put(chan);
/* Transmit a queue of segmented PDUs in streaming mode: append @skbs
 * to tx_q, then drain tx_q, stamping each frame's control field with
 * the next tx sequence number (reqseq is unused/0 in streaming mode),
 * appending an FCS when configured, and handing it to l2cap_do_send().
 */
1632 static void l2cap_streaming_send(struct l2cap_chan *chan,
1633 struct sk_buff_head *skbs)
1635 struct sk_buff *skb;
1636 struct l2cap_ctrl *control;
1638 BT_DBG("chan %p, skbs %p", chan, skbs);
1640 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1642 while (!skb_queue_empty(&chan->tx_q)) {
1644 skb = skb_dequeue(&chan->tx_q);
1646 bt_cb(skb)->control.retries = 1;
1647 control = &bt_cb(skb)->control;
1649 control->reqseq = 0;
1650 control->txseq = chan->next_tx_seq;
1652 __pack_control(chan, control, skb);
/* FCS covers the L2CAP header plus payload built so far. */
1654 if (chan->fcs == L2CAP_FCS_CRC16) {
1655 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1656 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1659 l2cap_do_send(chan, skb);
1661 BT_DBG("Sent txseq %u", control->txseq);
1663 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1664 chan->frames_sent++;
/* Send as many queued ERTM I-frames as the remote tx window allows.
 * Skips out entirely if the channel is not connected or the remote
 * side has signalled busy (RNR).  Each frame is stamped with txseq /
 * reqseq (which piggy-backs an ack), optionally FCS-protected, and a
 * clone is transmitted so the original stays queued for retransmit.
 * Returns the number of frames sent (per the closing BT_DBG).
 */
1668 static int l2cap_ertm_send(struct l2cap_chan *chan)
1670 struct sk_buff *skb, *tx_skb;
1671 struct l2cap_ctrl *control;
1674 BT_DBG("chan %p", chan);
1676 if (chan->state != BT_CONNECTED)
1679 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
/* Stop when the unsent head runs out, the remote window fills,
 * or the tx state machine leaves XMIT.
 */
1682 while (chan->tx_send_head &&
1683 chan->unacked_frames < chan->remote_tx_win &&
1684 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1686 skb = chan->tx_send_head;
1688 bt_cb(skb)->control.retries = 1;
1689 control = &bt_cb(skb)->control;
/* Pending F-bit is consumed by the first frame sent. */
1691 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1694 control->reqseq = chan->buffer_seq;
1695 chan->last_acked_seq = chan->buffer_seq;
1696 control->txseq = chan->next_tx_seq;
1698 __pack_control(chan, control, skb);
1700 if (chan->fcs == L2CAP_FCS_CRC16) {
1701 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1702 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1705 /* Clone after data has been modified. Data is assumed to be
1706 read-only (for locking purposes) on cloned sk_buffs.
1708 tx_skb = skb_clone(skb, GFP_KERNEL);
1713 __set_retrans_timer(chan);
1715 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1716 chan->unacked_frames++;
1717 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, or NULL at end. */
1720 if (skb_queue_is_last(&chan->tx_q, skb))
1721 chan->tx_send_head = NULL;
1723 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1725 l2cap_do_send(chan, tx_skb);
1726 BT_DBG("Sent txseq %u", control->txseq);
1729 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1730 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number currently on retrans_list.  For
 * each seq: find the frame in tx_q, enforce the max_tx retry limit
 * (disconnecting with ECONNRESET when exceeded), refresh the control
 * field (reqseq ack, optional F-bit), and send a copy.  Cloned skbs
 * are deep-copied first since their data is treated as read-only.
 */
1736 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1737 struct l2cap_ctrl control;
1738 struct sk_buff *skb;
1739 struct sk_buff *tx_skb;
1742 BT_DBG("chan %p", chan);
1744 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1747 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1748 seq = l2cap_seq_list_pop(&chan->retrans_list);
1750 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1752 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1757 bt_cb(skb)->control.retries++;
1758 control = bt_cb(skb)->control;
/* Retry limit reached: tear the channel down. */
1760 if (chan->max_tx != 0 &&
1761 bt_cb(skb)->control.retries > chan->max_tx) {
1762 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1763 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1764 l2cap_seq_list_clear(&chan->retrans_list);
1768 control.reqseq = chan->buffer_seq;
1769 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1774 if (skb_cloned(skb)) {
1775 /* Cloned sk_buffs are read-only, so we need a
1778 tx_skb = skb_copy(skb, GFP_ATOMIC);
1780 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* Allocation failure: abandon the whole retransmit run. */
1784 l2cap_seq_list_clear(&chan->retrans_list);
1788 /* Update skb contents */
1789 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1790 put_unaligned_le32(__pack_extended_control(&control),
1791 tx_skb->data + L2CAP_HDR_SIZE);
1793 put_unaligned_le16(__pack_enhanced_control(&control),
1794 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute FCS over the rewritten control field. */
1797 if (chan->fcs == L2CAP_FCS_CRC16) {
1798 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1799 put_unaligned_le16(fcs, skb_put(tx_skb,
1803 l2cap_do_send(chan, tx_skb);
1805 BT_DBG("Resent txseq %d", control.txseq);
1807 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq: queue
 * that seq on retrans_list and run the resend machinery.
 */
1811 static void l2cap_retransmit(struct l2cap_chan *chan,
1812 struct l2cap_ctrl *control)
1814 BT_DBG("chan %p, control %p", chan, control);
1816 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1817 l2cap_ertm_resend(chan);
/* Retransmit all unacked frames starting at control->reqseq (REJ
 * recovery).  Rebuilds retrans_list by walking tx_q from the frame
 * matching reqseq up to (but not including) tx_send_head, then
 * resends — unless the remote side is busy.
 */
1820 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1821 struct l2cap_ctrl *control)
1823 struct sk_buff *skb;
1825 BT_DBG("chan %p, control %p", chan, control);
/* Presumably gated on control->poll (condition line not visible
 * in this sampled view) — the F-bit is requested for the reply.
 */
1828 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1830 l2cap_seq_list_clear(&chan->retrans_list);
1832 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1835 if (chan->unacked_frames) {
/* First walk: locate the frame with txseq == reqseq (or stop
 * at the unsent head if reqseq was already acked past).
 */
1836 skb_queue_walk(&chan->tx_q, skb) {
1837 if (bt_cb(skb)->control.txseq == control->reqseq ||
1838 skb == chan->tx_send_head)
/* Second walk: queue every frame up to tx_send_head. */
1842 skb_queue_walk_from(&chan->tx_q, skb) {
1843 if (skb == chan->tx_send_head)
1846 l2cap_seq_list_append(&chan->retrans_list,
1847 bt_cb(skb)->control.txseq);
1850 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends RNR when locally busy;
 * otherwise tries to piggy-back the ack on pending I-frames, sends an
 * explicit RR once the receive window is ~3/4 full (threshold is
 * computed without mul/div), or arms the ack timer to batch acks.
 */
1854 static void l2cap_send_ack(struct l2cap_chan *chan)
1856 struct l2cap_ctrl control;
1857 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1858 chan->last_acked_seq);
1861 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1862 chan, chan->last_acked_seq, chan->buffer_seq);
1864 memset(&control, 0, sizeof(control));
/* Locally busy while still in plain RECV state: tell the peer
 * to hold off with an RNR supervisory frame.
 */
1867 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1868 chan->rx_state == L2CAP_RX_STATE_RECV) {
1869 __clear_ack_timer(chan);
1870 control.super = L2CAP_SUPER_RNR;
1871 control.reqseq = chan->buffer_seq;
1872 l2cap_send_sframe(chan, &control);
1874 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1875 l2cap_ertm_send(chan);
1876 /* If any i-frames were sent, they included an ack */
1877 if (chan->buffer_seq == chan->last_acked_seq)
1881 /* Ack now if the window is 3/4ths full.
1882 * Calculate without mul or div
1884 threshold = chan->ack_win;
/* threshold = ack_win * 3 (then presumably >>= 2 for 3/4 on a
 * line not visible in this sampled view).
 */
1885 threshold += threshold << 1;
1888 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1891 if (frames_to_ack >= threshold) {
1892 __clear_ack_timer(chan);
1893 control.super = L2CAP_SUPER_RR;
1894 control.reqseq = chan->buffer_seq;
1895 l2cap_send_sframe(chan, &control);
/* Below threshold: defer the ack via the ack timer. */
1900 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb, spilling anything
 * beyond the first @count bytes into a frag_list of continuation
 * skbs sized by the connection MTU.  Returns 0 on success or a
 * negative errno (PTR_ERR from the channel's alloc_skb callback).
 */
1904 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1905 struct msghdr *msg, int len,
1906 int count, struct sk_buff *skb)
1908 struct l2cap_conn *conn = chan->conn;
1909 struct sk_buff **frag;
/* First chunk goes straight into the head skb. */
1912 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1918 /* Continuation fragments (no L2CAP header) */
1919 frag = &skb_shinfo(skb)->frag_list;
1921 struct sk_buff *tmp;
1923 count = min_t(unsigned int, conn->mtu, len);
1925 tmp = chan->ops->alloc_skb(chan, count,
1926 msg->msg_flags & MSG_DONTWAIT);
1928 return PTR_ERR(tmp);
1932 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1935 (*frag)->priority = skb->priority;
/* Account the fragment against the head skb's totals. */
1940 skb->len += (*frag)->len;
1941 skb->data_len += (*frag)->len;
1943 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by @len bytes copied from @msg.  Returns the
 * skb or an ERR_PTR on allocation/copy failure.
 */
1949 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1950 struct msghdr *msg, size_t len,
1953 struct l2cap_conn *conn = chan->conn;
1954 struct sk_buff *skb;
1955 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1956 struct l2cap_hdr *lh;
1958 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
/* Head skb holds at most one MTU; the rest goes to frag_list. */
1960 count = min_t(unsigned int, (conn->mtu - hlen), len);
1962 skb = chan->ops->alloc_skb(chan, count + hlen,
1963 msg->msg_flags & MSG_DONTWAIT);
1967 skb->priority = priority;
1969 /* Create L2CAP header */
1970 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1971 lh->cid = cpu_to_le16(chan->dcid);
1972 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1973 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1975 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1976 if (unlikely(err < 0)) {
1978 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by
 * @len bytes copied from @msg.  Returns the skb or an ERR_PTR.
 */
1983 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1984 struct msghdr *msg, size_t len,
1987 struct l2cap_conn *conn = chan->conn;
1988 struct sk_buff *skb;
1990 struct l2cap_hdr *lh;
1992 BT_DBG("chan %p len %zu", chan, len);
1994 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1996 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1997 msg->msg_flags & MSG_DONTWAIT);
2001 skb->priority = priority;
2003 /* Create L2CAP header */
2004 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2005 lh->cid = cpu_to_le16(chan->dcid);
2006 lh->len = cpu_to_le16(len);
2008 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2009 if (unlikely(err < 0)) {
2011 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed
 * control field (enhanced or extended, filled in at send time), an
 * optional SDU-length field for the first segment of a segmented SDU
 * (@sdulen non-zero), and @len payload bytes from @msg.  Header
 * length budgets FCS space when CRC16 is enabled.  Returns the skb
 * or an ERR_PTR (-ENOTCONN if the channel has no connection).
 */
2016 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2017 struct msghdr *msg, size_t len,
2020 struct l2cap_conn *conn = chan->conn;
2021 struct sk_buff *skb;
2022 int err, count, hlen;
2023 struct l2cap_hdr *lh;
2025 BT_DBG("chan %p len %zu", chan, len);
2028 return ERR_PTR(-ENOTCONN);
2030 hlen = __ertm_hdr_size(chan);
/* SAR start segment carries an extra SDU-length field. */
2033 hlen += L2CAP_SDULEN_SIZE;
2035 if (chan->fcs == L2CAP_FCS_CRC16)
2036 hlen += L2CAP_FCS_SIZE;
2038 count = min_t(unsigned int, (conn->mtu - hlen), len);
2040 skb = chan->ops->alloc_skb(chan, count + hlen,
2041 msg->msg_flags & MSG_DONTWAIT);
2045 /* Create L2CAP header */
2046 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2047 lh->cid = cpu_to_le16(chan->dcid);
2048 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2050 /* Control header is populated later */
2051 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2052 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2054 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2057 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2059 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2060 if (unlikely(err < 0)) {
2062 return ERR_PTR(err);
/* Stash per-frame metadata used by the tx path. */
2065 bt_cb(skb)->control.fcs = chan->fcs;
2066 bt_cb(skb)->control.retries = 0;
/* Segment an SDU from @msg into I-frame PDUs queued on @seg_queue.
 * PDU size is derived from the HCI MTU (capped so each PDU fits one
 * HCI fragment), reduced by FCS/header overhead and the remote MPS.
 * Frames are tagged SAR UNSEGMENTED, or START / CONTINUE / END for a
 * multi-frame SDU.  On failure the partial queue is purged and the
 * error from l2cap_create_iframe_pdu() is returned.
 */
2070 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2071 struct sk_buff_head *seg_queue,
2072 struct msghdr *msg, size_t len)
2074 struct sk_buff *skb;
2079 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2081 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2082 * so fragmented skbs are not used. The HCI layer's handling
2083 * of fragmented skbs is not compatible with ERTM's queueing.
2086 /* PDU size is derived from the HCI MTU */
2087 pdu_len = chan->conn->mtu;
2089 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2091 /* Adjust for largest possible L2CAP overhead. */
2093 pdu_len -= L2CAP_FCS_SIZE;
2095 pdu_len -= __ertm_hdr_size(chan);
2097 /* Remote device may have requested smaller PDUs */
2098 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2100 if (len <= pdu_len) {
2101 sar = L2CAP_SAR_UNSEGMENTED;
/* Multi-segment SDU: the first frame carries the SDU length. */
2105 sar = L2CAP_SAR_START;
2107 pdu_len -= L2CAP_SDULEN_SIZE;
2111 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2114 __skb_queue_purge(seg_queue);
2115 return PTR_ERR(skb);
2118 bt_cb(skb)->control.sar = sar;
2119 __skb_queue_tail(seg_queue, skb);
/* After the START frame, subsequent PDUs regain the SDULEN space. */
2124 pdu_len += L2CAP_SDULEN_SIZE;
2127 if (len <= pdu_len) {
2128 sar = L2CAP_SAR_END;
2131 sar = L2CAP_SAR_CONTINUE;
/* Top-level send entry point.  Dispatches on channel type and mode:
 * connectionless channels get a single G-frame; basic mode gets one
 * B-frame (after an outgoing-MTU check); ERTM/streaming segments the
 * SDU first, re-checks the channel is still connected, then hands
 * the segment queue to the tx state machine (ERTM) or the streaming
 * sender.  Unknown mode hits the "bad state" debug path.
 */
2138 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2141 struct sk_buff *skb;
2143 struct sk_buff_head seg_queue;
2145 /* Connectionless channel */
2146 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2147 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2149 return PTR_ERR(skb);
2151 l2cap_do_send(chan, skb);
2155 switch (chan->mode) {
2156 case L2CAP_MODE_BASIC:
2157 /* Check outgoing MTU */
2158 if (len > chan->omtu)
2161 /* Create a basic PDU */
2162 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2164 return PTR_ERR(skb);
2166 l2cap_do_send(chan, skb);
2170 case L2CAP_MODE_ERTM:
2171 case L2CAP_MODE_STREAMING:
2172 /* Check outgoing MTU */
2173 if (len > chan->omtu) {
2178 __skb_queue_head_init(&seg_queue);
2180 /* Do segmentation before calling in to the state machine,
2181 * since it's possible to block while waiting for memory
2184 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2186 /* The channel could have been closed while segmenting,
2187 * check that it is still connected.
2189 if (chan->state != BT_CONNECTED) {
2190 __skb_queue_purge(&seg_queue);
2197 if (chan->mode == L2CAP_MODE_ERTM)
2198 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2200 l2cap_streaming_send(chan, &seg_queue);
2204 /* If the skbs were not queued for sending, they'll still be in
2205 * seg_queue and need to be purged.
2207 __skb_queue_purge(&seg_queue);
2211 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and @txseq (exclusive), skipping seqs already held
 * in srej_q, and record each request on srej_list.  Finally advance
 * expected_tx_seq past @txseq.
 */
2218 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2220 struct l2cap_ctrl control;
2223 BT_DBG("chan %p, txseq %u", chan, txseq);
2225 memset(&control, 0, sizeof(control));
2227 control.super = L2CAP_SUPER_SREJ;
2229 for (seq = chan->expected_tx_seq; seq != txseq;
2230 seq = __next_seq(chan, seq)) {
2231 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2232 control.reqseq = seq;
2233 l2cap_send_sframe(chan, &control);
2234 l2cap_seq_list_append(&chan->srej_list, seq);
2238 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number on srej_list; no-op if the list is empty.
 */
2241 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2243 struct l2cap_ctrl control;
2245 BT_DBG("chan %p", chan);
2247 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2250 memset(&control, 0, sizeof(control));
2252 control.super = L2CAP_SUPER_SREJ;
2253 control.reqseq = chan->srej_list.tail;
2254 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding seq on srej_list except
 * @txseq (which just arrived).  Each popped seq is re-appended, so
 * the initial head is captured to guarantee a single pass.
 */
2257 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2259 struct l2cap_ctrl control;
2263 BT_DBG("chan %p, txseq %u", chan, txseq);
2265 memset(&control, 0, sizeof(control));
2267 control.super = L2CAP_SUPER_SREJ;
2269 /* Capture initial list head to allow only one pass through the list. */
2270 initial_head = chan->srej_list.head;
2273 seq = l2cap_seq_list_pop(&chan->srej_list);
2274 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2277 control.reqseq = seq;
2278 l2cap_send_sframe(chan, &control);
2279 l2cap_seq_list_append(&chan->srej_list, seq);
2280 } while (chan->srej_list.head != initial_head);
/* Process an incoming ack (reqseq): release every tx_q frame with
 * txseq in [expected_ack_seq, reqseq), decrement unacked_frames,
 * and stop the retransmit timer once nothing is outstanding.
 */
2283 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2285 struct sk_buff *acked_skb;
2288 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing to ack, or ack is a duplicate of the current state. */
2290 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2293 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2294 chan->expected_ack_seq, chan->unacked_frames);
2296 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2297 ackseq = __next_seq(chan, ackseq)) {
2299 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2301 skb_unlink(acked_skb, &chan->tx_q);
2302 kfree_skb(acked_skb);
2303 chan->unacked_frames--;
2307 chan->expected_ack_seq = reqseq;
2309 if (chan->unacked_frames == 0)
2310 __clear_retrans_timer(chan);
2312 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort SREJ-based receive recovery: rewind expected_tx_seq to
 * buffer_seq, drop all pending SREJ bookkeeping and buffered
 * out-of-order frames, and return the rx state machine to RECV.
 */
2315 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2317 BT_DBG("chan %p", chan);
2319 chan->expected_tx_seq = chan->buffer_seq;
2320 l2cap_seq_list_clear(&chan->srej_list);
2321 skb_queue_purge(&chan->srej_q);
2322 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine handler for the XMIT state.  Queues and
 * transmits new data, enters/exits local-busy, processes acks, and
 * transitions to WAIT_F when a poll is sent (explicit poll or
 * retransmission timeout).
 */
2325 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2326 struct l2cap_ctrl *control,
2327 struct sk_buff_head *skbs, u8 event)
2329 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2333 case L2CAP_EV_DATA_REQUEST:
/* Track the first unsent frame, then queue and send. */
2334 if (chan->tx_send_head == NULL)
2335 chan->tx_send_head = skb_peek(skbs);
2337 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2338 l2cap_ertm_send(chan);
2340 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2341 BT_DBG("Enter LOCAL_BUSY");
2342 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2344 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2345 /* The SREJ_SENT state must be aborted if we are to
2346 * enter the LOCAL_BUSY state.
2348 l2cap_abort_rx_srej_sent(chan);
/* Sends RNR since CONN_LOCAL_BUSY is now set. */
2351 l2cap_send_ack(chan);
2354 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2355 BT_DBG("Exit LOCAL_BUSY");
2356 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If an RNR went out, poll the peer with RR(P=1) and wait
 * for the F-bit response in WAIT_F.
 */
2358 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2359 struct l2cap_ctrl local_control;
2361 memset(&local_control, 0, sizeof(local_control));
2362 local_control.sframe = 1;
2363 local_control.super = L2CAP_SUPER_RR;
2364 local_control.poll = 1;
2365 local_control.reqseq = chan->buffer_seq;
2366 l2cap_send_sframe(chan, &local_control);
2368 chan->retry_count = 1;
2369 __set_monitor_timer(chan);
2370 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2373 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2374 l2cap_process_reqseq(chan, control->reqseq);
2376 case L2CAP_EV_EXPLICIT_POLL:
2377 l2cap_send_rr_or_rnr(chan, 1);
2378 chan->retry_count = 1;
2379 __set_monitor_timer(chan);
2380 __clear_ack_timer(chan);
2381 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2383 case L2CAP_EV_RETRANS_TO:
/* Retransmission timer fired: poll the peer and await F. */
2384 l2cap_send_rr_or_rnr(chan, 1);
2385 chan->retry_count = 1;
2386 __set_monitor_timer(chan);
2387 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2389 case L2CAP_EV_RECV_FBIT:
2390 /* Nothing to process */
/* ERTM tx state machine handler for the WAIT_F state (a poll is
 * outstanding).  New data is queued but not sent; receiving the
 * F-bit returns the machine to XMIT; monitor timeouts re-poll until
 * max_tx retries are exhausted, then disconnect with ECONNABORTED.
 */
2397 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2398 struct l2cap_ctrl *control,
2399 struct sk_buff_head *skbs, u8 event)
2401 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2405 case L2CAP_EV_DATA_REQUEST:
2406 if (chan->tx_send_head == NULL)
2407 chan->tx_send_head = skb_peek(skbs);
2408 /* Queue data, but don't send. */
2409 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2411 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2412 BT_DBG("Enter LOCAL_BUSY");
2413 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2415 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2416 /* The SREJ_SENT state must be aborted if we are to
2417 * enter the LOCAL_BUSY state.
2419 l2cap_abort_rx_srej_sent(chan);
2422 l2cap_send_ack(chan);
2425 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2426 BT_DBG("Exit LOCAL_BUSY");
2427 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2429 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2430 struct l2cap_ctrl local_control;
2431 memset(&local_control, 0, sizeof(local_control));
2432 local_control.sframe = 1;
2433 local_control.super = L2CAP_SUPER_RR;
2434 local_control.poll = 1;
2435 local_control.reqseq = chan->buffer_seq;
2436 l2cap_send_sframe(chan, &local_control);
2438 chan->retry_count = 1;
2439 __set_monitor_timer(chan);
2440 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2443 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2444 l2cap_process_reqseq(chan, control->reqseq);
2448 case L2CAP_EV_RECV_FBIT:
/* Poll answered: stop monitoring, re-arm retransmit timer if
 * frames are still unacked, and go back to XMIT.
 */
2449 if (control && control->final) {
2450 __clear_monitor_timer(chan);
2451 if (chan->unacked_frames > 0)
2452 __set_retrans_timer(chan);
2453 chan->retry_count = 0;
2454 chan->tx_state = L2CAP_TX_STATE_XMIT;
2455 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2458 case L2CAP_EV_EXPLICIT_POLL:
/* Already polling — ignore. */
2461 case L2CAP_EV_MONITOR_TO:
2462 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2463 l2cap_send_rr_or_rnr(chan, 1);
2464 __set_monitor_timer(chan);
2465 chan->retry_count++;
/* Retry budget exhausted: give up on the channel. */
2467 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch a tx event to the handler for the channel's current tx
 * state (XMIT or WAIT_F).
 */
2475 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2476 struct sk_buff_head *skbs, u8 event)
2478 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2479 chan, control, skbs, event, chan->tx_state);
2481 switch (chan->tx_state) {
2482 case L2CAP_TX_STATE_XMIT:
2483 l2cap_tx_state_xmit(chan, control, skbs, event);
2485 case L2CAP_TX_STATE_WAIT_F:
2486 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's reqseq/F-bit to the tx state machine
 * as a RECV_REQSEQ_AND_FBIT event (ack processing).
 */
2494 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2495 struct l2cap_ctrl *control)
2497 BT_DBG("chan %p, control %p", chan, control);
2498 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only the F-bit of a received frame to the tx state
 * machine as a RECV_FBIT event.
 */
2501 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2502 struct l2cap_ctrl *control)
2504 BT_DBG("chan %p, control %p", chan, control);
2505 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2508 /* Copy frame to all raw sockets on that connection */
2509 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2511 struct sk_buff *nskb;
2512 struct l2cap_chan *chan;
2514 BT_DBG("conn %p", conn);
2516 mutex_lock(&conn->chan_lock);
2518 list_for_each_entry(chan, &conn->chan_l, list) {
2519 struct sock *sk = chan->sk;
2520 if (chan->chan_type != L2CAP_CHAN_RAW)
2523 /* Don't send frame to the socket it came from */
2526 nskb = skb_clone(skb, GFP_ATOMIC);
2530 if (chan->ops->recv(chan, nskb))
2534 mutex_unlock(&conn->chan_lock);
2537 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID is the
 * LE or BR/EDR signalling channel depending on link type), command
 * header (code/ident/len), and @dlen bytes of @data, fragmented into
 * a frag_list when larger than the connection MTU.
 */
2538 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2539 u8 ident, u16 dlen, void *data)
2541 struct sk_buff *skb, **frag;
2542 struct l2cap_cmd_hdr *cmd;
2543 struct l2cap_hdr *lh;
2546 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2547 conn, code, ident, dlen);
2549 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2550 count = min_t(unsigned int, conn->mtu, len);
2552 skb = bt_skb_alloc(count, GFP_ATOMIC);
2556 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2557 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2559 if (conn->hcon->type == LE_LINK)
2560 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2562 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2564 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2567 cmd->len = cpu_to_le16(dlen);
/* Copy what fits of the payload into the head skb. */
2570 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2571 memcpy(skb_put(skb, count), data, count);
2577 /* Continuation fragments (no L2CAP header) */
2578 frag = &skb_shinfo(skb)->frag_list;
2580 count = min_t(unsigned int, conn->mtu, len);
2582 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2586 memcpy(skb_put(*frag, count), data, count);
2591 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: report its type and
 * length, decode the value (1/2/4-byte values are read inline,
 * anything else is returned as a pointer) and return the number of
 * bytes consumed so the caller can advance through the option list.
 */
2601 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2603 struct l2cap_conf_opt *opt = *ptr;
2606 len = L2CAP_CONF_OPT_SIZE + opt->len;
2614 *val = *((u8 *) opt->val);
2618 *val = get_unaligned_le16(opt->val);
2622 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes. */
2626 *val = (unsigned long) opt->val;
2630 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and
 * advance *ptr past it.  1/2/4-byte values are stored inline;
 * larger values are memcpy'd from the pointer passed in @val.
 */
2634 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2636 struct l2cap_conf_opt *opt = *ptr;
2638 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2645 *((u8 *) opt->val) = val;
2649 put_unaligned_le16(val, opt->val);
2653 put_unaligned_le32(val, opt->val);
2657 memcpy(opt->val, (void *) val, len);
2661 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters.  ERTM uses the channel's local
 * service type with default access latency / flush timeout;
 * streaming mode forces best-effort.
 */
2664 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2666 struct l2cap_conf_efs efs;
2668 switch (chan->mode) {
2669 case L2CAP_MODE_ERTM:
2670 efs.id = chan->local_id;
2671 efs.stype = chan->local_stype;
2672 efs.msdu = cpu_to_le16(chan->local_msdu);
2673 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2674 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2675 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2678 case L2CAP_MODE_STREAMING:
2680 efs.stype = L2CAP_SERV_BESTEFFORT;
2681 efs.msdu = cpu_to_le16(chan->local_msdu);
2682 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2691 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2692 (unsigned long) &efs);
/* Delayed-work callback for the ERTM ack timer: if frames are
 * pending acknowledgement, send an RR/RNR (no poll), then drop the
 * timer's reference on the channel.
 */
2695 static void l2cap_ack_timeout(struct work_struct *work)
2697 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2701 BT_DBG("chan %p", chan);
2703 l2cap_chan_lock(chan);
2705 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2706 chan->last_acked_seq);
2709 l2cap_send_rr_or_rnr(chan, 0);
2711 l2cap_chan_unlock(chan);
2712 l2cap_chan_put(chan);
/* Initialize per-channel ERTM/streaming state: zero all sequence
 * counters, set up the tx queue, and (ERTM only) the rx/tx state
 * machines, the retrans/monitor/ack delayed-work timers, the SREJ
 * queue and the srej/retrans sequence lists.  Returns 0 or the
 * error from l2cap_seq_list_init(), freeing srej_list on the
 * second-init failure path.
 */
2715 int l2cap_ertm_init(struct l2cap_chan *chan)
2719 chan->next_tx_seq = 0;
2720 chan->expected_tx_seq = 0;
2721 chan->expected_ack_seq = 0;
2722 chan->unacked_frames = 0;
2723 chan->buffer_seq = 0;
2724 chan->frames_sent = 0;
2725 chan->last_acked_seq = 0;
2727 chan->sdu_last_frag = NULL;
2730 skb_queue_head_init(&chan->tx_q);
/* Everything below applies to ERTM only. */
2732 if (chan->mode != L2CAP_MODE_ERTM)
2735 chan->rx_state = L2CAP_RX_STATE_RECV;
2736 chan->tx_state = L2CAP_TX_STATE_XMIT;
2738 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2739 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2740 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2742 skb_queue_head_init(&chan->srej_q);
2744 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2748 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2750 l2cap_seq_list_free(&chan->srej_list);
/* Pick an operating mode: keep STREAMING/ERTM if the remote's
 * feature mask supports it, otherwise fall back to BASIC.
 */
2755 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2758 case L2CAP_MODE_STREAMING:
2759 case L2CAP_MODE_ERTM:
2760 if (l2cap_mode_supported(mode, remote_feat_mask))
2764 return L2CAP_MODE_BASIC;
/* Extended Window Size supported: needs high-speed enabled and the
 * remote EXT_WINDOW feature bit.
 */
2768 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2770 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec supported: needs high-speed enabled and the
 * remote EXT_FLOW feature bit.
 */
2773 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2775 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Configure the tx window: switch to the extended control field and
 * extended window max when the requested window exceeds the default
 * and EWS is supported; otherwise clamp to the default window.
 * ack_win mirrors the final tx_win.
 */
2778 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2780 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2781 __l2cap_ews_supported(chan)) {
2782 /* use extended control field */
2783 set_bit(FLAG_EXT_CTRL, &chan->flags);
2784 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2786 chan->tx_win = min_t(u16, chan->tx_win,
2787 L2CAP_DEFAULT_TX_WINDOW);
2788 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2790 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request into @data.  On the first
 * request, possibly downgrade the channel mode based on the remote
 * feature mask.  Adds an MTU option when non-default, then a mode-
 * specific RFC option, plus EFS / FCS / EWS options as negotiated.
 * Returns the total request length (ptr - data, final return not
 * visible in this sampled view).
 */
2793 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2795 struct l2cap_conf_req *req = data;
2796 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2797 void *ptr = req->data;
2800 BT_DBG("chan %p", chan);
/* Mode selection happens only before any conf req/rsp exchange. */
2802 if (chan->num_conf_req || chan->num_conf_rsp)
2805 switch (chan->mode) {
2806 case L2CAP_MODE_STREAMING:
2807 case L2CAP_MODE_ERTM:
2808 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2811 if (__l2cap_efs_supported(chan))
2812 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2816 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2821 if (chan->imtu != L2CAP_DEFAULT_MTU)
2822 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2824 switch (chan->mode) {
2825 case L2CAP_MODE_BASIC:
/* If the remote supports neither ERTM nor streaming there is
 * no point sending an RFC option at all.
 */
2826 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2827 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2830 rfc.mode = L2CAP_MODE_BASIC;
2832 rfc.max_transmit = 0;
2833 rfc.retrans_timeout = 0;
2834 rfc.monitor_timeout = 0;
2835 rfc.max_pdu_size = 0;
2837 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2838 (unsigned long) &rfc);
2841 case L2CAP_MODE_ERTM:
2842 rfc.mode = L2CAP_MODE_ERTM;
2843 rfc.max_transmit = chan->max_tx;
2844 rfc.retrans_timeout = 0;
2845 rfc.monitor_timeout = 0;
/* Cap the advertised PDU size by what fits in the HCI MTU
 * after L2CAP extended-header overhead.
 */
2847 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2848 L2CAP_EXT_HDR_SIZE -
2851 rfc.max_pdu_size = cpu_to_le16(size);
2853 l2cap_txwin_setup(chan);
2855 rfc.txwin_size = min_t(u16, chan->tx_win,
2856 L2CAP_DEFAULT_TX_WINDOW);
2858 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2859 (unsigned long) &rfc);
2861 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2862 l2cap_add_opt_efs(&ptr, chan);
2864 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to disable FCS when allowed. */
2867 if (chan->fcs == L2CAP_FCS_NONE ||
2868 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2869 chan->fcs = L2CAP_FCS_NONE;
2870 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2873 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2874 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2878 case L2CAP_MODE_STREAMING:
2879 l2cap_txwin_setup(chan);
2880 rfc.mode = L2CAP_MODE_STREAMING;
2882 rfc.max_transmit = 0;
2883 rfc.retrans_timeout = 0;
2884 rfc.monitor_timeout = 0;
2886 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2887 L2CAP_EXT_HDR_SIZE -
2890 rfc.max_pdu_size = cpu_to_le16(size);
2892 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2893 (unsigned long) &rfc);
2895 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2896 l2cap_add_opt_efs(&ptr, chan);
2898 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2901 if (chan->fcs == L2CAP_FCS_NONE ||
2902 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2903 chan->fcs = L2CAP_FCS_NONE;
2904 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2909 req->dcid = cpu_to_le16(chan->dcid);
2910 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request options (stored in
 * chan->conf_req by l2cap_config_req()) and build our Configure
 * Response into @data.  Returns the response length on success or
 * -ECONNREFUSED when the requested mode/EFS parameters cannot be
 * accepted and the channel must be disconnected.
 * NOTE(review): this is a numbered extract with gaps (embedded line
 * numbers jump), so some braces/breaks/switch headers are not visible.
 */
2915 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2917 struct l2cap_conf_rsp *rsp = data;
2918 void *ptr = rsp->data;
2919 void *req = chan->conf_req;
2920 int len = chan->conf_len;
2921 int type, hint, olen;
2923 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2924 struct l2cap_conf_efs efs;
2926 u16 mtu = L2CAP_DEFAULT_MTU;
2927 u16 result = L2CAP_CONF_SUCCESS;
2930 BT_DBG("chan %p", chan);
/* Walk every option (TLV) found in the stored request. */
2932 while (len >= L2CAP_CONF_OPT_SIZE) {
2933 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* A "hint" option may be ignored if unknown; a non-hint must be
 * answered with CONF_UNKNOWN below. */
2935 hint = type & L2CAP_CONF_HINT;
2936 type &= L2CAP_CONF_MASK;
2939 case L2CAP_CONF_MTU:
2943 case L2CAP_CONF_FLUSH_TO:
2944 chan->flush_to = val;
2947 case L2CAP_CONF_QOS:
2950 case L2CAP_CONF_RFC:
2951 if (olen == sizeof(rfc))
2952 memcpy(&rfc, (void *) val, olen);
2955 case L2CAP_CONF_FCS:
2956 if (val == L2CAP_FCS_NONE)
2957 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2960 case L2CAP_CONF_EFS:
2962 if (olen == sizeof(efs))
2963 memcpy(&efs, (void *) val, olen);
/* Extended Window Size: refuse unless extended control is usable. */
2966 case L2CAP_CONF_EWS:
2968 return -ECONNREFUSED;
2970 set_bit(FLAG_EXT_CTRL, &chan->flags);
2971 set_bit(CONF_EWS_RECV, &chan->conf_state);
2972 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2973 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
2980 result = L2CAP_CONF_UNKNOWN;
2981 *((u8 *) ptr++) = type;
/* Mode negotiation only happens on the first exchange. */
2986 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2989 switch (chan->mode) {
2990 case L2CAP_MODE_STREAMING:
2991 case L2CAP_MODE_ERTM:
2992 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2993 chan->mode = l2cap_select_mode(rfc.mode,
2994 chan->conn->feat_mask);
2999 if (__l2cap_efs_supported(chan))
3000 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3002 return -ECONNREFUSED;
3005 if (chan->mode != rfc.mode)
3006 return -ECONNREFUSED;
/* Peer asked for a different mode: reject once with our mode,
 * refuse outright on a repeated mismatch. */
3012 if (chan->mode != rfc.mode) {
3013 result = L2CAP_CONF_UNACCEPT;
3014 rfc.mode = chan->mode;
3016 if (chan->num_conf_rsp == 1)
3017 return -ECONNREFUSED;
3019 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3020 sizeof(rfc), (unsigned long) &rfc);
3023 if (result == L2CAP_CONF_SUCCESS) {
3024 /* Configure output options and let the other side know
3025 * which ones we don't like. */
3027 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3028 result = L2CAP_CONF_UNACCEPT;
3031 set_bit(CONF_MTU_DONE, &chan->conf_state);
3033 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must be NO_TRAFFIC or match ours. */
3036 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3037 efs.stype != L2CAP_SERV_NOTRAFIC &&
3038 efs.stype != chan->local_stype) {
3040 result = L2CAP_CONF_UNACCEPT;
3042 if (chan->num_conf_req >= 1)
3043 return -ECONNREFUSED;
3045 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3047 (unsigned long) &efs);
3049 /* Send PENDING Conf Rsp */
3050 result = L2CAP_CONF_PENDING;
3051 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3056 case L2CAP_MODE_BASIC:
3057 chan->fcs = L2CAP_FCS_NONE;
3058 set_bit(CONF_MODE_DONE, &chan->conf_state);
3061 case L2CAP_MODE_ERTM:
/* Remote tx window comes from EWS if received, else from RFC. */
3062 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3063 chan->remote_tx_win = rfc.txwin_size;
3065 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3067 chan->remote_max_tx = rfc.max_transmit;
/* Clamp peer's max PDU to what fits in our ACL MTU. */
3069 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3071 L2CAP_EXT_HDR_SIZE -
3074 rfc.max_pdu_size = cpu_to_le16(size);
3075 chan->remote_mps = size;
3077 rfc.retrans_timeout =
3078 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3079 rfc.monitor_timeout =
3080 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3082 set_bit(CONF_MODE_DONE, &chan->conf_state);
3084 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3085 sizeof(rfc), (unsigned long) &rfc);
/* Record the peer's EFS parameters and echo them back. */
3087 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3088 chan->remote_id = efs.id;
3089 chan->remote_stype = efs.stype;
3090 chan->remote_msdu = le16_to_cpu(efs.msdu);
3091 chan->remote_flush_to =
3092 le32_to_cpu(efs.flush_to);
3093 chan->remote_acc_lat =
3094 le32_to_cpu(efs.acc_lat);
3095 chan->remote_sdu_itime =
3096 le32_to_cpu(efs.sdu_itime);
3097 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3098 sizeof(efs), (unsigned long) &efs);
3102 case L2CAP_MODE_STREAMING:
3103 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3105 L2CAP_EXT_HDR_SIZE -
3108 rfc.max_pdu_size = cpu_to_le16(size);
3109 chan->remote_mps = size;
3111 set_bit(CONF_MODE_DONE, &chan->conf_state);
3113 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3114 sizeof(rfc), (unsigned long) &rfc);
3119 result = L2CAP_CONF_UNACCEPT;
3121 memset(&rfc, 0, sizeof(rfc));
3122 rfc.mode = chan->mode;
3125 if (result == L2CAP_CONF_SUCCESS)
3126 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
/* Note: response carries our dcid in the scid field, as the spec
 * requires the responder to identify the peer's channel. */
3128 rsp->scid = cpu_to_le16(chan->dcid);
3129 rsp->result = cpu_to_le16(result);
3130 rsp->flags = __constant_cpu_to_le16(0);
/* Parse the peer's Configure Response options in @rsp (length @len) and
 * build a follow-up Configure Request into @data, adjusting our local
 * parameters (MTU, flush timeout, RFC, EWS, EFS) to values the peer will
 * accept.  *@result may be downgraded (e.g. to CONF_UNACCEPT).  Returns
 * the built request length or -ECONNREFUSED on an unacceptable response.
 * NOTE(review): numbered extract with gaps; some braces/breaks are not
 * visible in this listing.
 */
3135 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3137 struct l2cap_conf_req *req = data;
3138 void *ptr = req->data;
3141 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3142 struct l2cap_conf_efs efs;
3144 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3146 while (len >= L2CAP_CONF_OPT_SIZE) {
3147 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3150 case L2CAP_CONF_MTU:
/* Peer rejected our MTU below the spec minimum: fall back. */
3151 if (val < L2CAP_DEFAULT_MIN_MTU) {
3152 *result = L2CAP_CONF_UNACCEPT;
3153 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3156 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3159 case L2CAP_CONF_FLUSH_TO:
3160 chan->flush_to = val;
3161 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3165 case L2CAP_CONF_RFC:
3166 if (olen == sizeof(rfc))
3167 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not switch modes mid-negotiation. */
3169 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3170 rfc.mode != chan->mode)
3171 return -ECONNREFUSED;
3175 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3176 sizeof(rfc), (unsigned long) &rfc);
3179 case L2CAP_CONF_EWS:
3180 chan->ack_win = min_t(u16, val, chan->ack_win);
3181 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3185 case L2CAP_CONF_EFS:
3186 if (olen == sizeof(efs))
3187 memcpy(&efs, (void *)val, olen);
3189 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3190 efs.stype != L2CAP_SERV_NOTRAFIC &&
3191 efs.stype != chan->local_stype)
3192 return -ECONNREFUSED;
3194 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3195 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated to another mode. */
3200 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3201 return -ECONNREFUSED;
3203 chan->mode = rfc.mode;
3205 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3207 case L2CAP_MODE_ERTM:
3208 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3209 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3210 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3211 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3212 chan->ack_win = min_t(u16, chan->ack_win,
3215 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3216 chan->local_msdu = le16_to_cpu(efs.msdu);
3217 chan->local_sdu_itime =
3218 le32_to_cpu(efs.sdu_itime);
3219 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3220 chan->local_flush_to =
3221 le32_to_cpu(efs.flush_to);
3225 case L2CAP_MODE_STREAMING:
3226 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3230 req->dcid = cpu_to_le16(chan->dcid);
3231 req->flags = __constant_cpu_to_le16(0);
/* Fill in a bare Configure Response header (scid/result/flags) into
 * @data for @chan; used for empty/continuation responses.  The visible
 * lines set the header only; the return statement is outside this
 * extract.
 */
3236 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3238 struct l2cap_conf_rsp *rsp = data;
3239 void *ptr = rsp->data;
3241 BT_DBG("chan %p", chan);
3243 rsp->scid = cpu_to_le16(chan->dcid);
3244 rsp->result = cpu_to_le16(result);
3245 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred connection setup: send the successful Connection
 * Response that was held back (e.g. for deferred security/authorization)
 * and, unless one was already sent, kick off configuration with our
 * first Configure Request.
 */
3250 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3252 struct l2cap_conn_rsp rsp;
3253 struct l2cap_conn *conn = chan->conn;
3256 rsp.scid = cpu_to_le16(chan->dcid);
3257 rsp.dcid = cpu_to_le16(chan->scid);
3258 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3259 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3260 l2cap_send_cmd(conn, chan->ident,
3261 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* If a Configure Request is already in flight, do not send another. */
3263 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3266 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3267 l2cap_build_conf_req(chan, buf), buf);
3268 chan->num_conf_req++;
/* Extract final ERTM/streaming parameters (RFC and extended window
 * options) from a successful Configure Response @rsp of length @len and
 * apply them to @chan.  Falls back to sane defaults when the remote
 * omitted the options.
 */
3271 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3275 /* Use sane default values in case a misbehaving remote device
3276 * did not send an RFC or extended window size option.
3278 u16 txwin_ext = chan->ack_win;
3279 struct l2cap_conf_rfc rfc = {
3281 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3282 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3283 .max_pdu_size = cpu_to_le16(chan->imtu),
3284 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3287 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming modes carry RFC parameters. */
3289 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3292 while (len >= L2CAP_CONF_OPT_SIZE) {
3293 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3296 case L2CAP_CONF_RFC:
3297 if (olen == sizeof(rfc))
3298 memcpy(&rfc, (void *)val, olen);
3300 case L2CAP_CONF_EWS:
3307 case L2CAP_MODE_ERTM:
3308 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3309 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3310 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* ack_win is bounded by the extended window (if extended control
 * is in use) or by the RFC tx window otherwise. */
3311 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3312 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3314 chan->ack_win = min_t(u16, chan->ack_win,
3317 case L2CAP_MODE_STREAMING:
3318 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request, treat the feature-mask exchange as done and
 * proceed with connection start.
 */
3322 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3324 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3326 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3329 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3330 cmd->ident == conn->info_ident) {
3331 cancel_delayed_work(&conn->info_timer)
3333 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3334 conn->info_ident = 0;
3336 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listening channel for
 * the PSM, enforce link security, create and register the new child
 * channel, then answer with success, pending, or an error result.
 * NOTE(review): numbered extract with gaps; several goto labels and
 * braces are not visible in this listing.
 */
3342 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3344 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3345 struct l2cap_conn_rsp rsp;
3346 struct l2cap_chan *chan = NULL, *pchan;
3347 struct sock *parent, *sk = NULL;
3348 int result, status = L2CAP_CS_NO_INFO;
3350 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3351 __le16 psm = req->psm;
3353 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3355 /* Check if we have socket listening on psm */
3356 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3358 result = L2CAP_CR_BAD_PSM;
3364 mutex_lock(&conn->chan_lock);
3367 /* Check if the ACL is secure enough (if not SDP) */
3368 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3369 !hci_conn_check_link_mode(conn->hcon)) {
3370 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3371 result = L2CAP_CR_SEC_BLOCK;
3375 result = L2CAP_CR_NO_MEM;
3377 /* Check if we already have channel with that dcid */
3378 if (__l2cap_get_chan_by_dcid(conn, scid))
3381 chan = pchan->ops->new_connection(pchan);
3387 hci_conn_hold(conn->hcon);
3389 bacpy(&bt_sk(sk)->src, conn->src);
3390 bacpy(&bt_sk(sk)->dst, conn->dst);
3394 bt_accept_enqueue(parent, sk);
3396 __l2cap_chan_add(conn, chan);
3400 __set_chan_timer(chan, sk->sk_sndtimeo);
3402 chan->ident = cmd->ident;
/* If the feature-mask exchange is done we can decide immediately;
 * otherwise answer PENDING and trigger an Information Request. */
3404 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3405 if (l2cap_chan_check_security(chan)) {
3406 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3407 __l2cap_state_change(chan, BT_CONNECT2);
3408 result = L2CAP_CR_PEND;
3409 status = L2CAP_CS_AUTHOR_PEND;
3410 parent->sk_data_ready(parent, 0);
3412 __l2cap_state_change(chan, BT_CONFIG);
3413 result = L2CAP_CR_SUCCESS;
3414 status = L2CAP_CS_NO_INFO;
3417 __l2cap_state_change(chan, BT_CONNECT2);
3418 result = L2CAP_CR_PEND;
3419 status = L2CAP_CS_AUTHEN_PEND;
3422 __l2cap_state_change(chan, BT_CONNECT2);
3423 result = L2CAP_CR_PEND;
3424 status = L2CAP_CS_NO_INFO;
3428 release_sock(parent);
3429 mutex_unlock(&conn->chan_lock);
3432 rsp.scid = cpu_to_le16(scid);
3433 rsp.dcid = cpu_to_le16(dcid);
3434 rsp.result = cpu_to_le16(result);
3435 rsp.status = cpu_to_le16(status);
3436 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3438 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3439 struct l2cap_info_req info;
3440 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3442 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3443 conn->info_ident = l2cap_get_ident(conn);
3445 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3447 l2cap_send_cmd(conn, conn->info_ident,
3448 L2CAP_INFO_REQ, sizeof(info), &info);
/* Successful accept: immediately start configuration. */
3451 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3452 result == L2CAP_CR_SUCCESS) {
3454 set_bit(CONF_REQ_SENT, &chan->conf_state);
3455 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3456 l2cap_build_conf_req(chan, buf), buf);
3457 chan->num_conf_req++;
/* Handle an incoming Connection Response: find the matching channel (by
 * scid, or by the request ident for a pending response), then either
 * move to BT_CONFIG and send the first Configure Request, remain
 * pending, or tear the channel down on refusal.
 */
3463 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3465 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3466 u16 scid, dcid, result, status;
3467 struct l2cap_chan *chan;
3471 scid = __le16_to_cpu(rsp->scid);
3472 dcid = __le16_to_cpu(rsp->dcid);
3473 result = __le16_to_cpu(rsp->result);
3474 status = __le16_to_cpu(rsp->status);
3476 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3477 dcid, scid, result, status);
3479 mutex_lock(&conn->chan_lock);
3482 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No scid yet (pending): match by the ident we sent the request with. */
3488 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3497 l2cap_chan_lock(chan);
3500 case L2CAP_CR_SUCCESS:
3501 l2cap_state_change(chan, BT_CONFIG);
3504 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3506 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3509 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3510 l2cap_build_conf_req(chan, req), req);
3511 chan->num_conf_req++;
3515 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Refused: drop the channel. */
3519 l2cap_chan_del(chan, ECONNREFUSED);
3523 l2cap_chan_unlock(chan);
3526 mutex_unlock(&conn->chan_lock);
/* Choose the channel's FCS setting once configuration completes: FCS is
 * meaningful only in ERTM/streaming mode, and defaults to CRC16 unless
 * the peer explicitly opted out of FCS during negotiation.
 */
3531 static inline void set_default_fcs(struct l2cap_chan *chan)
3533 /* FCS is enabled only in ERTM or streaming mode, if one or both
3536 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3537 chan->fcs = L2CAP_FCS_NONE;
3538 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3539 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: accumulate option fragments in
 * chan->conf_req (rejecting overflows), and on the final fragment parse
 * the whole set, respond, and — when both directions are configured —
 * initialize ERTM and mark the channel ready.
 * NOTE(review): numbered extract with gaps; the unlock/return labels at
 * the end are only partially visible.
 */
3542 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3544 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3547 struct l2cap_chan *chan;
3550 dcid = __le16_to_cpu(req->dcid);
3551 flags = __le16_to_cpu(req->flags);
3553 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3555 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with an invalid-CID Command Reject. */
3559 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3560 struct l2cap_cmd_rej_cid rej;
3562 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3563 rej.scid = cpu_to_le16(chan->scid);
3564 rej.dcid = cpu_to_le16(chan->dcid);
3566 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3571 /* Reject if config buffer is too small. */
3572 len = cmd_len - sizeof(*req);
3573 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3574 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3575 l2cap_build_conf_rsp(chan, rsp,
3576 L2CAP_CONF_REJECT, flags), rsp);
3581 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3582 chan->conf_len += len;
3584 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3585 /* Incomplete config. Send empty response. */
3586 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3587 l2cap_build_conf_rsp(chan, rsp,
3588 L2CAP_CONF_SUCCESS, flags), rsp);
3592 /* Complete config. */
3593 len = l2cap_parse_conf_req(chan, rsp);
3595 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3599 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3600 chan->num_conf_rsp++;
3602 /* Reset config buffer. */
3605 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalize FCS, init ERTM if needed,
 * and signal the channel ready. */
3608 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3609 set_default_fcs(chan);
3611 if (chan->mode == L2CAP_MODE_ERTM ||
3612 chan->mode == L2CAP_MODE_STREAMING)
3613 err = l2cap_ertm_init(chan);
3616 l2cap_send_disconn_req(chan->conn, chan, -err);
3618 l2cap_chan_ready(chan);
3623 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3625 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3626 l2cap_build_conf_req(chan, buf), buf);
3627 chan->num_conf_req++;
3630 /* Got Conf Rsp PENDING from remote side and asume we sent
3631 Conf Rsp PENDING in the code above */
3632 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3633 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3635 /* check compatibility */
3637 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3638 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3640 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3641 l2cap_build_conf_rsp(chan, rsp,
3642 L2CAP_CONF_SUCCESS, flags), rsp);
3646 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response.  SUCCESS records the final RFC
 * parameters; PENDING may require a re-parse and compatibility reply;
 * UNACCEPT retries with adjusted options (bounded by MAX_CONF_RSP);
 * anything else disconnects.  When both directions are done the channel
 * is initialized (ERTM) and marked ready.
 */
3650 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3652 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3653 u16 scid, flags, result;
3654 struct l2cap_chan *chan;
3655 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3658 scid = __le16_to_cpu(rsp->scid);
3659 flags = __le16_to_cpu(rsp->flags);
3660 result = __le16_to_cpu(rsp->result);
3662 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3665 chan = l2cap_get_chan_by_scid(conn, scid);
3670 case L2CAP_CONF_SUCCESS:
3671 l2cap_conf_rfc_get(chan, rsp->data, len);
3672 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3675 case L2CAP_CONF_PENDING:
3676 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3678 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3681 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3684 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3688 /* check compatibility */
3690 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3691 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3693 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3694 l2cap_build_conf_rsp(chan, buf,
3695 L2CAP_CONF_SUCCESS, 0x0000), buf);
3699 case L2CAP_CONF_UNACCEPT:
3700 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard the stack request buffer against oversized option data. */
3703 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3704 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3708 /* throw out any old stored conf requests */
3709 result = L2CAP_CONF_SUCCESS;
3710 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3713 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3717 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3718 L2CAP_CONF_REQ, len, req);
3719 chan->num_conf_req++;
3720 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: fail the channel and disconnect. */
3726 l2cap_chan_set_err(chan, ECONNRESET);
3728 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3729 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3733 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3736 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3738 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3739 set_default_fcs(chan);
3741 if (chan->mode == L2CAP_MODE_ERTM ||
3742 chan->mode == L2CAP_MODE_STREAMING)
3743 err = l2cap_ertm_init(chan);
3746 l2cap_send_disconn_req(chan->conn, chan, -err);
3748 l2cap_chan_ready(chan);
3752 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, mark the socket shut down, and tear down the
 * channel (a hold/put pair keeps @chan alive across chan_del + close).
 */
3756 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3758 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3759 struct l2cap_disconn_rsp rsp;
3761 struct l2cap_chan *chan;
3764 scid = __le16_to_cpu(req->scid);
3765 dcid = __le16_to_cpu(req->dcid);
3767 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3769 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our local (source) CID. */
3771 chan = __l2cap_get_chan_by_scid(conn, dcid);
3773 mutex_unlock(&conn->chan_lock);
3777 l2cap_chan_lock(chan);
3781 rsp.dcid = cpu_to_le16(chan->scid);
3782 rsp.scid = cpu_to_le16(chan->dcid);
3783 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3786 sk->sk_shutdown = SHUTDOWN_MASK;
3789 l2cap_chan_hold(chan);
3790 l2cap_chan_del(chan, ECONNRESET);
3792 l2cap_chan_unlock(chan);
3794 chan->ops->close(chan);
3795 l2cap_chan_put(chan);
3797 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect, so remove and close the channel (err 0 = clean shutdown).
 */
3802 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3804 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3806 struct l2cap_chan *chan;
3808 scid = __le16_to_cpu(rsp->scid);
3809 dcid = __le16_to_cpu(rsp->dcid);
3811 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3813 mutex_lock(&conn->chan_lock);
3815 chan = __l2cap_get_chan_by_scid(conn, scid);
3817 mutex_unlock(&conn->chan_lock);
3821 l2cap_chan_lock(chan);
/* Hold across del/close so the final put is safe. */
3823 l2cap_chan_hold(chan);
3824 l2cap_chan_del(chan, 0);
3826 l2cap_chan_unlock(chan);
3828 chan->ops->close(chan);
3829 l2cap_chan_put(chan);
3831 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request: answer with our feature mask,
 * our fixed-channel map (advertising A2MP support conditionally), or
 * NOTSUPP for any other type.
 */
3836 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3838 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3841 type = __le16_to_cpu(req->type);
3843 BT_DBG("type 0x%4.4x", type);
3845 if (type == L2CAP_IT_FEAT_MASK) {
3847 u32 feat_mask = l2cap_feat_mask;
3848 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3849 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3850 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3852 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3855 feat_mask |= L2CAP_FEAT_EXT_FLOW
3856 | L2CAP_FEAT_EXT_WINDOW;
3858 put_unaligned_le32(feat_mask, rsp->data);
3859 l2cap_send_cmd(conn, cmd->ident,
3860 L2CAP_INFO_RSP, sizeof(buf), buf);
3861 } else if (type == L2CAP_IT_FIXED_CHAN) {
3863 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when available. */
3866 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3868 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3870 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3871 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3872 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3873 l2cap_send_cmd(conn, cmd->ident,
3874 L2CAP_INFO_RSP, sizeof(buf), buf);
3876 struct l2cap_info_rsp rsp;
3877 rsp.type = cpu_to_le16(type);
3878 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3879 l2cap_send_cmd(conn, cmd->ident,
3880 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response.  On a feature-mask answer,
 * optionally follow up with a fixed-channel query; once the exchange is
 * complete, start any channels waiting on it.
 */
3886 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3888 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3891 type = __le16_to_cpu(rsp->type);
3892 result = __le16_to_cpu(rsp->result);
3894 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3896 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3897 if (cmd->ident != conn->info_ident ||
3898 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3901 cancel_delayed_work(&conn->info_timer);
3903 if (result != L2CAP_IR_SUCCESS) {
3904 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3905 conn->info_ident = 0;
3907 l2cap_conn_start(conn);
3913 case L2CAP_IT_FEAT_MASK:
3914 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: query the fixed-channel map next. */
3916 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3917 struct l2cap_info_req req;
3918 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3920 conn->info_ident = l2cap_get_ident(conn);
3922 l2cap_send_cmd(conn, conn->info_ident,
3923 L2CAP_INFO_REQ, sizeof(req), &req);
3925 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3926 conn->info_ident = 0;
3928 l2cap_conn_start(conn);
3932 case L2CAP_IT_FIXED_CHAN:
3933 conn->fixed_chan_mask = rsp->data[0];
3934 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3935 conn->info_ident = 0;
3937 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Currently a placeholder that
 * validates the command length and always answers NO_MEM (rejected).
 */
3944 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3945 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3948 struct l2cap_create_chan_req *req = data;
3949 struct l2cap_create_chan_rsp rsp;
3952 if (cmd_len != sizeof(*req))
3958 psm = le16_to_cpu(req->psm);
3959 scid = le16_to_cpu(req->scid);
3961 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3963 /* Placeholder: Always reject */
3965 rsp.scid = cpu_to_le16(scid);
3966 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3967 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3969 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the wire format of a Connection
 * Response, so delegate to the regular handler.
 */
3975 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3976 struct l2cap_cmd_hdr *cmd, void *data)
3978 BT_DBG("conn %p", conn);
3980 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for @icid with the given @result. */
3983 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3984 u16 icid, u16 result)
3986 struct l2cap_move_chan_rsp rsp;
3988 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3990 rsp.icid = cpu_to_le16(icid);
3991 rsp.result = cpu_to_le16(result);
3993 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm for @icid, allocating a fresh ident and
 * remembering it on @chan (when non-NULL) so the Confirm Response can
 * be matched later.
 */
3996 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3997 struct l2cap_chan *chan,
3998 u16 icid, u16 result)
4000 struct l2cap_move_chan_cfm cfm;
4003 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4005 ident = l2cap_get_ident(conn);
4007 chan->ident = ident;
4009 cfm.icid = cpu_to_le16(icid);
4010 cfm.result = cpu_to_le16(result);
4012 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirm Response for @icid, echoing @ident. */
4015 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4018 struct l2cap_move_chan_cfm_rsp rsp;
4020 BT_DBG("icid 0x%4.4x", icid);
4022 rsp.icid = cpu_to_le16(icid);
4023 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Placeholder implementation: validate
 * the length and always refuse with NOT_ALLOWED.
 */
4026 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4027 struct l2cap_cmd_hdr *cmd,
4028 u16 cmd_len, void *data)
4030 struct l2cap_move_chan_req *req = data;
4032 u16 result = L2CAP_MR_NOT_ALLOWED;
4034 if (cmd_len != sizeof(*req))
4037 icid = le16_to_cpu(req->icid);
4039 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4044 /* Placeholder: Always refuse */
4045 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder implementation: always
 * answer with an UNCONFIRMED Move Channel Confirm.
 */
4050 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4051 struct l2cap_cmd_hdr *cmd,
4052 u16 cmd_len, void *data)
4054 struct l2cap_move_chan_rsp *rsp = data;
4057 if (cmd_len != sizeof(*rsp))
4060 icid = le16_to_cpu(rsp->icid);
4061 result = le16_to_cpu(rsp->result);
4063 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4065 /* Placeholder: Always unconfirmed */
4066 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate the length and acknowledge
 * with a Confirm Response.
 */
4071 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4072 struct l2cap_cmd_hdr *cmd,
4073 u16 cmd_len, void *data)
4075 struct l2cap_move_chan_cfm *cfm = data;
4078 if (cmd_len != sizeof(*cfm))
4081 icid = le16_to_cpu(cfm->icid);
4082 result = le16_to_cpu(cfm->result);
4084 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4086 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: only validates the length and
 * logs the icid (no state change in the visible code).
 */
4091 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4092 struct l2cap_cmd_hdr *cmd,
4093 u16 cmd_len, void *data)
4095 struct l2cap_move_chan_cfm_rsp *rsp = data;
4098 if (cmd_len != sizeof(*rsp))
4101 icid = le16_to_cpu(rsp->icid);
4103 BT_DBG("icid 0x%4.4x", icid);
/* Validate LE connection-parameter-update values: interval min/max in
 * [6, 3200] with min <= max, supervision timeout multiplier in
 * [10, 3200], timeout strictly longer than the max interval, and slave
 * latency within both the spec bound (499) and the derived maximum.
 */
4108 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4113 if (min > max || min < 6 || max > 3200)
4116 if (to_multiplier < 10 || to_multiplier > 3200)
4119 if (max >= to_multiplier * 8)
4122 max_latency = (to_multiplier * 8 / max) - 1;
4123 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master side only):
 * validate the proposed parameters, send accept/reject, and on accept
 * issue the HCI LE connection update.
 */
4129 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4130 struct l2cap_cmd_hdr *cmd, u8 *data)
4132 struct hci_conn *hcon = conn->hcon;
4133 struct l2cap_conn_param_update_req *req;
4134 struct l2cap_conn_param_update_rsp rsp;
4135 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the link master may apply parameter updates. */
4138 if (!(hcon->link_mode & HCI_LM_MASTER))
4141 cmd_len = __le16_to_cpu(cmd->len);
4142 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4145 req = (struct l2cap_conn_param_update_req *) data;
4146 min = __le16_to_cpu(req->min);
4147 max = __le16_to_cpu(req->max);
4148 latency = __le16_to_cpu(req->latency);
4149 to_multiplier = __le16_to_cpu(req->to_multiplier);
4151 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4152 min, max, latency, to_multiplier);
4154 memset(&rsp, 0, sizeof(rsp));
4156 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4158 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4160 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4162 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4166 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler based on the
 * command code; unknown codes are logged as errors.
 */
4171 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4172 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4176 switch (cmd->code) {
4177 case L2CAP_COMMAND_REJ:
4178 l2cap_command_rej(conn, cmd, data);
4181 case L2CAP_CONN_REQ:
4182 err = l2cap_connect_req(conn, cmd, data);
4185 case L2CAP_CONN_RSP:
4186 err = l2cap_connect_rsp(conn, cmd, data);
4189 case L2CAP_CONF_REQ:
4190 err = l2cap_config_req(conn, cmd, cmd_len, data);
4193 case L2CAP_CONF_RSP:
4194 err = l2cap_config_rsp(conn, cmd, data);
4197 case L2CAP_DISCONN_REQ:
4198 err = l2cap_disconnect_req(conn, cmd, data);
4201 case L2CAP_DISCONN_RSP:
4202 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo Request is answered inline; Echo Response needs no action. */
4205 case L2CAP_ECHO_REQ:
4206 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4209 case L2CAP_ECHO_RSP:
4212 case L2CAP_INFO_REQ:
4213 err = l2cap_information_req(conn, cmd, data);
4216 case L2CAP_INFO_RSP:
4217 err = l2cap_information_rsp(conn, cmd, data);
4220 case L2CAP_CREATE_CHAN_REQ:
4221 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4224 case L2CAP_CREATE_CHAN_RSP:
4225 err = l2cap_create_channel_rsp(conn, cmd, data);
4228 case L2CAP_MOVE_CHAN_REQ:
4229 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4232 case L2CAP_MOVE_CHAN_RSP:
4233 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4236 case L2CAP_MOVE_CHAN_CFM:
4237 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4240 case L2CAP_MOVE_CHAN_CFM_RSP:
4241 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4245 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command; only connection-parameter-update
 * requests are handled, Command Reject and update responses are ignored,
 * anything else is logged as unknown.
 */
4253 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4254 struct l2cap_cmd_hdr *cmd, u8 *data)
4256 switch (cmd->code) {
4257 case L2CAP_COMMAND_REJ:
4260 case L2CAP_CONN_PARAM_UPDATE_REQ:
4261 return l2cap_conn_param_update_req(conn, cmd, data);
4263 case L2CAP_CONN_PARAM_UPDATE_RSP:
4267 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the signalling channel payload of @skb: iterate over each
 * embedded command header, validate its length/ident, and dispatch to
 * the LE or BR/EDR handler.  Handler errors trigger a Command Reject.
 */
4272 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4273 struct sk_buff *skb)
4275 u8 *data = skb->data;
4277 struct l2cap_cmd_hdr cmd;
4280 l2cap_raw_recv(conn, skb);
4282 while (len >= L2CAP_CMD_HDR_SIZE) {
4284 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4285 data += L2CAP_CMD_HDR_SIZE;
4286 len -= L2CAP_CMD_HDR_SIZE;
4288 cmd_len = le16_to_cpu(cmd.len);
4290 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more payload than remains, or with a zero
 * ident, is malformed: stop parsing this skb. */
4292 if (cmd_len > len || !cmd.ident) {
4293 BT_DBG("corrupted command");
4297 if (conn->hcon->type == LE_LINK)
4298 err = l2cap_le_sig_cmd(conn, &cmd, data);
4300 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4303 struct l2cap_cmd_rej_unk rej;
4305 BT_ERR("Wrong link type (%d)", err);
4307 /* FIXME: Map err to a valid reason */
4308 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4309 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received frame when the channel
 * uses FCS: strip the FCS from the skb, recompute the CRC over the
 * header plus payload, and compare against the received value.
 */
4319 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4321 u16 our_fcs, rcv_fcs;
/* Header length depends on whether extended control fields are used. */
4324 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4325 hdr_size = L2CAP_EXT_HDR_SIZE;
4327 hdr_size = L2CAP_ENH_HDR_SIZE;
4329 if (chan->fcs == L2CAP_FCS_CRC16) {
4330 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4331 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4332 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4334 if (our_fcs != rcv_fcs)
/* Acknowledge received frames while conveying our busy state: send RNR
 * if locally busy, otherwise flush pending I-frames, and fall back to an
 * RR S-frame if the F-bit still was not carried by any of them.
 */
4340 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4342 struct l2cap_ctrl control;
4344 BT_DBG("chan %p", chan);
4346 memset(&control, 0, sizeof(control));
4349 control.reqseq = chan->buffer_seq;
4350 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4352 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4353 control.super = L2CAP_SUPER_RNR;
4354 l2cap_send_sframe(chan, &control);
/* Remote just cleared busy and we still have unacked frames:
 * restart the retransmission timer. */
4357 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4358 chan->unacked_frames > 0)
4359 __set_retrans_timer(chan);
4361 /* Send pending iframes */
4362 l2cap_ertm_send(chan);
4364 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4365 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4366 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4369 control.super = L2CAP_SUPER_RR;
4370 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list, tracking the tail fragment in
 * *@last_frag, and keep skb->len/data_len/truesize accounting correct.
 */
4374 static void append_skb_frag(struct sk_buff *skb,
4375 struct sk_buff *new_frag, struct sk_buff **last_frag)
4377 /* skb->len reflects data in skb as well as all fragments
4378 * skb->data_len reflects only data in fragments
4380 if (!skb_has_frag_list(skb))
4381 skb_shinfo(skb)->frag_list = new_frag;
4383 new_frag->next = NULL;
4385 (*last_frag)->next = new_frag;
4386 *last_frag = new_frag;
4388 skb->len += new_frag->len;
4389 skb->data_len += new_frag->len;
4390 skb->truesize += new_frag->truesize;
/*
 * Reassemble an SDU from I-frame payloads per the SAR bits in @control.
 * UNSEGMENTED frames are delivered directly via chan->ops->recv().
 * START frames pull the 16-bit SDU length, validate it against imtu,
 * and begin a partial SDU; CONTINUE/END frames are appended with
 * append_skb_frag(), and a completed SDU (accumulated len == sdu_len)
 * is delivered.  Error paths free the partial SDU and reset the
 * chan->sdu* tracking fields.
 */
4393 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4394 struct l2cap_ctrl *control)
4398 switch (control->sar) {
4399 case L2CAP_SAR_UNSEGMENTED:
4403 err = chan->ops->recv(chan, skb);
4406 case L2CAP_SAR_START:
/* SDU length prefix precedes the payload in the first segment. */
4410 chan->sdu_len = get_unaligned_le16(skb->data);
4411 skb_pull(skb, L2CAP_SDULEN_SIZE);
4413 if (chan->sdu_len > chan->imtu) {
4418 if (skb->len >= chan->sdu_len)
4422 chan->sdu_last_frag = skb;
4428 case L2CAP_SAR_CONTINUE:
4432 append_skb_frag(chan->sdu, skb,
4433 &chan->sdu_last_frag);
/* A CONTINUE segment must not already complete the SDU. */
4436 if (chan->sdu->len >= chan->sdu_len)
4446 append_skb_frag(chan->sdu, skb,
4447 &chan->sdu_last_frag);
/* END segment: total length must match exactly, then deliver. */
4450 if (chan->sdu->len != chan->sdu_len)
4453 err = chan->ops->recv(chan, chan->sdu);
4456 /* Reassembly complete */
4458 chan->sdu_last_frag = NULL;
/* Failure path: discard the partial SDU and clear tracking state. */
4466 kfree_skb(chan->sdu);
4468 chan->sdu_last_frag = NULL;
/*
 * Feed a local-busy transition into the ERTM tx state machine.
 * No-op for channels not in ERTM mode.
 */
4475 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4479 if (chan->mode != L2CAP_MODE_ERTM)
4482 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4483 l2cap_tx(chan, NULL, NULL, event);
/*
 * Drain the SREJ hold queue: pull consecutive in-sequence frames
 * (matching buffer_seq) out of srej_q and reassemble them, stopping
 * at the first gap or if local-busy sets in.  When the queue empties,
 * return to the RECV state and ack the peer.
 */
4486 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4489 /* Pass sequential frames to l2cap_reassemble_sdu()
4490 * until a gap is encountered.
4493 BT_DBG("chan %p", chan);
4495 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4496 struct sk_buff *skb;
4497 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4498 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4500 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4505 skb_unlink(skb, &chan->srej_q);
4506 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4507 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4512 if (skb_queue_empty(&chan->srej_q)) {
4513 chan->rx_state = L2CAP_RX_STATE_RECV;
4514 l2cap_send_ack(chan);
/*
 * Handle a received SREJ S-frame: retransmit the single I-frame the
 * peer asked for (control->reqseq).  Disconnects on an invalid reqseq
 * (equal to next_tx_seq, i.e. never sent) or when the frame has
 * already hit the max_tx retry limit.  P/F bit handling follows the
 * ERTM spec: a poll forces an F-bit reply; a final bit must match the
 * saved SREJ reqseq unless CONN_SREJ_ACT says it was already handled.
 */
4520 static void l2cap_handle_srej(struct l2cap_chan *chan,
4521 struct l2cap_ctrl *control)
4523 struct sk_buff *skb;
4525 BT_DBG("chan %p, control %p", chan, control);
4527 if (control->reqseq == chan->next_tx_seq) {
4528 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4529 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4533 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4536 BT_DBG("Seq %d not available for retransmission",
4541 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4542 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4543 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4547 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4549 if (control->poll) {
4550 l2cap_pass_to_tx(chan, control);
4552 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4553 l2cap_retransmit(chan, control);
4554 l2cap_ertm_send(chan);
4556 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4557 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4558 chan->srej_save_reqseq = control->reqseq;
4561 l2cap_pass_to_tx_fbit(chan, control);
4563 if (control->final) {
4564 if (chan->srej_save_reqseq != control->reqseq ||
4565 !test_and_clear_bit(CONN_SREJ_ACT,
4567 l2cap_retransmit(chan, control);
4569 l2cap_retransmit(chan, control);
4570 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4571 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4572 chan->srej_save_reqseq = control->reqseq;
/*
 * Handle a received REJ S-frame: retransmit everything starting at
 * control->reqseq.  As with SREJ, disconnects on an invalid reqseq or
 * an exceeded retry limit.  CONN_REJ_ACT suppresses a duplicate
 * retransmit-all when the F-bit answers a poll we already acted on.
 */
4578 static void l2cap_handle_rej(struct l2cap_chan *chan,
4579 struct l2cap_ctrl *control)
4581 struct sk_buff *skb;
4583 BT_DBG("chan %p, control %p", chan, control);
4585 if (control->reqseq == chan->next_tx_seq) {
4586 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4587 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4591 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4593 if (chan->max_tx && skb &&
4594 bt_cb(skb)->control.retries >= chan->max_tx) {
4595 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4596 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4600 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4602 l2cap_pass_to_tx(chan, control);
4604 if (control->final) {
4605 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4606 l2cap_retransmit_all(chan, control);
4608 l2cap_retransmit_all(chan, control);
4609 l2cap_ertm_send(chan);
4610 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4611 set_bit(CONN_REJ_ACT, &chan->conn_state);
/*
 * Classify a received I-frame's txseq relative to the current receive
 * window and SREJ recovery state.  Returns one of the L2CAP_TXSEQ_*
 * codes: EXPECTED / EXPECTED_SREJ for in-order delivery, DUPLICATE /
 * DUPLICATE_SREJ for already-seen frames, UNEXPECTED(_SREJ) for gaps
 * that need (more) SREJs, and INVALID(_IGNORE) for frames outside the
 * tx window.  See the "double poll" commentary below for why
 * out-of-window frames are safely ignorable only when tx_win is at
 * most half of tx_win_max.
 */
4615 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4617 BT_DBG("chan %p, txseq %d", chan, txseq);
4619 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4620 chan->expected_tx_seq);
4622 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4623 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4625 /* See notes below regarding "double poll" and
4628 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4629 BT_DBG("Invalid/Ignore - after SREJ");
4630 return L2CAP_TXSEQ_INVALID_IGNORE;
4632 BT_DBG("Invalid - in window after SREJ sent");
4633 return L2CAP_TXSEQ_INVALID;
4637 if (chan->srej_list.head == txseq) {
4638 BT_DBG("Expected SREJ");
4639 return L2CAP_TXSEQ_EXPECTED_SREJ;
4642 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4643 BT_DBG("Duplicate SREJ - txseq already stored");
4644 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4647 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4648 BT_DBG("Unexpected SREJ - not requested");
4649 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4653 if (chan->expected_tx_seq == txseq) {
4654 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4656 BT_DBG("Invalid - txseq outside tx window");
4657 return L2CAP_TXSEQ_INVALID;
4660 return L2CAP_TXSEQ_EXPECTED;
/* txseq earlier than expected_tx_seq (modulo window) => duplicate. */
4664 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4665 __seq_offset(chan, chan->expected_tx_seq,
4666 chan->last_acked_seq)){
4667 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4668 return L2CAP_TXSEQ_DUPLICATE;
4671 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4672 /* A source of invalid packets is a "double poll" condition,
4673 * where delays cause us to send multiple poll packets. If
4674 * the remote stack receives and processes both polls,
4675 * sequence numbers can wrap around in such a way that a
4676 * resent frame has a sequence number that looks like new data
4677 * with a sequence gap. This would trigger an erroneous SREJ
4680 * Fortunately, this is impossible with a tx window that's
4681 * less than half of the maximum sequence number, which allows
4682 * invalid frames to be safely ignored.
4684 * With tx window sizes greater than half of the tx window
4685 * maximum, the frame is invalid and cannot be ignored. This
4686 * causes a disconnect.
4689 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4690 BT_DBG("Invalid/Ignore - txseq outside tx window");
4691 return L2CAP_TXSEQ_INVALID_IGNORE;
4693 BT_DBG("Invalid - txseq outside tx window");
4694 return L2CAP_TXSEQ_INVALID;
4697 BT_DBG("Unexpected - txseq indicates missing frames");
4698 return L2CAP_TXSEQ_UNEXPECTED;
/*
 * ERTM receive state machine handler for the RECV (in-sequence) state.
 * Dispatches on @event: received I-frames are classified by txseq and
 * either reassembled/delivered, queued on srej_q (entering SREJ_SENT
 * after sending an SREJ for the gap), treated as duplicates, ignored,
 * or cause a disconnect; RR/RNR/REJ/SREJ S-frames drive the tx side.
 * Any skb that was not queued or consumed (skb_in_use stays false) is
 * freed before returning.
 */
4702 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4703 struct l2cap_ctrl *control,
4704 struct sk_buff *skb, u8 event)
4707 bool skb_in_use = 0;
4709 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4713 case L2CAP_EV_RECV_IFRAME:
4714 switch (l2cap_classify_txseq(chan, control->txseq)) {
4715 case L2CAP_TXSEQ_EXPECTED:
4716 l2cap_pass_to_tx(chan, control);
/* While locally busy, drop expected frames; recovery resends them. */
4718 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4719 BT_DBG("Busy, discarding expected seq %d",
4724 chan->expected_tx_seq = __next_seq(chan,
4727 chan->buffer_seq = chan->expected_tx_seq;
4730 err = l2cap_reassemble_sdu(chan, skb, control);
/* F-bit answering a poll: retransmit-all unless already handled. */
4734 if (control->final) {
4735 if (!test_and_clear_bit(CONN_REJ_ACT,
4736 &chan->conn_state)) {
4738 l2cap_retransmit_all(chan, control);
4739 l2cap_ertm_send(chan);
4743 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4744 l2cap_send_ack(chan);
4746 case L2CAP_TXSEQ_UNEXPECTED:
4747 l2cap_pass_to_tx(chan, control);
4749 /* Can't issue SREJ frames in the local busy state.
4750 * Drop this frame, it will be seen as missing
4751 * when local busy is exited.
4753 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4754 BT_DBG("Busy, discarding unexpected seq %d",
4759 /* There was a gap in the sequence, so an SREJ
4760 * must be sent for each missing frame. The
4761 * current frame is stored for later use.
4763 skb_queue_tail(&chan->srej_q, skb);
4765 BT_DBG("Queued %p (queue len %d)", skb,
4766 skb_queue_len(&chan->srej_q));
4768 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4769 l2cap_seq_list_clear(&chan->srej_list);
4770 l2cap_send_srej(chan, control->txseq);
4772 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4774 case L2CAP_TXSEQ_DUPLICATE:
4775 l2cap_pass_to_tx(chan, control);
4777 case L2CAP_TXSEQ_INVALID_IGNORE:
4779 case L2CAP_TXSEQ_INVALID:
4781 l2cap_send_disconn_req(chan->conn, chan,
4786 case L2CAP_EV_RECV_RR:
4787 l2cap_pass_to_tx(chan, control);
4788 if (control->final) {
4789 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4791 if (!test_and_clear_bit(CONN_REJ_ACT,
4792 &chan->conn_state)) {
4794 l2cap_retransmit_all(chan, control);
4797 l2cap_ertm_send(chan);
4798 } else if (control->poll) {
4799 l2cap_send_i_or_rr_or_rnr(chan);
4801 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4802 &chan->conn_state) &&
4803 chan->unacked_frames)
4804 __set_retrans_timer(chan);
4806 l2cap_ertm_send(chan);
4809 case L2CAP_EV_RECV_RNR:
4810 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4811 l2cap_pass_to_tx(chan, control);
4812 if (control && control->poll) {
4813 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4814 l2cap_send_rr_or_rnr(chan, 0);
4816 __clear_retrans_timer(chan);
4817 l2cap_seq_list_clear(&chan->retrans_list);
4819 case L2CAP_EV_RECV_REJ:
4820 l2cap_handle_rej(chan, control);
4822 case L2CAP_EV_RECV_SREJ:
4823 l2cap_handle_srej(chan, control);
/* Free any skb not taken ownership of above. */
4829 if (skb && !skb_in_use) {
4830 BT_DBG("Freeing %p", skb);
/*
 * ERTM receive state machine handler for the SREJ_SENT (selective-
 * retransmission recovery) state.  I-frames are queued on srej_q
 * rather than delivered directly: expected frames extend the queue,
 * frames answering the head-of-list SREJ trigger a drain via
 * l2cap_rx_queued_iframes(), new gaps generate additional SREJs, and
 * duplicates are dropped.  RR/RNR/REJ/SREJ S-frames drive the tx side
 * as in the RECV state, with srej-tail polls used to re-request
 * outstanding frames.  Unconsumed skbs are freed before returning.
 */
4837 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4838 struct l2cap_ctrl *control,
4839 struct sk_buff *skb, u8 event)
4842 u16 txseq = control->txseq;
4843 bool skb_in_use = 0;
4845 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4849 case L2CAP_EV_RECV_IFRAME:
4850 switch (l2cap_classify_txseq(chan, txseq)) {
4851 case L2CAP_TXSEQ_EXPECTED:
4852 /* Keep frame for reassembly later */
4853 l2cap_pass_to_tx(chan, control);
4854 skb_queue_tail(&chan->srej_q, skb);
4856 BT_DBG("Queued %p (queue len %d)", skb,
4857 skb_queue_len(&chan->srej_q));
4859 chan->expected_tx_seq = __next_seq(chan, txseq);
4861 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This frame satisfies the SREJ at the head of the list. */
4862 l2cap_seq_list_pop(&chan->srej_list);
4864 l2cap_pass_to_tx(chan, control);
4865 skb_queue_tail(&chan->srej_q, skb);
4867 BT_DBG("Queued %p (queue len %d)", skb,
4868 skb_queue_len(&chan->srej_q));
4870 err = l2cap_rx_queued_iframes(chan);
4875 case L2CAP_TXSEQ_UNEXPECTED:
4876 /* Got a frame that can't be reassembled yet.
4877 * Save it for later, and send SREJs to cover
4878 * the missing frames.
4880 skb_queue_tail(&chan->srej_q, skb);
4882 BT_DBG("Queued %p (queue len %d)", skb,
4883 skb_queue_len(&chan->srej_q));
4885 l2cap_pass_to_tx(chan, control);
4886 l2cap_send_srej(chan, control->txseq);
4888 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4889 /* This frame was requested with an SREJ, but
4890 * some expected retransmitted frames are
4891 * missing. Request retransmission of missing
4894 skb_queue_tail(&chan->srej_q, skb);
4896 BT_DBG("Queued %p (queue len %d)", skb,
4897 skb_queue_len(&chan->srej_q));
4899 l2cap_pass_to_tx(chan, control);
4900 l2cap_send_srej_list(chan, control->txseq);
4902 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4903 /* We've already queued this frame. Drop this copy. */
4904 l2cap_pass_to_tx(chan, control);
4906 case L2CAP_TXSEQ_DUPLICATE:
4907 /* Expecting a later sequence number, so this frame
4908 * was already received. Ignore it completely.
4911 case L2CAP_TXSEQ_INVALID_IGNORE:
4913 case L2CAP_TXSEQ_INVALID:
4915 l2cap_send_disconn_req(chan->conn, chan,
4920 case L2CAP_EV_RECV_RR:
4921 l2cap_pass_to_tx(chan, control);
4922 if (control->final) {
4923 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4925 if (!test_and_clear_bit(CONN_REJ_ACT,
4926 &chan->conn_state)) {
4928 l2cap_retransmit_all(chan, control);
4931 l2cap_ertm_send(chan);
4932 } else if (control->poll) {
4933 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4934 &chan->conn_state) &&
4935 chan->unacked_frames) {
4936 __set_retrans_timer(chan);
4939 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4940 l2cap_send_srej_tail(chan);
4942 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4943 &chan->conn_state) &&
4944 chan->unacked_frames)
4945 __set_retrans_timer(chan);
4947 l2cap_send_ack(chan);
4950 case L2CAP_EV_RECV_RNR:
4951 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4952 l2cap_pass_to_tx(chan, control);
4953 if (control->poll) {
4954 l2cap_send_srej_tail(chan);
4956 struct l2cap_ctrl rr_control;
4957 memset(&rr_control, 0, sizeof(rr_control));
4958 rr_control.sframe = 1;
4959 rr_control.super = L2CAP_SUPER_RR;
4960 rr_control.reqseq = chan->buffer_seq;
4961 l2cap_send_sframe(chan, &rr_control);
4965 case L2CAP_EV_RECV_REJ:
4966 l2cap_handle_rej(chan, control);
4968 case L2CAP_EV_RECV_SREJ:
4969 l2cap_handle_srej(chan, control);
/* Free any skb not taken ownership of above. */
4973 if (skb && !skb_in_use) {
4974 BT_DBG("Freeing %p", skb);
/*
 * Return true if @reqseq acknowledges a frame that has been sent but
 * not yet acked, i.e. it lies within [expected_ack_seq, next_tx_seq]
 * in modulo-sequence-space.
 */
4981 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4983 /* Make sure reqseq is for a packet that has been sent but not acked */
4986 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4987 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/*
 * Top-level ERTM receive dispatcher: validates the frame's reqseq,
 * then routes the event to the handler for the current rx_state
 * (RECV or SREJ_SENT).  An invalid reqseq tears the channel down
 * with ECONNRESET, as required for ERTM protocol violations.
 */
4990 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4991 struct sk_buff *skb, u8 event)
4995 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4996 control, skb, event, chan->rx_state);
4998 if (__valid_reqseq(chan, control->reqseq)) {
4999 switch (chan->rx_state) {
5000 case L2CAP_RX_STATE_RECV:
5001 err = l2cap_rx_state_recv(chan, control, skb, event);
5003 case L2CAP_RX_STATE_SREJ_SENT:
5004 err = l2cap_rx_state_srej_sent(chan, control, skb,
5012 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5013 control->reqseq, chan->next_tx_seq,
5014 chan->expected_ack_seq);
5015 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/*
 * Streaming-mode receive: deliver only in-sequence frames
 * (L2CAP_TXSEQ_EXPECTED); anything else is dropped without
 * retransmission, since streaming mode has no recovery.  A failed
 * reassembly discards the partial SDU.  The sequence trackers are
 * resynchronized to the received txseq either way.
 */
5021 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5022 struct sk_buff *skb)
5026 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5029 if (l2cap_classify_txseq(chan, control->txseq) ==
5030 L2CAP_TXSEQ_EXPECTED) {
5031 l2cap_pass_to_tx(chan, control);
5033 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5034 __next_seq(chan, chan->buffer_seq));
5036 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5038 l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failed: drop the partial SDU and reset tracking. */
5041 kfree_skb(chan->sdu);
5044 chan->sdu_last_frag = NULL;
5048 BT_DBG("Freeing %p", skb);
5053 chan->last_acked_seq = control->txseq;
5054 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/*
 * Common entry for ERTM/streaming data frames.  Unpacks the control
 * field, verifies the FCS (corrupt I-frames are silently dropped so
 * ERTM recovery retransmits them), validates the payload against the
 * negotiated MPS, then dispatches: I-frames go to l2cap_rx() (ERTM)
 * or l2cap_stream_rx() (streaming); S-frames are mapped through
 * rx_func_to_event[] by their super field.  Protocol violations
 * (bad F/P bits, S-frames in streaming mode, oversized payload)
 * disconnect with ECONNRESET.
 */
5059 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5061 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5065 __unpack_control(chan, skb);
5070 * We can just drop the corrupted I-frame here.
5071 * Receiver will miss it and start proper recovery
5072 * procedures and ask for retransmission.
5074 if (l2cap_check_fcs(chan, skb))
/* Exclude the SDU-length prefix and FCS from the MPS payload check. */
5077 if (!control->sframe && control->sar == L2CAP_SAR_START)
5078 len -= L2CAP_SDULEN_SIZE;
5080 if (chan->fcs == L2CAP_FCS_CRC16)
5081 len -= L2CAP_FCS_SIZE;
5083 if (len > chan->mps) {
5084 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5088 if (!control->sframe) {
5091 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5092 control->sar, control->reqseq, control->final,
5095 /* Validate F-bit - F=0 always valid, F=1 only
5096 * valid in TX WAIT_F
5098 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5101 if (chan->mode != L2CAP_MODE_STREAMING) {
5102 event = L2CAP_EV_RECV_IFRAME;
5103 err = l2cap_rx(chan, control, skb, event);
5105 err = l2cap_stream_rx(chan, control, skb);
5109 l2cap_send_disconn_req(chan->conn, chan,
/* Index by control->super: 0=RR, 1=REJ, 2=RNR, 3=SREJ. */
5112 const u8 rx_func_to_event[4] = {
5113 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5114 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5117 /* Only I-frames are expected in streaming mode */
5118 if (chan->mode == L2CAP_MODE_STREAMING)
5121 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5122 control->reqseq, control->final, control->poll,
5127 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5131 /* Validate F and P bits */
5132 if (control->final && (control->poll ||
5133 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5136 event = rx_func_to_event[control->super];
5137 if (l2cap_rx(chan, control, skb, event))
5138 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/*
 * Route an inbound frame to the channel identified by @cid.  The A2MP
 * CID gets a channel created on demand; unknown CIDs are dropped.
 * Basic mode delivers directly (dropping frames larger than imtu —
 * see the comment below about the lack of flow control); ERTM and
 * streaming modes go through l2cap_data_rcv().
 */
5148 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5149 struct sk_buff *skb)
5151 struct l2cap_chan *chan;
5153 chan = l2cap_get_chan_by_scid(conn, cid);
5155 if (cid == L2CAP_CID_A2MP) {
5156 chan = a2mp_channel_create(conn, skb);
5162 l2cap_chan_lock(chan);
5164 BT_DBG("unknown cid 0x%4.4x", cid);
5165 /* Drop packet and return */
5171 BT_DBG("chan %p, len %d", chan, skb->len);
5173 if (chan->state != BT_CONNECTED)
5176 switch (chan->mode) {
5177 case L2CAP_MODE_BASIC:
5178 /* If socket recv buffers overflows we drop data here
5179 * which is *bad* because L2CAP has to be reliable.
5180 * But we don't have any other choice. L2CAP doesn't
5181 * provide flow control mechanism. */
5183 if (chan->imtu < skb->len)
5186 if (!chan->ops->recv(chan, skb))
5190 case L2CAP_MODE_ERTM:
5191 case L2CAP_MODE_STREAMING:
5192 l2cap_data_rcv(chan, skb);
5196 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5204 l2cap_chan_unlock(chan);
/*
 * Deliver a connectionless (CID 0x0002) frame to the global channel
 * listening on @psm for this address pair.  Drops the frame if no
 * channel matches, the channel is in the wrong state, or the payload
 * exceeds its imtu.
 */
5207 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5208 struct sk_buff *skb)
5210 struct l2cap_chan *chan;
5212 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5216 BT_DBG("chan %p, len %d", chan, skb->len);
5218 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5221 if (chan->imtu < skb->len)
5224 if (!chan->ops->recv(chan, skb))
/*
 * Deliver an ATT (LE data CID) frame to the global channel bound to
 * @cid for this address pair.  Same drop conditions as the
 * connectionless path: no match, wrong state, or payload over imtu.
 */
5231 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5232 struct sk_buff *skb)
5234 struct l2cap_chan *chan;
5236 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5240 BT_DBG("chan %p, len %d", chan, skb->len);
5242 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5245 if (chan->imtu < skb->len)
5248 if (!chan->ops->recv(chan, skb))
/*
 * Demultiplex a complete L2CAP frame by CID after stripping the basic
 * header: signalling CIDs go to the signalling handler, the
 * connectionless CID pulls the PSM prefix first, the LE data CID goes
 * to the ATT handler, the SMP CID is handed to smp_sig_channel()
 * (tearing the connection down with EACCES on failure), and every
 * other CID is treated as a data channel.  Frames whose header length
 * disagrees with skb->len are discarded.
 */
5255 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5257 struct l2cap_hdr *lh = (void *) skb->data;
5261 skb_pull(skb, L2CAP_HDR_SIZE);
5262 cid = __le16_to_cpu(lh->cid);
5263 len = __le16_to_cpu(lh->len);
5265 if (len != skb->len) {
5270 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5273 case L2CAP_CID_LE_SIGNALING:
5274 case L2CAP_CID_SIGNALING:
5275 l2cap_sig_channel(conn, skb);
5278 case L2CAP_CID_CONN_LESS:
5279 psm = get_unaligned((__le16 *) skb->data);
5280 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5281 l2cap_conless_channel(conn, psm, skb);
5284 case L2CAP_CID_LE_DATA:
5285 l2cap_att_channel(conn, cid, skb);
5289 if (smp_sig_channel(conn, skb))
5290 l2cap_conn_del(conn->hcon, EACCES);
5294 l2cap_data_channel(conn, cid, skb);
5299 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: should we accept an incoming ACL connection from
 * @bdaddr?  Scans listening channels; lm1 accumulates link-mode flags
 * for sockets bound exactly to this adapter's address, lm2 for
 * wildcard (BDADDR_ANY) listeners.  An exact binding takes precedence
 * over the wildcard result.
 */
5301 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5303 int exact = 0, lm1 = 0, lm2 = 0;
5304 struct l2cap_chan *c;
5306 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5308 /* Find listening sockets and check their link_mode */
5309 read_lock(&chan_list_lock);
5310 list_for_each_entry(c, &chan_list, global_l) {
5311 struct sock *sk = c->sk;
5313 if (c->state != BT_LISTEN)
5316 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5317 lm1 |= HCI_LM_ACCEPT;
5318 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5319 lm1 |= HCI_LM_MASTER;
5321 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5322 lm2 |= HCI_LM_ACCEPT;
5323 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5324 lm2 |= HCI_LM_MASTER;
5327 read_unlock(&chan_list_lock);
5329 return exact ? lm1 : lm2;
/*
 * HCI callback: link establishment completed.  On success, attach an
 * L2CAP connection object and mark it ready; on failure, tear down
 * any L2CAP state with the HCI status mapped to an errno.
 */
5332 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5334 struct l2cap_conn *conn;
5336 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5339 conn = l2cap_conn_add(hcon, status);
5341 l2cap_conn_ready(conn);
5343 l2cap_conn_del(hcon, bt_to_errno(status));
/*
 * HCI callback: report the disconnect reason to use for this link.
 * Falls back to "remote user terminated" when no L2CAP connection
 * state exists.
 */
5348 int l2cap_disconn_ind(struct hci_conn *hcon)
5350 struct l2cap_conn *conn = hcon->l2cap_data;
5352 BT_DBG("hcon %p", hcon);
5355 return HCI_ERROR_REMOTE_USER_TERM;
5356 return conn->disc_reason;
/*
 * HCI callback: the link is gone — tear down all L2CAP state for it,
 * translating the HCI reason code to an errno for the channels.
 */
5359 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5361 BT_DBG("hcon %p reason %d", hcon, reason);
5363 l2cap_conn_del(hcon, bt_to_errno(reason));
/*
 * React to an encryption change on a connection-oriented channel.
 * When encryption drops: MEDIUM security starts the encryption
 * timeout, HIGH security closes the channel outright.  When
 * encryption comes up, a pending MEDIUM-security timer is cleared.
 */
5367 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5369 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5372 if (encrypt == 0x00) {
5373 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5374 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5375 } else if (chan->sec_level == BT_SECURITY_HIGH)
5376 l2cap_chan_close(chan, ECONNREFUSED);
5378 if (chan->sec_level == BT_SECURITY_MEDIUM)
5379 __clear_chan_timer(chan);
/*
 * HCI security (authentication/encryption) change callback.  On LE
 * links, successful encryption kicks SMP key distribution and cancels
 * the security timer.  On ACL links, walks every channel on the
 * connection under chan_lock: LE data channels become ready once
 * encrypted; already-connected channels clear the socket suspend flag
 * and re-check encryption policy; channels in BT_CONNECT (re)send the
 * connect request or start the disconnect timer on failure; channels
 * in BT_CONNECT2 answer the peer's pending connect request — success,
 * authorization-pending (deferred setup), or security-block — and on
 * success send the first configure request.
 */
5383 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5385 struct l2cap_conn *conn = hcon->l2cap_data;
5386 struct l2cap_chan *chan;
5391 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
5393 if (hcon->type == LE_LINK) {
5394 if (!status && encrypt)
5395 smp_distribute_keys(conn, 0);
5396 cancel_delayed_work(&conn->security_timer);
5399 mutex_lock(&conn->chan_lock);
5401 list_for_each_entry(chan, &conn->chan_l, list) {
5402 l2cap_chan_lock(chan);
5404 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5405 state_to_string(chan->state));
5407 if (chan->scid == L2CAP_CID_LE_DATA) {
5408 if (!status && encrypt) {
5409 chan->sec_level = hcon->sec_level;
5410 l2cap_chan_ready(chan);
5413 l2cap_chan_unlock(chan);
/* Channels still waiting on a pending connect are skipped here. */
5417 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5418 l2cap_chan_unlock(chan);
5422 if (!status && (chan->state == BT_CONNECTED ||
5423 chan->state == BT_CONFIG)) {
5424 struct sock *sk = chan->sk;
5426 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5427 sk->sk_state_change(sk);
5429 l2cap_check_encryption(chan, encrypt);
5430 l2cap_chan_unlock(chan);
5434 if (chan->state == BT_CONNECT) {
5436 l2cap_send_conn_req(chan);
5438 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5440 } else if (chan->state == BT_CONNECT2) {
5441 struct sock *sk = chan->sk;
5442 struct l2cap_conn_rsp rsp;
5448 if (test_bit(BT_SK_DEFER_SETUP,
5449 &bt_sk(sk)->flags)) {
5450 struct sock *parent = bt_sk(sk)->parent;
5451 res = L2CAP_CR_PEND;
5452 stat = L2CAP_CS_AUTHOR_PEND;
/* Wake the listening socket so userspace can authorize. */
5454 parent->sk_data_ready(parent, 0);
5456 __l2cap_state_change(chan, BT_CONFIG);
5457 res = L2CAP_CR_SUCCESS;
5458 stat = L2CAP_CS_NO_INFO;
5461 __l2cap_state_change(chan, BT_DISCONN);
5462 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5463 res = L2CAP_CR_SEC_BLOCK;
5464 stat = L2CAP_CS_NO_INFO;
5469 rsp.scid = cpu_to_le16(chan->dcid);
5470 rsp.dcid = cpu_to_le16(chan->scid);
5471 rsp.result = cpu_to_le16(res);
5472 rsp.status = cpu_to_le16(stat);
5473 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5476 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5477 res == L2CAP_CR_SUCCESS) {
5479 set_bit(CONF_REQ_SENT, &chan->conf_state);
5480 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5482 l2cap_build_conf_req(chan, buf),
5484 chan->num_conf_req++;
5488 l2cap_chan_unlock(chan);
5491 mutex_unlock(&conn->chan_lock);
/*
 * Entry point for inbound ACL data from HCI.  Reassembles L2CAP
 * frames split across ACL packets: a start fragment (!ACL_CONT) reads
 * the basic L2CAP header to learn the total length — complete frames
 * go straight to l2cap_recv_frame(), otherwise conn->rx_skb is
 * allocated and conn->rx_len tracks the bytes still expected.
 * Continuation fragments are copied in until rx_len hits zero, then
 * the assembled frame is delivered.  Any inconsistency (unexpected
 * start/continuation, short header, over-long frame or fragment)
 * drops the partial frame and marks the connection unreliable (ECOMM).
 */
5496 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5498 struct l2cap_conn *conn = hcon->l2cap_data;
5501 conn = l2cap_conn_add(hcon, 0);
5506 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5508 if (!(flags & ACL_CONT)) {
5509 struct l2cap_hdr *hdr;
/* A start fragment while one is pending means the old one was truncated. */
5513 BT_ERR("Unexpected start frame (len %d)", skb->len);
5514 kfree_skb(conn->rx_skb);
5515 conn->rx_skb = NULL;
5517 l2cap_conn_unreliable(conn, ECOMM);
5520 /* Start fragment always begin with Basic L2CAP header */
5521 if (skb->len < L2CAP_HDR_SIZE) {
5522 BT_ERR("Frame is too short (len %d)", skb->len);
5523 l2cap_conn_unreliable(conn, ECOMM);
5527 hdr = (struct l2cap_hdr *) skb->data;
5528 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5530 if (len == skb->len) {
5531 /* Complete frame received */
5532 l2cap_recv_frame(conn, skb);
5536 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5538 if (skb->len > len) {
5539 BT_ERR("Frame is too long (len %d, expected len %d)",
5541 l2cap_conn_unreliable(conn, ECOMM);
5545 /* Allocate skb for the complete frame (with header) */
5546 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5550 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5552 conn->rx_len = len - skb->len;
5554 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5556 if (!conn->rx_len) {
5557 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5558 l2cap_conn_unreliable(conn, ECOMM);
5562 if (skb->len > conn->rx_len) {
5563 BT_ERR("Fragment is too long (len %d, expected %d)",
5564 skb->len, conn->rx_len);
5565 kfree_skb(conn->rx_skb);
5566 conn->rx_skb = NULL;
5568 l2cap_conn_unreliable(conn, ECOMM);
5572 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5574 conn->rx_len -= skb->len;
5576 if (!conn->rx_len) {
5577 /* Complete frame received */
5578 l2cap_recv_frame(conn, conn->rx_skb);
5579 conn->rx_skb = NULL;
/*
 * seq_file show callback for /sys/kernel/debug/bluetooth/l2cap:
 * prints one line per channel in the global list (addresses, state,
 * PSM, CIDs, MTUs, security level, mode) under the chan_list read lock.
 */
5588 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5590 struct l2cap_chan *c;
5592 read_lock(&chan_list_lock);
5594 list_for_each_entry(c, &chan_list, global_l) {
5595 struct sock *sk = c->sk;
5597 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5598 batostr(&bt_sk(sk)->src),
5599 batostr(&bt_sk(sk)->dst),
5600 c->state, __le16_to_cpu(c->psm),
5601 c->scid, c->dcid, c->imtu, c->omtu,
5602 c->sec_level, c->mode);
5605 read_unlock(&chan_list_lock);
/* debugfs open hook: bind the seq_file single-show callback. */
5610 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5612 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file single-open). */
5615 static const struct file_operations l2cap_debugfs_fops = {
5616 .open = l2cap_debugfs_open,
5618 .llseek = seq_lseek,
5619 .release = single_release,
/* Handle to the debugfs file, kept so l2cap_exit() can remove it. */
5622 static struct dentry *l2cap_debugfs;
/*
 * Module init: register the L2CAP socket family and create the
 * read-only debugfs file (creation failure is only logged, not fatal).
 */
5624 int __init l2cap_init(void)
5628 err = l2cap_init_sockets();
5633 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5634 bt_debugfs, NULL, &l2cap_debugfs_fops);
5636 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs entry and unregister sockets. */
5642 void l2cap_exit(void)
5644 debugfs_remove(l2cap_debugfs);
5645 l2cap_cleanup_sockets();
/* Runtime knob: writable module parameter to disable ERTM negotiation. */
5648 module_param(disable_ertm, bool, 0644);
5649 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");