2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
/* Default feature mask; only the fixed-channel bit is set statically */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap; only the L2CAP signalling channel bit is set here */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of all registered channels, guarded by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers used before their definition.
 * NOTE(review): this extract elides parts of these prototypes (e.g. the
 * trailing parameter of l2cap_send_cmd) — verify against the full file.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
struct l2cap_chan *chan, int err);
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff_head *skbs, u8 event);
/* ---- L2CAP channels ---- */

/* Find the channel on @conn whose destination CID matches @cid.
 * NOTE(review): function bodies in this section are partly elided in the
 * extract (match test / return lines missing) — verify against the full file.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
        list_for_each_entry(c, &conn->chan_l, list) {

/* Find the channel on @conn whose source CID matches @cid (unlocked) */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
        list_for_each_entry(c, &conn->chan_l, list) {

/* Find channel with given SCID.
 * Looks up under conn->chan_lock.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_scid(conn, cid);
        mutex_unlock(&conn->chan_lock);

/* Find the channel on @conn whose signalling identifier matches @ident */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
        struct l2cap_chan *c;

        list_for_each_entry(c, &conn->chan_l, list) {
                if (c->ident == ident)

/* Find a channel on the global list bound to @src with source port @psm.
 * Caller must hold chan_list_lock. */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
        struct l2cap_chan *c;

        list_for_each_entry(c, &chan_list, global_l) {
                if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on source address @src; when @psm is zero a free
 * dynamic PSM (odd values from 0x1001) is allocated instead.
 * Serialised by chan_list_lock. */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
        write_lock(&chan_list_lock);

        /* An explicitly requested PSM must not already be bound to @src */
        if (psm && __l2cap_global_chan_by_addr(psm, src)) {

        /* Scan odd PSM values for one not yet in use on @src */
        for (p = 0x1001; p < 0x1100; p += 2)
                if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
                        chan->psm = cpu_to_le16(p);
                        chan->sport = cpu_to_le16(p);

        write_unlock(&chan_list_lock);

/* Record a pre-assigned source CID on @chan (under chan_list_lock).
 * NOTE(review): the assignment itself is elided in this extract. */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
        write_lock(&chan_list_lock);
        write_unlock(&chan_list_lock);

/* Return the first dynamic CID on @conn that no channel is using.
 * NOTE(review): the exhausted-range return path is elided in this extract. */
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
        u16 cid = L2CAP_CID_DYN_START;

        for (; cid < L2CAP_CID_DYN_END; cid++) {
                if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move @chan to @state and notify the owner through the state_change op */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
        BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
               state_to_string(state));

        chan->ops->state_change(chan, state);

/* Wrapper for __l2cap_state_change(); the socket locking lines appear
 * elided in this extract — confirm lock_sock/release_sock bracketing. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
        struct sock *sk = chan->sk;

        __l2cap_state_change(chan, state);

/* Record @err on the channel's socket (assignment elided in extract) */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
        struct sock *sk = chan->sk;

/* Locked wrapper around __l2cap_chan_set_err() */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
        struct sock *sk = chan->sk;

        __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only when the monitor timer is
 * not already pending and a retransmission timeout is configured. */
static void __set_retrans_timer(struct l2cap_chan *chan)
        if (!delayed_work_pending(&chan->monitor_timer) &&
            chan->retrans_timeout) {
                l2cap_set_timer(chan, &chan->retrans_timer,
                                msecs_to_jiffies(chan->retrans_timeout));

/* Replace the retransmission timer with the monitor timer (if configured) */
static void __set_monitor_timer(struct l2cap_chan *chan)
        __clear_retrans_timer(chan);
        if (chan->monitor_timeout) {
                l2cap_set_timer(chan, &chan->monitor_timer,
                                msecs_to_jiffies(chan->monitor_timeout));

/* Linear search of @head for the skb whose ERTM control.txseq equals @seq.
 * NOTE(review): parameter list and return lines are elided in this extract. */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
        skb_queue_walk(head, skb) {
                if (bt_cb(skb)->control.txseq == seq)
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */

/* Allocate the backing array for a seq_list sized for @size entries */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
        size_t alloc_size, i;

        /* Allocated size is a power of 2 to map sequence numbers
         * (which may be up to 14 bits) in to a smaller array that is
         * sized for the negotiated ERTM transmit windows.
         */
        alloc_size = roundup_pow_of_two(size);

        seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
        /* NOTE(review): the allocation-failure (-ENOMEM) check appears to be
         * elided in this extract — confirm it exists in the full source. */

        /* Power-of-two size makes (seq & mask) the array index */
        seq_list->mask = alloc_size - 1;
        seq_list->head = L2CAP_SEQ_LIST_CLEAR;
        seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
        for (i = 0; i < alloc_size; i++)
                seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the array allocated by l2cap_seq_list_init(); the seq_list
 * struct itself is owned by the channel and is not freed here. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
        kfree(seq_list->list);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list: O(1) when it is the head, otherwise by
 * walking the links. Returns L2CAP_SEQ_LIST_CLEAR when the list is empty
 * or @seq is not linked. */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
        u16 mask = seq_list->mask;

        if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
                /* In case someone tries to pop the head of an empty list */
                return L2CAP_SEQ_LIST_CLEAR;
        } else if (seq_list->head == seq) {
                /* Head can be removed in constant time */
                seq_list->head = seq_list->list[seq & mask];
                seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

                /* Removing the last element resets both sentinels */
                if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
                        seq_list->head = L2CAP_SEQ_LIST_CLEAR;
                        seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

                /* Walk the list to find the sequence number */
                u16 prev = seq_list->head;
                while (seq_list->list[prev & mask] != seq) {
                        prev = seq_list->list[prev & mask];
                        if (prev == L2CAP_SEQ_LIST_TAIL)
                                return L2CAP_SEQ_LIST_CLEAR;

                /* Unlink the number from the list and clear it */
                seq_list->list[prev & mask] = seq_list->list[seq & mask];
                seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
                if (seq_list->tail == seq)
                        seq_list->tail = prev;
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset every slot plus both sentinels; no-op on an already-empty list */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
        if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)

        for (i = 0; i <= seq_list->mask; i++)
                seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

        seq_list->head = L2CAP_SEQ_LIST_CLEAR;
        seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

/* Append @seq at the tail; a value already linked is silently ignored */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
        u16 mask = seq_list->mask;

        /* All appends happen in constant time */

        /* Already a member: nothing to do */
        if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)

        /* Empty list gets a new head; otherwise link behind current tail */
        if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
                seq_list->head = seq;
                seq_list->list[seq_list->tail & mask] = seq;

        seq_list->tail = seq;
        seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed work run when chan_timer fires: close the channel with a
 * state-dependent reason, notify the owner, and drop the reference the
 * timer held on the channel. */
static void l2cap_chan_timeout(struct work_struct *work)
        struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
        struct l2cap_conn *conn = chan->conn;

        BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

        mutex_lock(&conn->chan_lock);
        l2cap_chan_lock(chan);

        /* Established or configuring channels time out as refused */
        if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
                reason = ECONNREFUSED;
        else if (chan->state == BT_CONNECT &&
                 chan->sec_level != BT_SECURITY_SDP)
                reason = ECONNREFUSED;

        l2cap_chan_close(chan, reason);

        l2cap_chan_unlock(chan);

        chan->ops->close(chan);
        mutex_unlock(&conn->chan_lock);

        l2cap_chan_put(chan);
/* Allocate a new channel, put it on the global list and take the initial
 * reference. NOTE(review): the kzalloc failure check and return statement
 * are elided in this extract. */
struct l2cap_chan *l2cap_chan_create(void)
        struct l2cap_chan *chan;

        chan = kzalloc(sizeof(*chan), GFP_ATOMIC);

        mutex_init(&chan->lock);

        write_lock(&chan_list_lock);
        list_add(&chan->global_l, &chan_list);
        write_unlock(&chan_list_lock);

        INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

        chan->state = BT_OPEN;

        /* Initial reference, dropped by l2cap_chan_destroy() */
        atomic_set(&chan->refcnt, 1);

        /* This flag is cleared in l2cap_chan_ready() */
        set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

        BT_DBG("chan %p", chan);

/* Unlink @chan from the global list and drop the creation reference */
void l2cap_chan_destroy(struct l2cap_chan *chan)
        write_lock(&chan_list_lock);
        list_del(&chan->global_l);
        write_unlock(&chan_list_lock);

        l2cap_chan_put(chan);
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
435 chan->sec_level = BT_SECURITY_LOW;
437 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: assign CIDs/MTUs by channel type, install the
 * default extended-flowspec parameters, take a channel reference and link
 * it on the connection's channel list. Caller holds conn->chan_lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
        BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
               __le16_to_cpu(chan->psm), chan->dcid);

        conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

        switch (chan->chan_type) {
        case L2CAP_CHAN_CONN_ORIENTED:
                if (conn->hcon->type == LE_LINK) {
                        /* LE uses the fixed LE data CID on both ends */
                        chan->omtu = L2CAP_DEFAULT_MTU;
                        chan->scid = L2CAP_CID_LE_DATA;
                        chan->dcid = L2CAP_CID_LE_DATA;
                        /* Alloc CID for connection-oriented socket */
                        chan->scid = l2cap_alloc_cid(conn);
                        chan->omtu = L2CAP_DEFAULT_MTU;

        case L2CAP_CHAN_CONN_LESS:
                /* Connectionless socket */
                chan->scid = L2CAP_CID_CONN_LESS;
                chan->dcid = L2CAP_CID_CONN_LESS;
                chan->omtu = L2CAP_DEFAULT_MTU;

        case L2CAP_CHAN_CONN_FIX_A2MP:
                chan->scid = L2CAP_CID_A2MP;
                chan->dcid = L2CAP_CID_A2MP;
                chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
                chan->imtu = L2CAP_A2MP_DEFAULT_MTU;

                /* Raw socket can send/recv signalling messages only */
                chan->scid = L2CAP_CID_SIGNALING;
                chan->dcid = L2CAP_CID_SIGNALING;
                chan->omtu = L2CAP_DEFAULT_MTU;

        /* Best-effort service defaults for the extended flow specification */
        chan->local_id = L2CAP_BESTEFFORT_ID;
        chan->local_stype = L2CAP_SERV_BESTEFFORT;
        chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
        chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
        chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
        chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

        /* Reference held while the channel is on conn->chan_l */
        l2cap_chan_hold(chan);

        list_add(&chan->list, &conn->chan_l);

/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
        mutex_lock(&conn->chan_lock);
        __l2cap_chan_add(conn, chan);
        mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: stop the channel timer, unlink it,
 * drop the list reference and hci_conn reference, tear down the owner
 * state, and release any mode-specific queues and timers. */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
        struct l2cap_conn *conn = chan->conn;

        __clear_chan_timer(chan);

        BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

        /* Delete from channel list */
        list_del(&chan->list);

        /* Drop the reference taken in __l2cap_chan_add() */
        l2cap_chan_put(chan);

        /* A2MP channels do not hold their own hci_conn reference */
        if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
                hci_conn_put(conn->hcon);

        if (chan->ops->teardown)
                chan->ops->teardown(chan, err);

        if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))

        case L2CAP_MODE_BASIC:

        case L2CAP_MODE_ERTM:
                __clear_retrans_timer(chan);
                __clear_monitor_timer(chan);
                __clear_ack_timer(chan);

                skb_queue_purge(&chan->srej_q);

                l2cap_seq_list_free(&chan->srej_list);
                l2cap_seq_list_free(&chan->retrans_list);

                /* fall through to release the tx queue as well */
        case L2CAP_MODE_STREAMING:
                skb_queue_purge(&chan->tx_q);
/* Close @chan according to its current state: tear down listeners, send a
 * disconnect request on established ACL channels, or answer a pending
 * connect request with a rejection before deleting the channel.
 * NOTE(review): the switch statement and its case labels are elided in
 * this extract — confirm state coverage against the full source. */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
        struct l2cap_conn *conn = chan->conn;
        struct sock *sk = chan->sk;

        BT_DBG("chan %p state %s sk %p", chan,
               state_to_string(chan->state), sk);

        switch (chan->state) {
                if (chan->ops->teardown)
                        chan->ops->teardown(chan, 0);

                /* Established ACL channels are disconnected gracefully */
                if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
                    conn->hcon->type == ACL_LINK) {
                        __set_chan_timer(chan, sk->sk_sndtimeo);
                        l2cap_send_disconn_req(conn, chan, reason);
                        l2cap_chan_del(chan, reason);

                if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
                    conn->hcon->type == ACL_LINK) {
                        struct l2cap_conn_rsp rsp;

                        /* Deferred setup is reported as a security block */
                        if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
                                result = L2CAP_CR_SEC_BLOCK;
                                result = L2CAP_CR_BAD_PSM;
                        l2cap_state_change(chan, BT_DISCONN);

                        rsp.scid = cpu_to_le16(chan->dcid);
                        rsp.dcid = cpu_to_le16(chan->scid);
                        rsp.result = cpu_to_le16(result);
                        rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
                        l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,

                l2cap_chan_del(chan, reason);

                l2cap_chan_del(chan, reason);

                if (chan->ops->teardown)
                        chan->ops->teardown(chan, 0);
/* Map the channel type / PSM / security level to an HCI authentication
 * requirement: raw channels use dedicated bonding, SDP never bonds, and
 * everything else uses general bonding. */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
        if (chan->chan_type == L2CAP_CHAN_RAW) {
                switch (chan->sec_level) {
                case BT_SECURITY_HIGH:
                        return HCI_AT_DEDICATED_BONDING_MITM;
                case BT_SECURITY_MEDIUM:
                        return HCI_AT_DEDICATED_BONDING;
                        return HCI_AT_NO_BONDING;
        } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
                /* SDP gets its own minimal security level */
                if (chan->sec_level == BT_SECURITY_LOW)
                        chan->sec_level = BT_SECURITY_SDP;

                if (chan->sec_level == BT_SECURITY_HIGH)
                        return HCI_AT_NO_BONDING_MITM;
                        return HCI_AT_NO_BONDING;
                switch (chan->sec_level) {
                case BT_SECURITY_HIGH:
                        return HCI_AT_GENERAL_BONDING_MITM;
                case BT_SECURITY_MEDIUM:
                        return HCI_AT_GENERAL_BONDING;
                        return HCI_AT_NO_BONDING;

/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan)
        struct l2cap_conn *conn = chan->conn;

        auth_type = l2cap_get_auth_type(chan);

        return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier for @conn under conn->lock */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
        /* Get next available identificator.
         * 1 - 128 are used by kernel.
         * 129 - 199 are reserved.
         * 200 - 254 are used by utilities like l2ping, etc.
         */

        spin_lock(&conn->lock);

        /* Wrap back to the start of the kernel range after 128 */
        if (++conn->tx_ident > 128)

        spin_unlock(&conn->lock);

/* Build and transmit one signalling command on the connection's HCI
 * channel; non-flushable where the controller supports it. */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
        struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

        BT_DBG("code 0x%2.2x", code);

        if (lmp_no_flush_capable(conn->hcon->hdev))
                flags = ACL_START_NO_FLUSH;

        /* Signalling always forces the link active at top priority */
        bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
        skb->priority = HCI_PRIO_MAX;

        hci_send_acl(conn->hchan, skb, flags);
/* Hand one data skb to the HCI layer for @chan, choosing the ACL flush
 * flag from the channel's FLUSHABLE flag and controller capability. */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
        struct hci_conn *hcon = chan->conn->hcon;

        BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,

        if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
            lmp_no_flush_capable(hcon->hdev))
                flags = ACL_START_NO_FLUSH;

        /* Per-channel policy decides whether to force the link active */
        bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
        hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into @control. S-frames carry
 * poll/supervisory bits; I-frames carry SAR and txseq.
 * NOTE(review): the sframe flag assignments appear elided in this extract. */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
        control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
        control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

        if (enh & L2CAP_CTRL_FRAME_TYPE) {
                /* S-Frame */
                control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
                control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

                /* I-Frame */
                control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
                control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

/* Decode a 32-bit extended control field into @control (same layout idea
 * as the enhanced field, but with wider sequence numbers). */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
        control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
        control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

        if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
                /* S-Frame */
                control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
                control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

                /* I-Frame */
                control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
                control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

/* Strip and decode the control field at the front of @skb, picking the
 * extended or enhanced layout from the channel's EXT_CTRL flag. */
static inline void __unpack_control(struct l2cap_chan *chan,
        if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
                __unpack_extended_control(get_unaligned_le32(skb->data),
                                          &bt_cb(skb)->control);
                skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
                __unpack_enhanced_control(get_unaligned_le16(skb->data),
                                          &bt_cb(skb)->control);
                skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode @control into a 32-bit extended control field */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
        packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
        packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

        if (control->sframe) {
                packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
                packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
                packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
                /* I-frame: SAR and transmit sequence number instead */
                packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
                packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;

/* Encode @control into a 16-bit enhanced control field */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
        packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
        packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

        if (control->sframe) {
                packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
                packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
                packed |= L2CAP_CTRL_FRAME_TYPE;
                /* I-frame: SAR and transmit sequence number instead */
                packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
                packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;

/* Write the packed control field just after the basic L2CAP header,
 * choosing the width from the channel's EXT_CTRL flag. */
static inline void __pack_control(struct l2cap_chan *chan,
                                  struct l2cap_ctrl *control,
        if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
                put_unaligned_le32(__pack_extended_control(control),
                                   skb->data + L2CAP_HDR_SIZE);
                put_unaligned_le16(__pack_enhanced_control(control),
                                   skb->data + L2CAP_HDR_SIZE);
828 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
830 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
831 return L2CAP_EXT_HDR_SIZE;
833 return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU for @chan with the already-packed @control field:
 * basic header, control field (16 or 32 bit), optional CRC16 FCS.
 * Returns ERR_PTR(-ENOMEM) on allocation failure. */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
        struct l2cap_hdr *lh;
        int hlen = __ertm_hdr_size(chan);

        if (chan->fcs == L2CAP_FCS_CRC16)
                hlen += L2CAP_FCS_SIZE;

        skb = bt_skb_alloc(hlen, GFP_KERNEL);

                return ERR_PTR(-ENOMEM);

        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
        lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
        lh->cid = cpu_to_le16(chan->dcid);

        if (test_bit(FLAG_EXT_CTRL, &chan->flags))
                put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
                put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

        /* FCS covers the header and control field written so far */
        if (chan->fcs == L2CAP_FCS_CRC16) {
                u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
                put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));

        skb->priority = HCI_PRIO_MAX;
/* Send the supervisory frame described by @control, maintaining the
 * final-bit, RNR-sent and last-acked bookkeeping as a side effect. */
static void l2cap_send_sframe(struct l2cap_chan *chan,
                              struct l2cap_ctrl *control)
        BT_DBG("chan %p, control %p", chan, control);

        /* Only supervisory frames may be sent through this path */
        if (!control->sframe)

        /* A pending F-bit is consumed by this frame */
        if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&

        if (control->super == L2CAP_SUPER_RR)
                clear_bit(CONN_RNR_SENT, &chan->conn_state);
        else if (control->super == L2CAP_SUPER_RNR)
                set_bit(CONN_RNR_SENT, &chan->conn_state);

        /* RR/RNR acknowledge reqseq, so the ack timer can be stopped */
        if (control->super != L2CAP_SUPER_SREJ) {
                chan->last_acked_seq = control->reqseq;
                __clear_ack_timer(chan);

        BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
               control->final, control->poll, control->super);

        if (test_bit(FLAG_EXT_CTRL, &chan->flags))
                control_field = __pack_extended_control(control);
                control_field = __pack_enhanced_control(control);

        skb = l2cap_create_sframe_pdu(chan, control_field);

        l2cap_do_send(chan, skb);

/* Send an RR (or RNR when locally busy) acknowledging buffer_seq, with
 * the poll bit as requested. */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
        struct l2cap_ctrl control;

        BT_DBG("chan %p, poll %d", chan, poll);

        memset(&control, 0, sizeof(control));

        if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
                control.super = L2CAP_SUPER_RNR;
                control.super = L2CAP_SUPER_RR;

        control.reqseq = chan->buffer_seq;
        l2cap_send_sframe(chan, &control);
926 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
928 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP connect request for @chan and mark the connect pending */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
        struct l2cap_conn *conn = chan->conn;
        struct l2cap_conn_req req;

        req.scid = cpu_to_le16(chan->scid);

        chan->ident = l2cap_get_ident(conn);

        set_bit(CONF_CONNECT_PEND, &chan->conf_state);

        l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);

/* Mark @chan fully connected and notify its owner */
static void l2cap_chan_ready(struct l2cap_chan *chan)
        /* This clears all conf flags, including CONF_NOT_COMPLETE */
        chan->conf_state = 0;
        __clear_chan_timer(chan);

        chan->state = BT_CONNECTED;

        chan->ops->ready(chan);

/* Kick off connection setup: LE channels are ready immediately; on ACL,
 * either send the connect request (features known and security passed)
 * or first issue a feature-mask information request. */
static void l2cap_do_start(struct l2cap_chan *chan)
        struct l2cap_conn *conn = chan->conn;

        if (conn->hcon->type == LE_LINK) {
                l2cap_chan_ready(chan);

        if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
                /* Wait for the feature exchange to finish first */
                if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))

                if (l2cap_chan_check_security(chan) &&
                    __l2cap_no_conn_pending(chan))
                        l2cap_send_conn_req(chan);

                struct l2cap_info_req req;
                req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

                conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
                conn->info_ident = l2cap_get_ident(conn);

                schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

                l2cap_send_cmd(conn, conn->info_ident,
                               L2CAP_INFO_REQ, sizeof(req), &req);

/* Whether @mode is usable given the remote feature mask and our own;
 * ERTM and streaming are always enabled locally here. */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
        u32 local_feat_mask = l2cap_feat_mask;

        local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

        case L2CAP_MODE_ERTM:
                return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
        case L2CAP_MODE_STREAMING:
                return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request for @chan (A2MP channels skip the signalling
 * exchange), stop ERTM timers, and move the channel to BT_DISCONN with
 * @err recorded on its socket. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
        struct sock *sk = chan->sk;
        struct l2cap_disconn_req req;

        if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
                __clear_retrans_timer(chan);
                __clear_monitor_timer(chan);
                __clear_ack_timer(chan);

        /* A2MP is a fixed channel: no disconnect signalling needed */
        if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
                __l2cap_state_change(chan, BT_DISCONN);

        req.dcid = cpu_to_le16(chan->dcid);
        req.scid = cpu_to_le16(chan->scid);
        l2cap_send_cmd(conn, l2cap_get_ident(conn),
                       L2CAP_DISCONN_REQ, sizeof(req), &req);

        __l2cap_state_change(chan, BT_DISCONN);
        __l2cap_chan_set_err(chan, err);
/* ---- L2CAP connections ---- */

/* Walk every connection-oriented channel on @conn and advance its setup:
 * BT_CONNECT channels send a connect request (or are closed when their
 * mode is unsupported), BT_CONNECT2 channels answer the pending connect
 * request and, on success, send the first configuration request. */
static void l2cap_conn_start(struct l2cap_conn *conn)
        struct l2cap_chan *chan, *tmp;

        BT_DBG("conn %p", conn);

        mutex_lock(&conn->chan_lock);

        list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
                struct sock *sk = chan->sk;

                l2cap_chan_lock(chan);

                if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
                        l2cap_chan_unlock(chan);

                if (chan->state == BT_CONNECT) {
                        /* Security pending or request already sent: skip */
                        if (!l2cap_chan_check_security(chan) ||
                            !__l2cap_no_conn_pending(chan)) {
                                l2cap_chan_unlock(chan);

                        /* State-2 devices cannot fall back to basic mode */
                        if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
                                        && test_bit(CONF_STATE2_DEVICE,
                                        &chan->conf_state)) {
                                l2cap_chan_close(chan, ECONNRESET);
                                l2cap_chan_unlock(chan);

                        l2cap_send_conn_req(chan);

                } else if (chan->state == BT_CONNECT2) {
                        struct l2cap_conn_rsp rsp;
                        rsp.scid = cpu_to_le16(chan->dcid);
                        rsp.dcid = cpu_to_le16(chan->scid);

                        if (l2cap_chan_check_security(chan)) {
                                /* Deferred setup waits for userspace accept */
                                if (test_bit(BT_SK_DEFER_SETUP,
                                             &bt_sk(sk)->flags)) {
                                        struct sock *parent = bt_sk(sk)->parent;
                                        rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
                                        rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);

                                                parent->sk_data_ready(parent, 0);

                                        __l2cap_state_change(chan, BT_CONFIG);
                                        rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
                                        rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

                                rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
                                rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);

                        l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,

                        if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
                                        rsp.result != L2CAP_CR_SUCCESS) {
                                l2cap_chan_unlock(chan);

                        set_bit(CONF_REQ_SENT, &chan->conf_state);
                        l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
                                       l2cap_build_conf_req(chan, buf), buf);
                        chan->num_conf_req++;

                l2cap_chan_unlock(chan);

        mutex_unlock(&conn->chan_lock);
/* Find socket with cid and source/destination bdaddr.
 * An exact src/dst match wins immediately; otherwise the closest
 * wildcard (BDADDR_ANY) match is remembered and returned.
 * Returns closest match, locked.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
        struct l2cap_chan *c, *c1 = NULL;

        read_lock(&chan_list_lock);

        list_for_each_entry(c, &chan_list, global_l) {
                struct sock *sk = c->sk;

                if (state && c->state != state)

                if (c->scid == cid) {
                        int src_match, dst_match;
                        int src_any, dst_any;

                        /* Exact match. */
                        src_match = !bacmp(&bt_sk(sk)->src, src);
                        dst_match = !bacmp(&bt_sk(sk)->dst, dst);
                        if (src_match && dst_match) {
                                read_unlock(&chan_list_lock);

                        /* Closest match */
                        src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
                        dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
                        if ((src_match && dst_any) || (src_any && dst_match) ||
                            (src_any && dst_any))

        read_unlock(&chan_list_lock);
/* Incoming LE link became ready: if a socket is listening on the LE data
 * CID, spawn a child channel for it, attach it to @conn and mark it
 * connected. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
        struct sock *parent, *sk;
        struct l2cap_chan *chan, *pchan;

        /* Check if we have socket listening on cid */
        pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
                                          conn->src, conn->dst);

        chan = pchan->ops->new_connection(pchan);

        hci_conn_hold(conn->hcon);
        conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

        bacpy(&bt_sk(sk)->src, conn->src);
        bacpy(&bt_sk(sk)->dst, conn->dst);

        bt_accept_enqueue(parent, sk);

        l2cap_chan_add(conn, chan);

        l2cap_chan_ready(chan);

        release_sock(parent);

/* Link-level connection completed: kick LE security / accept path, then
 * walk all channels and either mark them ready (LE / non-connection-
 * oriented) or continue setup (BT_CONNECT). */
static void l2cap_conn_ready(struct l2cap_conn *conn)
        struct l2cap_chan *chan;
        struct hci_conn *hcon = conn->hcon;

        BT_DBG("conn %p", conn);

        if (!hcon->out && hcon->type == LE_LINK)
                l2cap_le_conn_ready(conn);

        if (hcon->out && hcon->type == LE_LINK)
                smp_conn_security(hcon, hcon->pending_sec_level);

        mutex_lock(&conn->chan_lock);

        list_for_each_entry(chan, &conn->chan_l, list) {

                l2cap_chan_lock(chan);

                /* A2MP channels are managed separately */
                if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
                        l2cap_chan_unlock(chan);

                if (hcon->type == LE_LINK) {
                        if (smp_conn_security(hcon, chan->sec_level))
                                l2cap_chan_ready(chan);

                } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
                        struct sock *sk = chan->sk;
                        __clear_chan_timer(chan);

                        __l2cap_state_change(chan, BT_CONNECTED);
                        sk->sk_state_change(sk);

                } else if (chan->state == BT_CONNECT)
                        l2cap_do_start(chan);

                l2cap_chan_unlock(chan);

        mutex_unlock(&conn->chan_lock);
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
        struct l2cap_chan *chan;

        BT_DBG("conn %p", conn);

        mutex_lock(&conn->chan_lock);

        list_for_each_entry(chan, &conn->chan_l, list) {
                /* Only channels that demanded reliability get the error */
                if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
                        __l2cap_chan_set_err(chan, err);

        mutex_unlock(&conn->chan_lock);

/* Feature-mask information request timed out: mark the exchange done and
 * continue channel setup with whatever we have. */
static void l2cap_info_timeout(struct work_struct *work)
        struct l2cap_conn *conn = container_of(work, struct l2cap_conn,

        conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
        conn->info_ident = 0;

        l2cap_conn_start(conn);
/* Tear down the L2CAP state for @hcon: delete every channel with @err,
 * drop the HCI channel, and cancel the info/security timers. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
        struct l2cap_conn *conn = hcon->l2cap_data;
        struct l2cap_chan *chan, *l;

        BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

        /* Drop any partially reassembled frame (kfree_skb handles NULL) */
        kfree_skb(conn->rx_skb);

        mutex_lock(&conn->chan_lock);

        /* Kill channels */
        list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
                /* Hold the channel across del/close so it outlives unlink */
                l2cap_chan_hold(chan);
                l2cap_chan_lock(chan);

                l2cap_chan_del(chan, err);

                l2cap_chan_unlock(chan);

                chan->ops->close(chan);
                l2cap_chan_put(chan);

        mutex_unlock(&conn->chan_lock);

        hci_chan_del(conn->hchan);

        if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
                cancel_delayed_work_sync(&conn->info_timer);

        if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
                cancel_delayed_work_sync(&conn->security_timer);
                smp_chan_destroy(conn);

        hcon->l2cap_data = NULL;

/* SMP pairing timed out: destroy the SMP context and drop the link */
static void security_timeout(struct work_struct *work)
        struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
                                               security_timer.work);

        BT_DBG("conn %p", conn);

        if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
                smp_chan_destroy(conn);
                l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon, including its HCI
 * channel, MTU, locks, channel list, and the LE-security or info timer. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
        struct l2cap_conn *conn = hcon->l2cap_data;
        struct hci_chan *hchan;

        hchan = hci_chan_create(hcon);

        conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
                /* Allocation failed: release the HCI channel again */
                hci_chan_del(hchan);

        hcon->l2cap_data = conn;

        conn->hchan = hchan;

        BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

        /* LE links may use their own (smaller) controller MTU */
        if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
                conn->mtu = hcon->hdev->le_mtu;
                conn->mtu = hcon->hdev->acl_mtu;

        conn->src = &hcon->hdev->bdaddr;
        conn->dst = &hcon->dst;

        conn->feat_mask = 0;

        spin_lock_init(&conn->lock);
        mutex_init(&conn->chan_lock);

        INIT_LIST_HEAD(&conn->chan_l);

        /* LE uses the SMP security timer, BR/EDR the info-request timer */
        if (hcon->type == LE_LINK)
                INIT_DELAYED_WORK(&conn->security_timer, security_timeout);

                INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

        conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* ---- Socket interface ---- */

/* Find socket with psm and source / destination bdaddr.
 * An exact src/dst match wins immediately; otherwise the closest
 * wildcard (BDADDR_ANY) match is remembered and returned.
 * Returns closest match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
        struct l2cap_chan *c, *c1 = NULL;

        read_lock(&chan_list_lock);

        list_for_each_entry(c, &chan_list, global_l) {
                struct sock *sk = c->sk;

                if (state && c->state != state)

                if (c->psm == psm) {
                        int src_match, dst_match;
                        int src_any, dst_any;

                        /* Exact match. */
                        src_match = !bacmp(&bt_sk(sk)->src, src);
                        dst_match = !bacmp(&bt_sk(sk)->dst, dst);
                        if (src_match && dst_match) {
                                read_unlock(&chan_list_lock);

                        /* Closest match */
                        src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
                        dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
                        if ((src_match && dst_any) || (src_any && dst_match) ||
                            (src_any && dst_any))

        read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan to @dst on PSM @psm (or fixed
 * CID @cid): validates PSM/mode/state, resolves the route, creates the
 * ACL or LE link, attaches the channel and starts L2CAP-level setup. */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                       bdaddr_t *dst, u8 dst_type)
        struct sock *sk = chan->sk;
        bdaddr_t *src = &bt_sk(sk)->src;
        struct l2cap_conn *conn;
        struct hci_conn *hcon;
        struct hci_dev *hdev;

        BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
               dst_type, __le16_to_cpu(chan->psm));

        hdev = hci_get_route(dst, src);
                return -EHOSTUNREACH;

        l2cap_chan_lock(chan);

        /* PSM must be odd and lsb of upper byte must be 0 */
        if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
            chan->chan_type != L2CAP_CHAN_RAW) {

        /* Connection-oriented channels need either a PSM or a CID */
        if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {

        switch (chan->mode) {
        case L2CAP_MODE_BASIC:
        case L2CAP_MODE_ERTM:
        case L2CAP_MODE_STREAMING:

        switch (chan->state) {
                /* Already connecting */

                /* Already connected */

        /* Set destination address and psm */
        bacpy(&bt_sk(sk)->dst, dst);

        auth_type = l2cap_get_auth_type(chan);

        /* The LE data CID selects an LE link, anything else an ACL link */
        if (chan->dcid == L2CAP_CID_LE_DATA)
                hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
                                   chan->sec_level, auth_type);
                hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
                                   chan->sec_level, auth_type);

                err = PTR_ERR(hcon);

        conn = l2cap_conn_add(hcon, 0);

        if (hcon->type == LE_LINK) {
                /* Only one LE data channel per connection is allowed */
                if (!list_empty(&conn->chan_l)) {

        /* Update source addr of the socket */
        bacpy(src, conn->src);

        /* chan_add takes conn->chan_lock, so drop the channel lock first */
        l2cap_chan_unlock(chan);
        l2cap_chan_add(conn, chan);
        l2cap_chan_lock(chan);

        l2cap_state_change(chan, BT_CONNECT);
        __set_chan_timer(chan, sk->sk_sndtimeo);

        if (hcon->state == BT_CONNECTED) {
                if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
                        __clear_chan_timer(chan);
                        if (l2cap_chan_check_security(chan))
                                l2cap_state_change(chan, BT_CONNECTED);
                        l2cap_do_start(chan);

        l2cap_chan_unlock(chan);
        hci_dev_unlock(hdev);
1561 int __l2cap_wait_ack(struct sock *sk)
1563 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1564 DECLARE_WAITQUEUE(wait, current);
1568 add_wait_queue(sk_sleep(sk), &wait);
1569 set_current_state(TASK_INTERRUPTIBLE);
1570 while (chan->unacked_frames > 0 && chan->conn) {
1574 if (signal_pending(current)) {
1575 err = sock_intr_errno(timeo);
1580 timeo = schedule_timeout(timeo);
1582 set_current_state(TASK_INTERRUPTIBLE);
1584 err = sock_error(sk);
1588 set_current_state(TASK_RUNNING);
1589 remove_wait_queue(sk_sleep(sk), &wait);
1593 static void l2cap_monitor_timeout(struct work_struct *work)
1595 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1596 monitor_timer.work);
1598 BT_DBG("chan %p", chan);
1600 l2cap_chan_lock(chan);
1603 l2cap_chan_unlock(chan);
1604 l2cap_chan_put(chan);
1608 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1610 l2cap_chan_unlock(chan);
1611 l2cap_chan_put(chan);
1614 static void l2cap_retrans_timeout(struct work_struct *work)
1616 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1617 retrans_timer.work);
1619 BT_DBG("chan %p", chan);
1621 l2cap_chan_lock(chan);
1624 l2cap_chan_unlock(chan);
1625 l2cap_chan_put(chan);
1629 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1630 l2cap_chan_unlock(chan);
1631 l2cap_chan_put(chan);
1634 static void l2cap_streaming_send(struct l2cap_chan *chan,
1635 struct sk_buff_head *skbs)
1637 struct sk_buff *skb;
1638 struct l2cap_ctrl *control;
1640 BT_DBG("chan %p, skbs %p", chan, skbs);
1642 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1644 while (!skb_queue_empty(&chan->tx_q)) {
1646 skb = skb_dequeue(&chan->tx_q);
1648 bt_cb(skb)->control.retries = 1;
1649 control = &bt_cb(skb)->control;
1651 control->reqseq = 0;
1652 control->txseq = chan->next_tx_seq;
1654 __pack_control(chan, control, skb);
1656 if (chan->fcs == L2CAP_FCS_CRC16) {
1657 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1658 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1661 l2cap_do_send(chan, skb);
1663 BT_DBG("Sent txseq %u", control->txseq);
1665 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1666 chan->frames_sent++;
1670 static int l2cap_ertm_send(struct l2cap_chan *chan)
1672 struct sk_buff *skb, *tx_skb;
1673 struct l2cap_ctrl *control;
1676 BT_DBG("chan %p", chan);
1678 if (chan->state != BT_CONNECTED)
1681 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1684 while (chan->tx_send_head &&
1685 chan->unacked_frames < chan->remote_tx_win &&
1686 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1688 skb = chan->tx_send_head;
1690 bt_cb(skb)->control.retries = 1;
1691 control = &bt_cb(skb)->control;
1693 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1696 control->reqseq = chan->buffer_seq;
1697 chan->last_acked_seq = chan->buffer_seq;
1698 control->txseq = chan->next_tx_seq;
1700 __pack_control(chan, control, skb);
1702 if (chan->fcs == L2CAP_FCS_CRC16) {
1703 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1704 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1707 /* Clone after data has been modified. Data is assumed to be
1708 read-only (for locking purposes) on cloned sk_buffs.
1710 tx_skb = skb_clone(skb, GFP_KERNEL);
1715 __set_retrans_timer(chan);
1717 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1718 chan->unacked_frames++;
1719 chan->frames_sent++;
1722 if (skb_queue_is_last(&chan->tx_q, skb))
1723 chan->tx_send_head = NULL;
1725 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1727 l2cap_do_send(chan, tx_skb);
1728 BT_DBG("Sent txseq %u", control->txseq);
1731 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1732 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1737 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1739 struct l2cap_ctrl control;
1740 struct sk_buff *skb;
1741 struct sk_buff *tx_skb;
1744 BT_DBG("chan %p", chan);
1746 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1749 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1750 seq = l2cap_seq_list_pop(&chan->retrans_list);
1752 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1754 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1759 bt_cb(skb)->control.retries++;
1760 control = bt_cb(skb)->control;
1762 if (chan->max_tx != 0 &&
1763 bt_cb(skb)->control.retries > chan->max_tx) {
1764 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1765 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1766 l2cap_seq_list_clear(&chan->retrans_list);
1770 control.reqseq = chan->buffer_seq;
1771 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1776 if (skb_cloned(skb)) {
1777 /* Cloned sk_buffs are read-only, so we need a
1780 tx_skb = skb_copy(skb, GFP_ATOMIC);
1782 tx_skb = skb_clone(skb, GFP_ATOMIC);
1786 l2cap_seq_list_clear(&chan->retrans_list);
1790 /* Update skb contents */
1791 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1792 put_unaligned_le32(__pack_extended_control(&control),
1793 tx_skb->data + L2CAP_HDR_SIZE);
1795 put_unaligned_le16(__pack_enhanced_control(&control),
1796 tx_skb->data + L2CAP_HDR_SIZE);
1799 if (chan->fcs == L2CAP_FCS_CRC16) {
1800 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1801 put_unaligned_le16(fcs, skb_put(tx_skb,
1805 l2cap_do_send(chan, tx_skb);
1807 BT_DBG("Resent txseq %d", control.txseq);
1809 chan->last_acked_seq = chan->buffer_seq;
1813 static void l2cap_retransmit(struct l2cap_chan *chan,
1814 struct l2cap_ctrl *control)
1816 BT_DBG("chan %p, control %p", chan, control);
1818 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1819 l2cap_ertm_resend(chan);
1822 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1823 struct l2cap_ctrl *control)
1825 struct sk_buff *skb;
1827 BT_DBG("chan %p, control %p", chan, control);
1830 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1832 l2cap_seq_list_clear(&chan->retrans_list);
1834 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1837 if (chan->unacked_frames) {
1838 skb_queue_walk(&chan->tx_q, skb) {
1839 if (bt_cb(skb)->control.txseq == control->reqseq ||
1840 skb == chan->tx_send_head)
1844 skb_queue_walk_from(&chan->tx_q, skb) {
1845 if (skb == chan->tx_send_head)
1848 l2cap_seq_list_append(&chan->retrans_list,
1849 bt_cb(skb)->control.txseq);
1852 l2cap_ertm_resend(chan);
1856 static void l2cap_send_ack(struct l2cap_chan *chan)
1858 struct l2cap_ctrl control;
1859 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1860 chan->last_acked_seq);
1863 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1864 chan, chan->last_acked_seq, chan->buffer_seq);
1866 memset(&control, 0, sizeof(control));
1869 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1870 chan->rx_state == L2CAP_RX_STATE_RECV) {
1871 __clear_ack_timer(chan);
1872 control.super = L2CAP_SUPER_RNR;
1873 control.reqseq = chan->buffer_seq;
1874 l2cap_send_sframe(chan, &control);
1876 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1877 l2cap_ertm_send(chan);
1878 /* If any i-frames were sent, they included an ack */
1879 if (chan->buffer_seq == chan->last_acked_seq)
1883 /* Ack now if the window is 3/4ths full.
1884 * Calculate without mul or div
1886 threshold = chan->ack_win;
1887 threshold += threshold << 1;
1890 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1893 if (frames_to_ack >= threshold) {
1894 __clear_ack_timer(chan);
1895 control.super = L2CAP_SUPER_RR;
1896 control.reqseq = chan->buffer_seq;
1897 l2cap_send_sframe(chan, &control);
1902 __set_ack_timer(chan);
1906 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1907 struct msghdr *msg, int len,
1908 int count, struct sk_buff *skb)
1910 struct l2cap_conn *conn = chan->conn;
1911 struct sk_buff **frag;
1914 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1920 /* Continuation fragments (no L2CAP header) */
1921 frag = &skb_shinfo(skb)->frag_list;
1923 struct sk_buff *tmp;
1925 count = min_t(unsigned int, conn->mtu, len);
1927 tmp = chan->ops->alloc_skb(chan, count,
1928 msg->msg_flags & MSG_DONTWAIT);
1930 return PTR_ERR(tmp);
1934 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1937 (*frag)->priority = skb->priority;
1942 skb->len += (*frag)->len;
1943 skb->data_len += (*frag)->len;
1945 frag = &(*frag)->next;
1951 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1952 struct msghdr *msg, size_t len,
1955 struct l2cap_conn *conn = chan->conn;
1956 struct sk_buff *skb;
1957 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1958 struct l2cap_hdr *lh;
1960 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
1962 count = min_t(unsigned int, (conn->mtu - hlen), len);
1964 skb = chan->ops->alloc_skb(chan, count + hlen,
1965 msg->msg_flags & MSG_DONTWAIT);
1969 skb->priority = priority;
1971 /* Create L2CAP header */
1972 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1973 lh->cid = cpu_to_le16(chan->dcid);
1974 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1975 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1977 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1978 if (unlikely(err < 0)) {
1980 return ERR_PTR(err);
1985 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1986 struct msghdr *msg, size_t len,
1989 struct l2cap_conn *conn = chan->conn;
1990 struct sk_buff *skb;
1992 struct l2cap_hdr *lh;
1994 BT_DBG("chan %p len %zu", chan, len);
1996 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1998 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1999 msg->msg_flags & MSG_DONTWAIT);
2003 skb->priority = priority;
2005 /* Create L2CAP header */
2006 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2007 lh->cid = cpu_to_le16(chan->dcid);
2008 lh->len = cpu_to_le16(len);
2010 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2011 if (unlikely(err < 0)) {
2013 return ERR_PTR(err);
2018 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2019 struct msghdr *msg, size_t len,
2022 struct l2cap_conn *conn = chan->conn;
2023 struct sk_buff *skb;
2024 int err, count, hlen;
2025 struct l2cap_hdr *lh;
2027 BT_DBG("chan %p len %zu", chan, len);
2030 return ERR_PTR(-ENOTCONN);
2032 hlen = __ertm_hdr_size(chan);
2035 hlen += L2CAP_SDULEN_SIZE;
2037 if (chan->fcs == L2CAP_FCS_CRC16)
2038 hlen += L2CAP_FCS_SIZE;
2040 count = min_t(unsigned int, (conn->mtu - hlen), len);
2042 skb = chan->ops->alloc_skb(chan, count + hlen,
2043 msg->msg_flags & MSG_DONTWAIT);
2047 /* Create L2CAP header */
2048 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2049 lh->cid = cpu_to_le16(chan->dcid);
2050 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2052 /* Control header is populated later */
2053 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2054 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2056 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2059 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2061 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2062 if (unlikely(err < 0)) {
2064 return ERR_PTR(err);
2067 bt_cb(skb)->control.fcs = chan->fcs;
2068 bt_cb(skb)->control.retries = 0;
2072 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2073 struct sk_buff_head *seg_queue,
2074 struct msghdr *msg, size_t len)
2076 struct sk_buff *skb;
2081 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2083 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2084 * so fragmented skbs are not used. The HCI layer's handling
2085 * of fragmented skbs is not compatible with ERTM's queueing.
2088 /* PDU size is derived from the HCI MTU */
2089 pdu_len = chan->conn->mtu;
2091 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2093 /* Adjust for largest possible L2CAP overhead. */
2095 pdu_len -= L2CAP_FCS_SIZE;
2097 pdu_len -= __ertm_hdr_size(chan);
2099 /* Remote device may have requested smaller PDUs */
2100 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2102 if (len <= pdu_len) {
2103 sar = L2CAP_SAR_UNSEGMENTED;
2107 sar = L2CAP_SAR_START;
2109 pdu_len -= L2CAP_SDULEN_SIZE;
2113 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2116 __skb_queue_purge(seg_queue);
2117 return PTR_ERR(skb);
2120 bt_cb(skb)->control.sar = sar;
2121 __skb_queue_tail(seg_queue, skb);
2126 pdu_len += L2CAP_SDULEN_SIZE;
2129 if (len <= pdu_len) {
2130 sar = L2CAP_SAR_END;
2133 sar = L2CAP_SAR_CONTINUE;
2140 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2143 struct sk_buff *skb;
2145 struct sk_buff_head seg_queue;
2147 /* Connectionless channel */
2148 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2149 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2151 return PTR_ERR(skb);
2153 l2cap_do_send(chan, skb);
2157 switch (chan->mode) {
2158 case L2CAP_MODE_BASIC:
2159 /* Check outgoing MTU */
2160 if (len > chan->omtu)
2163 /* Create a basic PDU */
2164 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2166 return PTR_ERR(skb);
2168 l2cap_do_send(chan, skb);
2172 case L2CAP_MODE_ERTM:
2173 case L2CAP_MODE_STREAMING:
2174 /* Check outgoing MTU */
2175 if (len > chan->omtu) {
2180 __skb_queue_head_init(&seg_queue);
2182 /* Do segmentation before calling in to the state machine,
2183 * since it's possible to block while waiting for memory
2186 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2188 /* The channel could have been closed while segmenting,
2189 * check that it is still connected.
2191 if (chan->state != BT_CONNECTED) {
2192 __skb_queue_purge(&seg_queue);
2199 if (chan->mode == L2CAP_MODE_ERTM)
2200 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2202 l2cap_streaming_send(chan, &seg_queue);
2206 /* If the skbs were not queued for sending, they'll still be in
2207 * seg_queue and need to be purged.
2209 __skb_queue_purge(&seg_queue);
2213 BT_DBG("bad state %1.1x", chan->mode);
2220 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2222 struct l2cap_ctrl control;
2225 BT_DBG("chan %p, txseq %u", chan, txseq);
2227 memset(&control, 0, sizeof(control));
2229 control.super = L2CAP_SUPER_SREJ;
2231 for (seq = chan->expected_tx_seq; seq != txseq;
2232 seq = __next_seq(chan, seq)) {
2233 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2234 control.reqseq = seq;
2235 l2cap_send_sframe(chan, &control);
2236 l2cap_seq_list_append(&chan->srej_list, seq);
2240 chan->expected_tx_seq = __next_seq(chan, txseq);
2243 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2245 struct l2cap_ctrl control;
2247 BT_DBG("chan %p", chan);
2249 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2252 memset(&control, 0, sizeof(control));
2254 control.super = L2CAP_SUPER_SREJ;
2255 control.reqseq = chan->srej_list.tail;
2256 l2cap_send_sframe(chan, &control);
2259 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2261 struct l2cap_ctrl control;
2265 BT_DBG("chan %p, txseq %u", chan, txseq);
2267 memset(&control, 0, sizeof(control));
2269 control.super = L2CAP_SUPER_SREJ;
2271 /* Capture initial list head to allow only one pass through the list. */
2272 initial_head = chan->srej_list.head;
2275 seq = l2cap_seq_list_pop(&chan->srej_list);
2276 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2279 control.reqseq = seq;
2280 l2cap_send_sframe(chan, &control);
2281 l2cap_seq_list_append(&chan->srej_list, seq);
2282 } while (chan->srej_list.head != initial_head);
2285 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2287 struct sk_buff *acked_skb;
2290 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2292 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2295 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2296 chan->expected_ack_seq, chan->unacked_frames);
2298 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2299 ackseq = __next_seq(chan, ackseq)) {
2301 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2303 skb_unlink(acked_skb, &chan->tx_q);
2304 kfree_skb(acked_skb);
2305 chan->unacked_frames--;
2309 chan->expected_ack_seq = reqseq;
2311 if (chan->unacked_frames == 0)
2312 __clear_retrans_timer(chan);
2314 BT_DBG("unacked_frames %u", chan->unacked_frames);
2317 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2319 BT_DBG("chan %p", chan);
2321 chan->expected_tx_seq = chan->buffer_seq;
2322 l2cap_seq_list_clear(&chan->srej_list);
2323 skb_queue_purge(&chan->srej_q);
2324 chan->rx_state = L2CAP_RX_STATE_RECV;
2327 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2328 struct l2cap_ctrl *control,
2329 struct sk_buff_head *skbs, u8 event)
2331 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2335 case L2CAP_EV_DATA_REQUEST:
2336 if (chan->tx_send_head == NULL)
2337 chan->tx_send_head = skb_peek(skbs);
2339 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2340 l2cap_ertm_send(chan);
2342 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2343 BT_DBG("Enter LOCAL_BUSY");
2344 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2346 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2347 /* The SREJ_SENT state must be aborted if we are to
2348 * enter the LOCAL_BUSY state.
2350 l2cap_abort_rx_srej_sent(chan);
2353 l2cap_send_ack(chan);
2356 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2357 BT_DBG("Exit LOCAL_BUSY");
2358 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2360 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2361 struct l2cap_ctrl local_control;
2363 memset(&local_control, 0, sizeof(local_control));
2364 local_control.sframe = 1;
2365 local_control.super = L2CAP_SUPER_RR;
2366 local_control.poll = 1;
2367 local_control.reqseq = chan->buffer_seq;
2368 l2cap_send_sframe(chan, &local_control);
2370 chan->retry_count = 1;
2371 __set_monitor_timer(chan);
2372 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2375 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2376 l2cap_process_reqseq(chan, control->reqseq);
2378 case L2CAP_EV_EXPLICIT_POLL:
2379 l2cap_send_rr_or_rnr(chan, 1);
2380 chan->retry_count = 1;
2381 __set_monitor_timer(chan);
2382 __clear_ack_timer(chan);
2383 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2385 case L2CAP_EV_RETRANS_TO:
2386 l2cap_send_rr_or_rnr(chan, 1);
2387 chan->retry_count = 1;
2388 __set_monitor_timer(chan);
2389 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2391 case L2CAP_EV_RECV_FBIT:
2392 /* Nothing to process */
2399 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2400 struct l2cap_ctrl *control,
2401 struct sk_buff_head *skbs, u8 event)
2403 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2407 case L2CAP_EV_DATA_REQUEST:
2408 if (chan->tx_send_head == NULL)
2409 chan->tx_send_head = skb_peek(skbs);
2410 /* Queue data, but don't send. */
2411 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2413 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2414 BT_DBG("Enter LOCAL_BUSY");
2415 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2417 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2418 /* The SREJ_SENT state must be aborted if we are to
2419 * enter the LOCAL_BUSY state.
2421 l2cap_abort_rx_srej_sent(chan);
2424 l2cap_send_ack(chan);
2427 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2428 BT_DBG("Exit LOCAL_BUSY");
2429 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2431 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2432 struct l2cap_ctrl local_control;
2433 memset(&local_control, 0, sizeof(local_control));
2434 local_control.sframe = 1;
2435 local_control.super = L2CAP_SUPER_RR;
2436 local_control.poll = 1;
2437 local_control.reqseq = chan->buffer_seq;
2438 l2cap_send_sframe(chan, &local_control);
2440 chan->retry_count = 1;
2441 __set_monitor_timer(chan);
2442 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2445 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2446 l2cap_process_reqseq(chan, control->reqseq);
2450 case L2CAP_EV_RECV_FBIT:
2451 if (control && control->final) {
2452 __clear_monitor_timer(chan);
2453 if (chan->unacked_frames > 0)
2454 __set_retrans_timer(chan);
2455 chan->retry_count = 0;
2456 chan->tx_state = L2CAP_TX_STATE_XMIT;
2457 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2460 case L2CAP_EV_EXPLICIT_POLL:
2463 case L2CAP_EV_MONITOR_TO:
2464 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2465 l2cap_send_rr_or_rnr(chan, 1);
2466 __set_monitor_timer(chan);
2467 chan->retry_count++;
2469 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2477 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2478 struct sk_buff_head *skbs, u8 event)
2480 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2481 chan, control, skbs, event, chan->tx_state);
2483 switch (chan->tx_state) {
2484 case L2CAP_TX_STATE_XMIT:
2485 l2cap_tx_state_xmit(chan, control, skbs, event);
2487 case L2CAP_TX_STATE_WAIT_F:
2488 l2cap_tx_state_wait_f(chan, control, skbs, event);
2496 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2497 struct l2cap_ctrl *control)
2499 BT_DBG("chan %p, control %p", chan, control);
2500 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2503 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2504 struct l2cap_ctrl *control)
2506 BT_DBG("chan %p, control %p", chan, control);
2507 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2510 /* Copy frame to all raw sockets on that connection */
2511 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2513 struct sk_buff *nskb;
2514 struct l2cap_chan *chan;
2516 BT_DBG("conn %p", conn);
2518 mutex_lock(&conn->chan_lock);
2520 list_for_each_entry(chan, &conn->chan_l, list) {
2521 struct sock *sk = chan->sk;
2522 if (chan->chan_type != L2CAP_CHAN_RAW)
2525 /* Don't send frame to the socket it came from */
2528 nskb = skb_clone(skb, GFP_ATOMIC);
2532 if (chan->ops->recv(chan, nskb))
2536 mutex_unlock(&conn->chan_lock);
2539 /* ---- L2CAP signalling commands ---- */
2540 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2541 u8 ident, u16 dlen, void *data)
2543 struct sk_buff *skb, **frag;
2544 struct l2cap_cmd_hdr *cmd;
2545 struct l2cap_hdr *lh;
2548 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2549 conn, code, ident, dlen);
2551 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2552 count = min_t(unsigned int, conn->mtu, len);
2554 skb = bt_skb_alloc(count, GFP_ATOMIC);
2558 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2559 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2561 if (conn->hcon->type == LE_LINK)
2562 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2564 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2566 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2569 cmd->len = cpu_to_le16(dlen);
2572 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2573 memcpy(skb_put(skb, count), data, count);
2579 /* Continuation fragments (no L2CAP header) */
2580 frag = &skb_shinfo(skb)->frag_list;
2582 count = min_t(unsigned int, conn->mtu, len);
2584 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2588 memcpy(skb_put(*frag, count), data, count);
2593 frag = &(*frag)->next;
2603 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2605 struct l2cap_conf_opt *opt = *ptr;
2608 len = L2CAP_CONF_OPT_SIZE + opt->len;
2616 *val = *((u8 *) opt->val);
2620 *val = get_unaligned_le16(opt->val);
2624 *val = get_unaligned_le32(opt->val);
2628 *val = (unsigned long) opt->val;
2632 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2636 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2638 struct l2cap_conf_opt *opt = *ptr;
2640 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2647 *((u8 *) opt->val) = val;
2651 put_unaligned_le16(val, opt->val);
2655 put_unaligned_le32(val, opt->val);
2659 memcpy(opt->val, (void *) val, len);
2663 *ptr += L2CAP_CONF_OPT_SIZE + len;
2666 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2668 struct l2cap_conf_efs efs;
2670 switch (chan->mode) {
2671 case L2CAP_MODE_ERTM:
2672 efs.id = chan->local_id;
2673 efs.stype = chan->local_stype;
2674 efs.msdu = cpu_to_le16(chan->local_msdu);
2675 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2676 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2677 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2680 case L2CAP_MODE_STREAMING:
2682 efs.stype = L2CAP_SERV_BESTEFFORT;
2683 efs.msdu = cpu_to_le16(chan->local_msdu);
2684 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2693 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2694 (unsigned long) &efs);
2697 static void l2cap_ack_timeout(struct work_struct *work)
2699 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2703 BT_DBG("chan %p", chan);
2705 l2cap_chan_lock(chan);
2707 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2708 chan->last_acked_seq);
2711 l2cap_send_rr_or_rnr(chan, 0);
2713 l2cap_chan_unlock(chan);
2714 l2cap_chan_put(chan);
2717 int l2cap_ertm_init(struct l2cap_chan *chan)
2721 chan->next_tx_seq = 0;
2722 chan->expected_tx_seq = 0;
2723 chan->expected_ack_seq = 0;
2724 chan->unacked_frames = 0;
2725 chan->buffer_seq = 0;
2726 chan->frames_sent = 0;
2727 chan->last_acked_seq = 0;
2729 chan->sdu_last_frag = NULL;
2732 skb_queue_head_init(&chan->tx_q);
2734 if (chan->mode != L2CAP_MODE_ERTM)
2737 chan->rx_state = L2CAP_RX_STATE_RECV;
2738 chan->tx_state = L2CAP_TX_STATE_XMIT;
2740 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2741 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2742 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2744 skb_queue_head_init(&chan->srej_q);
2746 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2750 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2752 l2cap_seq_list_free(&chan->srej_list);
2757 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2760 case L2CAP_MODE_STREAMING:
2761 case L2CAP_MODE_ERTM:
2762 if (l2cap_mode_supported(mode, remote_feat_mask))
2766 return L2CAP_MODE_BASIC;
2770 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2772 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2775 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2777 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2780 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2782 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2783 __l2cap_ews_supported(chan)) {
2784 /* use extended control field */
2785 set_bit(FLAG_EXT_CTRL, &chan->flags);
2786 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2788 chan->tx_win = min_t(u16, chan->tx_win,
2789 L2CAP_DEFAULT_TX_WINDOW);
2790 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2792 chan->ack_win = chan->tx_win;
2795 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2797 struct l2cap_conf_req *req = data;
2798 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2799 void *ptr = req->data;
2802 BT_DBG("chan %p", chan);
2804 if (chan->num_conf_req || chan->num_conf_rsp)
2807 switch (chan->mode) {
2808 case L2CAP_MODE_STREAMING:
2809 case L2CAP_MODE_ERTM:
2810 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2813 if (__l2cap_efs_supported(chan))
2814 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2818 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2823 if (chan->imtu != L2CAP_DEFAULT_MTU)
2824 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2826 switch (chan->mode) {
2827 case L2CAP_MODE_BASIC:
2828 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2829 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2832 rfc.mode = L2CAP_MODE_BASIC;
2834 rfc.max_transmit = 0;
2835 rfc.retrans_timeout = 0;
2836 rfc.monitor_timeout = 0;
2837 rfc.max_pdu_size = 0;
2839 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2840 (unsigned long) &rfc);
2843 case L2CAP_MODE_ERTM:
2844 rfc.mode = L2CAP_MODE_ERTM;
2845 rfc.max_transmit = chan->max_tx;
2846 rfc.retrans_timeout = 0;
2847 rfc.monitor_timeout = 0;
2849 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2850 L2CAP_EXT_HDR_SIZE -
2853 rfc.max_pdu_size = cpu_to_le16(size);
2855 l2cap_txwin_setup(chan);
2857 rfc.txwin_size = min_t(u16, chan->tx_win,
2858 L2CAP_DEFAULT_TX_WINDOW);
2860 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2861 (unsigned long) &rfc);
2863 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2864 l2cap_add_opt_efs(&ptr, chan);
2866 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2869 if (chan->fcs == L2CAP_FCS_NONE ||
2870 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2871 chan->fcs = L2CAP_FCS_NONE;
2872 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2875 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2876 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2880 case L2CAP_MODE_STREAMING:
2881 l2cap_txwin_setup(chan);
2882 rfc.mode = L2CAP_MODE_STREAMING;
2884 rfc.max_transmit = 0;
2885 rfc.retrans_timeout = 0;
2886 rfc.monitor_timeout = 0;
2888 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2889 L2CAP_EXT_HDR_SIZE -
2892 rfc.max_pdu_size = cpu_to_le16(size);
2894 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2895 (unsigned long) &rfc);
2897 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2898 l2cap_add_opt_efs(&ptr, chan);
2900 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2903 if (chan->fcs == L2CAP_FCS_NONE ||
2904 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2905 chan->fcs = L2CAP_FCS_NONE;
2906 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2911 req->dcid = cpu_to_le16(chan->dcid);
2912 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request options
 * (chan->conf_req / chan->conf_len) and build our Configure Response
 * into 'data'.  Handles MTU, flush timeout, QoS, RFC, FCS, EFS and
 * extended-window-size (EWS) options, then selects/validates the
 * channel mode and fills in the output options.
 * Returns -ECONNREFUSED when the requested configuration cannot be
 * accepted at all.
 * NOTE(review): this excerpt elides several original lines (breaks,
 * declarations, closing braces); comments describe only visible code.
 */
2917 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2919 struct l2cap_conf_rsp *rsp = data;
2920 void *ptr = rsp->data;
2921 void *req = chan->conf_req;
2922 int len = chan->conf_len;
2923 int type, hint, olen;
2925 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2926 struct l2cap_conf_efs efs;
2928 u16 mtu = L2CAP_DEFAULT_MTU;
2929 u16 result = L2CAP_CONF_SUCCESS;
2932 BT_DBG("chan %p", chan);
/* Walk every option in the request; l2cap_get_conf_opt() advances 'req'
 * and reports the option type, length and value. */
2934 while (len >= L2CAP_CONF_OPT_SIZE) {
2935 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* High bit marks a "hint" option the peer allows us to ignore. */
2937 hint = type & L2CAP_CONF_HINT;
2938 type &= L2CAP_CONF_MASK;
2941 case L2CAP_CONF_MTU:
2945 case L2CAP_CONF_FLUSH_TO:
2946 chan->flush_to = val;
2949 case L2CAP_CONF_QOS:
2952 case L2CAP_CONF_RFC:
/* Only copy the RFC option when its length matches our struct. */
2953 if (olen == sizeof(rfc))
2954 memcpy(&rfc, (void *) val, olen);
2957 case L2CAP_CONF_FCS:
2958 if (val == L2CAP_FCS_NONE)
2959 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2962 case L2CAP_CONF_EFS:
2964 if (olen == sizeof(efs))
2965 memcpy(&efs, (void *) val, olen);
2968 case L2CAP_CONF_EWS:
2970 return -ECONNREFUSED;
/* Peer asked for extended windows: switch to extended control field. */
2972 set_bit(FLAG_EXT_CTRL, &chan->flags);
2973 set_bit(CONF_EWS_RECV, &chan->conf_state);
2974 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2975 chan->remote_tx_win = val;
/* Unknown non-hint option: reject it by echoing its type back. */
2982 result = L2CAP_CONF_UNKNOWN;
2983 *((u8 *) ptr++) = type;
2988 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2991 switch (chan->mode) {
2992 case L2CAP_MODE_STREAMING:
2993 case L2CAP_MODE_ERTM:
/* If we are not a state-2 device, fall back to a mode both sides
 * support, derived from the peer's RFC and the connection features. */
2994 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2995 chan->mode = l2cap_select_mode(rfc.mode,
2996 chan->conn->feat_mask);
3001 if (__l2cap_efs_supported(chan))
3002 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3004 return -ECONNREFUSED;
3007 if (chan->mode != rfc.mode)
3008 return -ECONNREFUSED;
/* Mode mismatch: propose our mode back once, then give up. */
3014 if (chan->mode != rfc.mode) {
3015 result = L2CAP_CONF_UNACCEPT;
3016 rfc.mode = chan->mode;
3018 if (chan->num_conf_rsp == 1)
3019 return -ECONNREFUSED;
3021 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3022 sizeof(rfc), (unsigned long) &rfc);
3025 if (result == L2CAP_CONF_SUCCESS) {
3026 /* Configure output options and let the other side know
3027 * which ones we don't like. */
3029 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3030 result = L2CAP_CONF_UNACCEPT;
3033 set_bit(CONF_MTU_DONE, &chan->conf_state);
3035 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must be NO_TRAFFIC on one side or match ours. */
3038 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3039 efs.stype != L2CAP_SERV_NOTRAFIC &&
3040 efs.stype != chan->local_stype) {
3042 result = L2CAP_CONF_UNACCEPT;
3044 if (chan->num_conf_req >= 1)
3045 return -ECONNREFUSED;
3047 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3049 (unsigned long) &efs);
3051 /* Send PENDING Conf Rsp */
3052 result = L2CAP_CONF_PENDING;
3053 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3058 case L2CAP_MODE_BASIC:
/* Basic mode carries no FCS. */
3059 chan->fcs = L2CAP_FCS_NONE;
3060 set_bit(CONF_MODE_DONE, &chan->conf_state);
3063 case L2CAP_MODE_ERTM:
/* EWS option, when present, already set remote_tx_win above. */
3064 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3065 chan->remote_tx_win = rfc.txwin_size;
3067 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3069 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS so a full PDU still fits in the ACL MTU. */
3071 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3073 L2CAP_EXT_HDR_SIZE -
3076 rfc.max_pdu_size = cpu_to_le16(size);
3077 chan->remote_mps = size;
3079 rfc.retrans_timeout =
3080 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3081 rfc.monitor_timeout =
3082 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3084 set_bit(CONF_MODE_DONE, &chan->conf_state);
3086 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3087 sizeof(rfc), (unsigned long) &rfc);
/* Record the peer's EFS parameters and echo the option back. */
3089 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3090 chan->remote_id = efs.id;
3091 chan->remote_stype = efs.stype;
3092 chan->remote_msdu = le16_to_cpu(efs.msdu);
3093 chan->remote_flush_to =
3094 le32_to_cpu(efs.flush_to);
3095 chan->remote_acc_lat =
3096 le32_to_cpu(efs.acc_lat);
3097 chan->remote_sdu_itime =
3098 le32_to_cpu(efs.sdu_itime);
3099 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3100 sizeof(efs), (unsigned long) &efs);
3104 case L2CAP_MODE_STREAMING:
3105 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3107 L2CAP_EXT_HDR_SIZE -
3110 rfc.max_pdu_size = cpu_to_le16(size);
3111 chan->remote_mps = size;
3113 set_bit(CONF_MODE_DONE, &chan->conf_state);
3115 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3116 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode: reject and advertise our mode in a zeroed RFC. */
3121 result = L2CAP_CONF_UNACCEPT;
3123 memset(&rfc, 0, sizeof(rfc));
3124 rfc.mode = chan->mode;
3127 if (result == L2CAP_CONF_SUCCESS)
3128 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3130 rsp->scid = cpu_to_le16(chan->dcid);
3131 rsp->result = cpu_to_le16(result);
3132 rsp->flags = __constant_cpu_to_le16(0);
/* Parse the peer's Configure Response options ('rsp'/'len') and build
 * a follow-up Configure Request into 'data', adjusting our channel
 * parameters (MTU, flush timeout, RFC, EWS, EFS) to what the peer will
 * accept.  '*result' carries the response result in and may be updated.
 * Returns -ECONNREFUSED on an unacceptable response.
 * NOTE(review): some original lines (breaks, declarations) are elided
 * in this excerpt; comments describe only the visible code.
 */
3137 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3139 struct l2cap_conf_req *req = data;
3140 void *ptr = req->data;
3143 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3144 struct l2cap_conf_efs efs;
3146 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3148 while (len >= L2CAP_CONF_OPT_SIZE) {
3149 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3152 case L2CAP_CONF_MTU:
/* Peer proposed a too-small MTU: refuse and fall back to the minimum. */
3153 if (val < L2CAP_DEFAULT_MIN_MTU) {
3154 *result = L2CAP_CONF_UNACCEPT;
3155 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3158 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3161 case L2CAP_CONF_FLUSH_TO:
3162 chan->flush_to = val;
3163 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3167 case L2CAP_CONF_RFC:
3168 if (olen == sizeof(rfc))
3169 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not change mode mid-negotiation. */
3171 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3172 rfc.mode != chan->mode)
3173 return -ECONNREFUSED;
3177 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3178 sizeof(rfc), (unsigned long) &rfc);
3181 case L2CAP_CONF_EWS:
/* Never grow our ack window beyond what we already offered. */
3182 chan->ack_win = min_t(u16, val, chan->ack_win);
3183 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3187 case L2CAP_CONF_EFS:
3188 if (olen == sizeof(efs))
3189 memcpy(&efs, (void *)val, olen);
3191 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3192 efs.stype != L2CAP_SERV_NOTRAFIC &&
3193 efs.stype != chan->local_stype)
3194 return -ECONNREFUSED;
3196 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3197 sizeof(efs), (unsigned long) &efs);
/* A basic-mode channel cannot be renegotiated into another mode. */
3202 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3203 return -ECONNREFUSED;
3205 chan->mode = rfc.mode;
3207 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3209 case L2CAP_MODE_ERTM:
3210 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3211 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3212 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3213 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3214 chan->ack_win = min_t(u16, chan->ack_win,
/* Commit the EFS parameters the peer accepted. */
3217 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3218 chan->local_msdu = le16_to_cpu(efs.msdu);
3219 chan->local_sdu_itime =
3220 le32_to_cpu(efs.sdu_itime);
3221 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3222 chan->local_flush_to =
3223 le32_to_cpu(efs.flush_to);
3227 case L2CAP_MODE_STREAMING:
3228 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3232 req->dcid = cpu_to_le16(chan->dcid);
3233 req->flags = __constant_cpu_to_le16(0);
/* Fill in a minimal Configure Response header (scid/result/flags) in
 * 'data'; returns the response length (return statement elided in this
 * excerpt).
 */
3238 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3240 struct l2cap_conf_rsp *rsp = data;
3241 void *ptr = rsp->data;
3243 BT_DBG("chan %p", chan);
/* The response's "source CID" is the peer's CID, i.e. our dcid. */
3245 rsp->scid = cpu_to_le16(chan->dcid);
3246 rsp->result = cpu_to_le16(result);
3247 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connection Response (success / no further info)
 * for a channel whose acceptance was postponed, then kick off the
 * configuration phase with our first Configure Request — unless one
 * was already sent (CONF_REQ_SENT test-and-set).
 */
3252 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3254 struct l2cap_conn_rsp rsp;
3255 struct l2cap_conn *conn = chan->conn;
3258 rsp.scid = cpu_to_le16(chan->dcid);
3259 rsp.dcid = cpu_to_le16(chan->scid);
3260 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3261 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3262 l2cap_send_cmd(conn, chan->ident,
3263 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller proceeds to send the Configure Request. */
3265 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3268 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3269 l2cap_build_conf_req(chan, buf), buf);
3270 chan->num_conf_req++;
/* Extract the final RFC (and extended window) parameters from a
 * successful Configure Response and commit them to the channel.
 * Sane defaults are pre-loaded so a response that omits the options
 * still leaves the channel usable.
 */
3273 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3277 /* Use sane default values in case a misbehaving remote device
3278 * did not send an RFC or extended window size option.
3280 u16 txwin_ext = chan->ack_win;
3281 struct l2cap_conf_rfc rfc = {
3283 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3284 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3285 .max_pdu_size = cpu_to_le16(chan->imtu),
3286 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3289 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming modes carry RFC parameters. */
3291 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3294 while (len >= L2CAP_CONF_OPT_SIZE) {
3295 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3298 case L2CAP_CONF_RFC:
3299 if (olen == sizeof(rfc))
3300 memcpy(&rfc, (void *)val, olen);
3302 case L2CAP_CONF_EWS:
3309 case L2CAP_MODE_ERTM:
3310 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3311 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3312 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control: window comes from the EWS option; otherwise
 * from the RFC txwin_size. */
3313 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3314 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3316 chan->ack_win = min_t(u16, chan->ack_win,
3319 case L2CAP_MODE_STREAMING:
3320 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), treat the feature-mask exchange
 * as done and continue bringing up pending channels.
 */
3324 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3326 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3328 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3331 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3332 cmd->ident == conn->info_ident) {
/* Stop the info-request timeout; the exchange is over. */
3333 cancel_delayed_work(&conn->info_timer);
3335 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3336 conn->info_ident = 0;
3338 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: look up a listening channel
 * for the PSM, run security checks, create the new child channel and
 * reply with a Connection Response (success, pending, or an error
 * result).  May also trigger the initial Information Request and/or
 * our first Configure Request.
 * NOTE(review): several original lines (gotos, lock calls, labels) are
 * elided in this excerpt; comments describe only the visible code.
 */
3344 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3346 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3347 struct l2cap_conn_rsp rsp;
3348 struct l2cap_chan *chan = NULL, *pchan;
3349 struct sock *parent, *sk = NULL;
3350 int result, status = L2CAP_CS_NO_INFO;
3352 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3353 __le16 psm = req->psm;
3355 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3357 /* Check if we have socket listening on psm */
3358 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3360 result = L2CAP_CR_BAD_PSM;
3366 mutex_lock(&conn->chan_lock);
3369 /* Check if the ACL is secure enough (if not SDP) */
3370 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3371 !hci_conn_check_link_mode(conn->hcon)) {
3372 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3373 result = L2CAP_CR_SEC_BLOCK;
3377 result = L2CAP_CR_NO_MEM;
3379 /* Check if we already have channel with that dcid */
3380 if (__l2cap_get_chan_by_dcid(conn, scid))
3383 chan = pchan->ops->new_connection(pchan);
/* Keep the ACL alive while the new channel exists. */
3389 hci_conn_hold(conn->hcon);
3391 bacpy(&bt_sk(sk)->src, conn->src);
3392 bacpy(&bt_sk(sk)->dst, conn->dst);
3396 bt_accept_enqueue(parent, sk);
3398 __l2cap_chan_add(conn, chan);
3402 __set_chan_timer(chan, sk->sk_sndtimeo);
3404 chan->ident = cmd->ident;
3406 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3407 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: stay in CONNECT2 and let userspace accept. */
3408 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3409 __l2cap_state_change(chan, BT_CONNECT2);
3410 result = L2CAP_CR_PEND;
3411 status = L2CAP_CS_AUTHOR_PEND;
3412 parent->sk_data_ready(parent, 0);
3414 __l2cap_state_change(chan, BT_CONFIG);
3415 result = L2CAP_CR_SUCCESS;
3416 status = L2CAP_CS_NO_INFO;
3419 __l2cap_state_change(chan, BT_CONNECT2);
3420 result = L2CAP_CR_PEND;
3421 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not known yet: answer "pending" for now. */
3424 __l2cap_state_change(chan, BT_CONNECT2);
3425 result = L2CAP_CR_PEND;
3426 status = L2CAP_CS_NO_INFO;
3430 release_sock(parent);
3431 mutex_unlock(&conn->chan_lock);
3434 rsp.scid = cpu_to_le16(scid);
3435 rsp.dcid = cpu_to_le16(dcid);
3436 rsp.result = cpu_to_le16(result);
3437 rsp.status = cpu_to_le16(status);
3438 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this link: start the feature-mask exchange. */
3440 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3441 struct l2cap_info_req info;
3442 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3444 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3445 conn->info_ident = l2cap_get_ident(conn);
3447 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3449 l2cap_send_cmd(conn, conn->info_ident,
3450 L2CAP_INFO_REQ, sizeof(info), &info);
3453 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3454 result == L2CAP_CR_SUCCESS) {
3456 set_bit(CONF_REQ_SENT, &chan->conf_state);
3457 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3458 l2cap_build_conf_req(chan, buf), buf);
3459 chan->num_conf_req++;
/* Handle an incoming Connection Response: find the channel by scid
 * (or by ident while still pending), then on success move to BT_CONFIG
 * and send our first Configure Request; on pending just mark the state;
 * otherwise tear the channel down with ECONNREFUSED.
 */
3465 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3467 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3468 u16 scid, dcid, result, status;
3469 struct l2cap_chan *chan;
3473 scid = __le16_to_cpu(rsp->scid);
3474 dcid = __le16_to_cpu(rsp->dcid);
3475 result = __le16_to_cpu(rsp->result);
3476 status = __le16_to_cpu(rsp->status);
3478 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3479 dcid, scid, result, status);
3481 mutex_lock(&conn->chan_lock);
3484 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No scid yet (response to a pending request): match on ident. */
3490 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3499 l2cap_chan_lock(chan);
3502 case L2CAP_CR_SUCCESS:
3503 l2cap_state_change(chan, BT_CONFIG);
3506 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Only the first success path sends the Configure Request. */
3508 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3511 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3512 l2cap_build_conf_req(chan, req), req);
3513 chan->num_conf_req++;
3517 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3521 l2cap_chan_del(chan, ECONNREFUSED);
3525 l2cap_chan_unlock(chan);
3528 mutex_unlock(&conn->chan_lock);
/* Pick the channel's FCS setting once configuration is complete:
 * no FCS outside ERTM/streaming; CRC16 otherwise, unless the peer
 * explicitly negotiated FCS off (CONF_NO_FCS_RECV).
 */
3533 static inline void set_default_fcs(struct l2cap_chan *chan)
3535 /* FCS is enabled only in ERTM or streaming mode, if one or both
3538 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3539 chan->fcs = L2CAP_FCS_NONE;
3540 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3541 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: accumulate option data in
 * chan->conf_req across continuation fragments, then parse the complete
 * request, send the response, and — when both directions are done —
 * finish channel setup (ERTM init / channel ready).
 * NOTE(review): several original lines (gotos, declarations, labels)
 * are elided in this excerpt; comments describe only the visible code.
 */
3544 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3546 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3549 struct l2cap_chan *chan;
3552 dcid = __le16_to_cpu(req->dcid);
3553 flags = __le16_to_cpu(req->flags);
3555 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3557 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Configuration is only legal in BT_CONFIG/BT_CONNECT2; otherwise
 * reject with "invalid CID". */
3561 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3562 struct l2cap_cmd_rej_cid rej;
3564 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3565 rej.scid = cpu_to_le16(chan->scid);
3566 rej.dcid = cpu_to_le16(chan->dcid);
3568 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3573 /* Reject if config buffer is too small. */
3574 len = cmd_len - sizeof(*req);
3575 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3576 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3577 l2cap_build_conf_rsp(chan, rsp,
3578 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
3583 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3584 chan->conf_len += len;
3586 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3587 /* Incomplete config. Send empty response. */
3588 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3589 l2cap_build_conf_rsp(chan, rsp,
3590 L2CAP_CONF_SUCCESS, flags), rsp);
3594 /* Complete config. */
3595 len = l2cap_parse_conf_req(chan, rsp);
3597 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3601 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3602 chan->num_conf_rsp++;
3604 /* Reset config buffer. */
3607 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finish channel initialisation. */
3610 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3611 set_default_fcs(chan);
3613 if (chan->mode == L2CAP_MODE_ERTM ||
3614 chan->mode == L2CAP_MODE_STREAMING)
3615 err = l2cap_ertm_init(chan);
3618 l2cap_send_disconn_req(chan->conn, chan, -err);
3620 l2cap_chan_ready(chan);
3625 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3627 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3628 l2cap_build_conf_req(chan, buf), buf);
3629 chan->num_conf_req++;
3632 /* Got Conf Rsp PENDING from remote side and asume we sent
3633 Conf Rsp PENDING in the code above */
3634 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3635 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3637 /* check compatibility */
3639 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3640 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3642 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3643 l2cap_build_conf_rsp(chan, rsp,
3644 L2CAP_CONF_SUCCESS, flags), rsp);
3648 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response.  On success commit the RFC
 * parameters; on pending parse the response and answer; on unaccept
 * retry with an adjusted request (bounded by L2CAP_CONF_MAX_CONF_RSP);
 * anything else tears the channel down.  When both directions are done,
 * finish channel setup.
 * NOTE(review): several original lines (gotos, declarations, labels)
 * are elided in this excerpt; comments describe only the visible code.
 */
3652 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3654 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3655 u16 scid, flags, result;
3656 struct l2cap_chan *chan;
3657 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3660 scid = __le16_to_cpu(rsp->scid);
3661 flags = __le16_to_cpu(rsp->flags);
3662 result = __le16_to_cpu(rsp->result);
3664 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3667 chan = l2cap_get_chan_by_scid(conn, scid);
3672 case L2CAP_CONF_SUCCESS:
3673 l2cap_conf_rfc_get(chan, rsp->data, len);
3674 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3677 case L2CAP_CONF_PENDING:
3678 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3680 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3683 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3686 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3690 /* check compatibility */
3692 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3693 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3695 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3696 l2cap_build_conf_rsp(chan, buf,
3697 L2CAP_CONF_SUCCESS, 0x0000), buf);
3701 case L2CAP_CONF_UNACCEPT:
/* Retry only a bounded number of times to avoid endless loops. */
3702 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3705 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3706 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3710 /* throw out any old stored conf requests */
3711 result = L2CAP_CONF_SUCCESS;
3712 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3715 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3719 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3720 L2CAP_CONF_REQ, len, req);
3721 chan->num_conf_req++;
3722 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: fail the channel and disconnect. */
3728 l2cap_chan_set_err(chan, ECONNRESET);
3730 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3731 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3735 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3738 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3740 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3741 set_default_fcs(chan);
3743 if (chan->mode == L2CAP_MODE_ERTM ||
3744 chan->mode == L2CAP_MODE_STREAMING)
3745 err = l2cap_ertm_init(chan);
3748 l2cap_send_disconn_req(chan->conn, chan, -err);
3750 l2cap_chan_ready(chan);
3754 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut down the socket, and delete the channel.
 * A hold/put pair keeps the channel alive across the unlocked
 * ops->close() call.
 */
3758 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3760 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3761 struct l2cap_disconn_rsp rsp;
3763 struct l2cap_chan *chan;
3766 scid = __le16_to_cpu(req->scid);
3767 dcid = __le16_to_cpu(req->dcid);
3769 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3771 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, so look the channel up by dcid. */
3773 chan = __l2cap_get_chan_by_scid(conn, dcid);
3775 mutex_unlock(&conn->chan_lock);
3779 l2cap_chan_lock(chan);
3783 rsp.dcid = cpu_to_le16(chan->scid);
3784 rsp.scid = cpu_to_le16(chan->dcid);
3785 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3788 sk->sk_shutdown = SHUTDOWN_MASK;
3791 l2cap_chan_hold(chan);
3792 l2cap_chan_del(chan, ECONNRESET);
3794 l2cap_chan_unlock(chan);
3796 chan->ops->close(chan);
3797 l2cap_chan_put(chan);
3799 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect, so delete the channel (error 0) and close it, holding a
 * reference across the unlocked ops->close() call.
 */
3804 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3806 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3808 struct l2cap_chan *chan;
3810 scid = __le16_to_cpu(rsp->scid);
3811 dcid = __le16_to_cpu(rsp->dcid);
3813 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3815 mutex_lock(&conn->chan_lock);
3817 chan = __l2cap_get_chan_by_scid(conn, scid);
3819 mutex_unlock(&conn->chan_lock);
3823 l2cap_chan_lock(chan);
3825 l2cap_chan_hold(chan);
3826 l2cap_chan_del(chan, 0);
3828 l2cap_chan_unlock(chan);
3830 chan->ops->close(chan);
3831 l2cap_chan_put(chan);
3833 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries; any other type gets a NOTSUPP response.
 */
3838 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3840 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3843 type = __le16_to_cpu(req->type);
3845 BT_DBG("type 0x%4.4x", type);
3847 if (type == L2CAP_IT_FEAT_MASK) {
3849 u32 feat_mask = l2cap_feat_mask;
3850 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3851 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3852 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming plus extended flow/window support. */
3854 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3857 feat_mask |= L2CAP_FEAT_EXT_FLOW
3858 | L2CAP_FEAT_EXT_WINDOW;
3860 put_unaligned_le32(feat_mask, rsp->data);
3861 l2cap_send_cmd(conn, cmd->ident,
3862 L2CAP_INFO_RSP, sizeof(buf), buf);
3863 } else if (type == L2CAP_IT_FIXED_CHAN) {
3865 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Toggle the A2MP bit in the (module-global) fixed-channel map. */
3868 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3870 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3872 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3873 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3874 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3875 l2cap_send_cmd(conn, cmd->ident,
3876 L2CAP_INFO_RSP, sizeof(buf), buf);
3878 struct l2cap_info_rsp rsp;
3879 rsp.type = cpu_to_le16(type);
3880 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3881 l2cap_send_cmd(conn, cmd->ident,
3882 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response: record the feature mask
 * (possibly chaining a fixed-channel query) or the fixed-channel map,
 * then mark the info exchange done and start pending channels.
 */
3888 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3890 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3893 type = __le16_to_cpu(rsp->type);
3894 result = __le16_to_cpu(rsp->result);
3896 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3898 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3899 if (cmd->ident != conn->info_ident ||
3900 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3903 cancel_delayed_work(&conn->info_timer);
3905 if (result != L2CAP_IR_SUCCESS) {
3906 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3907 conn->info_ident = 0;
3909 l2cap_conn_start(conn);
3915 case L2CAP_IT_FEAT_MASK:
3916 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask for its channel map next. */
3918 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3919 struct l2cap_info_req req;
3920 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3922 conn->info_ident = l2cap_get_ident(conn);
3924 l2cap_send_cmd(conn, conn->info_ident,
3925 L2CAP_INFO_REQ, sizeof(req), &req);
3927 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3928 conn->info_ident = 0;
3930 l2cap_conn_start(conn);
3934 case L2CAP_IT_FIXED_CHAN:
3935 conn->fixed_chan_mask = rsp->data[0];
3936 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3937 conn->info_ident = 0;
3939 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Not implemented yet: the
 * placeholder always responds with "no resources" (L2CAP_CR_NO_MEM).
 */
3946 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3947 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3950 struct l2cap_create_chan_req *req = data;
3951 struct l2cap_create_chan_rsp rsp;
/* Malformed length: reject (handling elided in this excerpt). */
3954 if (cmd_len != sizeof(*req))
3960 psm = le16_to_cpu(req->psm);
3961 scid = le16_to_cpu(req->scid);
3963 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3965 /* Placeholder: Always reject */
3967 rsp.scid = cpu_to_le16(scid);
3968 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3969 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3971 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the wire format of a Connection
 * Response, so delegate to l2cap_connect_rsp().
 */
3977 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3978 struct l2cap_cmd_hdr *cmd, void *data)
3980 BT_DBG("conn %p", conn);
3982 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response with the given initiator CID and result. */
3985 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3986 u16 icid, u16 result)
3988 struct l2cap_move_chan_rsp rsp;
3990 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3992 rsp.icid = cpu_to_le16(icid);
3993 rsp.result = cpu_to_le16(result);
3995 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirmation, allocating a fresh ident and
 * recording it on the channel so the confirm-response can be matched.
 */
3998 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3999 struct l2cap_chan *chan,
4000 u16 icid, u16 result)
4002 struct l2cap_move_chan_cfm cfm;
4005 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4007 ident = l2cap_get_ident(conn);
4009 chan->ident = ident;
4011 cfm.icid = cpu_to_le16(icid);
4012 cfm.result = cpu_to_le16(result);
4014 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirmation Response for the given icid, echoing
 * the confirmation's ident.
 */
4017 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4020 struct l2cap_move_chan_cfm_rsp rsp;
4022 BT_DBG("icid 0x%4.4x", icid);
4024 rsp.icid = cpu_to_le16(icid);
4025 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Channel moves are not implemented
 * yet: the placeholder always answers "not allowed".
 */
4028 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4029 struct l2cap_cmd_hdr *cmd,
4030 u16 cmd_len, void *data)
4032 struct l2cap_move_chan_req *req = data;
4034 u16 result = L2CAP_MR_NOT_ALLOWED;
/* Malformed length: reject (handling elided in this excerpt). */
4036 if (cmd_len != sizeof(*req))
4039 icid = le16_to_cpu(req->icid);
4041 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4046 /* Placeholder: Always refuse */
4047 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder: always confirm the move
 * as unconfirmed (no channel is actually moved yet).
 */
4052 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4053 struct l2cap_cmd_hdr *cmd,
4054 u16 cmd_len, void *data)
4056 struct l2cap_move_chan_rsp *rsp = data;
4059 if (cmd_len != sizeof(*rsp))
4062 icid = le16_to_cpu(rsp->icid);
4063 result = le16_to_cpu(rsp->result);
4065 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4067 /* Placeholder: Always unconfirmed */
4068 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation: validate the length and always
 * acknowledge with a confirmation response (moves not implemented).
 */
4073 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4074 struct l2cap_cmd_hdr *cmd,
4075 u16 cmd_len, void *data)
4077 struct l2cap_move_chan_cfm *cfm = data;
4080 if (cmd_len != sizeof(*cfm))
4083 icid = le16_to_cpu(cfm->icid);
4084 result = le16_to_cpu(cfm->result);
4086 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4088 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response: length-check and log
 * only — nothing to do until channel moves are implemented.
 */
4093 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4094 struct l2cap_cmd_hdr *cmd,
4095 u16 cmd_len, void *data)
4097 struct l2cap_move_chan_cfm_rsp *rsp = data;
4100 if (cmd_len != sizeof(*rsp))
4103 icid = le16_to_cpu(rsp->icid);
4105 BT_DBG("icid 0x%4.4x", icid);
/* Validate LE connection parameters against the allowed ranges:
 * interval 6..3200 with min <= max, supervision timeout 10..3200
 * (10ms units), timeout strictly greater than max interval * 8, and
 * slave latency <= 499 and below the timeout-derived maximum.
 * (Return statements for the failure branches are elided in this
 * excerpt.)
 */
4110 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4115 if (min > max || min < 6 || max > 3200)
4118 if (to_multiplier < 10 || to_multiplier > 3200)
/* Ensure the timeout can cover at least one full latency cycle. */
4121 if (max >= to_multiplier * 8)
4124 max_latency = (to_multiplier * 8 / max) - 1;
4125 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master side only):
 * validate the requested parameters, send accept/reject, and on accept
 * ask the controller to update the link.
 */
4131 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4132 struct l2cap_cmd_hdr *cmd, u8 *data)
4134 struct hci_conn *hcon = conn->hcon;
4135 struct l2cap_conn_param_update_req *req;
4136 struct l2cap_conn_param_update_rsp rsp;
4137 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may process this request. */
4140 if (!(hcon->link_mode & HCI_LM_MASTER))
4143 cmd_len = __le16_to_cpu(cmd->len);
4144 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4147 req = (struct l2cap_conn_param_update_req *) data;
4148 min = __le16_to_cpu(req->min);
4149 max = __le16_to_cpu(req->max);
4150 latency = __le16_to_cpu(req->latency);
4151 to_multiplier = __le16_to_cpu(req->to_multiplier);
4153 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4154 min, max, latency, to_multiplier);
4156 memset(&rsp, 0, sizeof(rsp));
4158 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4160 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4162 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4164 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters valid: push the update down to the controller. */
4168 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler based on the
 * command code; unknown codes are logged and (per the elided tail)
 * reported as an error.
 */
4173 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4174 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4178 switch (cmd->code) {
4179 case L2CAP_COMMAND_REJ:
4180 l2cap_command_rej(conn, cmd, data);
4183 case L2CAP_CONN_REQ:
4184 err = l2cap_connect_req(conn, cmd, data);
4187 case L2CAP_CONN_RSP:
4188 err = l2cap_connect_rsp(conn, cmd, data);
4191 case L2CAP_CONF_REQ:
4192 err = l2cap_config_req(conn, cmd, cmd_len, data);
4195 case L2CAP_CONF_RSP:
4196 err = l2cap_config_rsp(conn, cmd, data);
4199 case L2CAP_DISCONN_REQ:
4200 err = l2cap_disconnect_req(conn, cmd, data);
4203 case L2CAP_DISCONN_RSP:
4204 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo requests are answered in place with the same payload. */
4207 case L2CAP_ECHO_REQ:
4208 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4211 case L2CAP_ECHO_RSP:
4214 case L2CAP_INFO_REQ:
4215 err = l2cap_information_req(conn, cmd, data);
4218 case L2CAP_INFO_RSP:
4219 err = l2cap_information_rsp(conn, cmd, data);
4222 case L2CAP_CREATE_CHAN_REQ:
4223 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4226 case L2CAP_CREATE_CHAN_RSP:
4227 err = l2cap_create_channel_rsp(conn, cmd, data);
4230 case L2CAP_MOVE_CHAN_REQ:
4231 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4234 case L2CAP_MOVE_CHAN_RSP:
4235 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4238 case L2CAP_MOVE_CHAN_CFM:
4239 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4242 case L2CAP_MOVE_CHAN_CFM_RSP:
4243 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4247 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only the connection-parameter
 * update request is handled; reject and update-response are ignored,
 * and unknown codes are logged.
 */
4255 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4256 struct l2cap_cmd_hdr *cmd, u8 *data)
4258 switch (cmd->code) {
4259 case L2CAP_COMMAND_REJ:
4262 case L2CAP_CONN_PARAM_UPDATE_REQ:
4263 return l2cap_conn_param_update_req(conn, cmd, data);
4265 case L2CAP_CONN_PARAM_UPDATE_RSP:
4269 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a signaling-channel SKB: iterate over the packed commands,
 * validate each header, dispatch to the LE or BR/EDR handler, and send
 * a Command Reject when a handler fails.
 */
4274 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4275 struct sk_buff *skb)
4277 u8 *data = skb->data;
4279 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
4282 l2cap_raw_recv(conn, skb);
4284 while (len >= L2CAP_CMD_HDR_SIZE) {
4286 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4287 data += L2CAP_CMD_HDR_SIZE;
4288 len -= L2CAP_CMD_HDR_SIZE;
4290 cmd_len = le16_to_cpu(cmd.len);
4292 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A claimed length beyond the buffer, or ident 0, is malformed. */
4294 if (cmd_len > len || !cmd.ident) {
4295 BT_DBG("corrupted command");
4299 if (conn->hcon->type == LE_LINK)
4300 err = l2cap_le_sig_cmd(conn, &cmd, data);
4302 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4305 struct l2cap_cmd_rej_unk rej;
4307 BT_ERR("Wrong link type (%d)", err);
4309 /* FIXME: Map err to a valid reason */
4310 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4311 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailing an ERTM/streaming frame.  The FCS is
 * trimmed off the SKB first, then read from just past the new tail and
 * compared against a CRC computed over header plus payload.
 */
4321 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4323 u16 our_fcs, rcv_fcs;
/* Header size depends on whether extended control fields are in use. */
4326 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4327 hdr_size = L2CAP_EXT_HDR_SIZE;
4329 hdr_size = L2CAP_ENH_HDR_SIZE;
4331 if (chan->fcs == L2CAP_FCS_CRC16) {
4332 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
/* After the trim, skb->data + skb->len points at the FCS bytes. */
4333 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4334 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4336 if (our_fcs != rcv_fcs)
/* Tell the peer our current receive state: RNR when locally busy,
 * otherwise flush pending I-frames, and send an RR if the F-bit still
 * needs to go out in a standalone S-frame.
 */
4342 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4344 struct l2cap_ctrl control;
4346 BT_DBG("chan %p", chan);
4348 memset(&control, 0, sizeof(control));
4351 control.reqseq = chan->buffer_seq;
4352 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4354 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4355 control.super = L2CAP_SUPER_RNR;
4356 l2cap_send_sframe(chan, &control);
/* Leaving remote-busy with unacked frames: restart retransmission. */
4359 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4360 chan->unacked_frames > 0)
4361 __set_retrans_timer(chan);
4363 /* Send pending iframes */
4364 l2cap_ertm_send(chan);
4366 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4367 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4368 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4371 control.super = L2CAP_SUPER_RR;
4372 l2cap_send_sframe(chan, &control);
/* Append 'new_frag' to 'skb's frag_list, tracking the list tail in
 * '*last_frag' so appends stay O(1), and keep skb's length/truesize
 * accounting consistent.
 */
4376 static void append_skb_frag(struct sk_buff *skb,
4377 struct sk_buff *new_frag, struct sk_buff **last_frag)
4379 /* skb->len reflects data in skb as well as all fragments
4380 * skb->data_len reflects only data in fragments
4382 if (!skb_has_frag_list(skb))
4383 skb_shinfo(skb)->frag_list = new_frag;
4385 new_frag->next = NULL;
4387 (*last_frag)->next = new_frag;
4388 *last_frag = new_frag;
4390 skb->len += new_frag->len;
4391 skb->data_len += new_frag->len;
4392 skb->truesize += new_frag->truesize;
4395 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4396 struct l2cap_ctrl *control)
4400 switch (control->sar) {
4401 case L2CAP_SAR_UNSEGMENTED:
4405 err = chan->ops->recv(chan, skb);
4408 case L2CAP_SAR_START:
4412 chan->sdu_len = get_unaligned_le16(skb->data);
4413 skb_pull(skb, L2CAP_SDULEN_SIZE);
4415 if (chan->sdu_len > chan->imtu) {
4420 if (skb->len >= chan->sdu_len)
4424 chan->sdu_last_frag = skb;
4430 case L2CAP_SAR_CONTINUE:
4434 append_skb_frag(chan->sdu, skb,
4435 &chan->sdu_last_frag);
4438 if (chan->sdu->len >= chan->sdu_len)
4448 append_skb_frag(chan->sdu, skb,
4449 &chan->sdu_last_frag);
4452 if (chan->sdu->len != chan->sdu_len)
4455 err = chan->ops->recv(chan, chan->sdu);
4458 /* Reassembly complete */
4460 chan->sdu_last_frag = NULL;
4468 kfree_skb(chan->sdu);
4470 chan->sdu_last_frag = NULL;
4477 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4481 if (chan->mode != L2CAP_MODE_ERTM)
4484 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4485 l2cap_tx(chan, NULL, NULL, event);
4488 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4491 /* Pass sequential frames to l2cap_reassemble_sdu()
4492 * until a gap is encountered.
4495 BT_DBG("chan %p", chan);
4497 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4498 struct sk_buff *skb;
4499 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4500 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4502 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4507 skb_unlink(skb, &chan->srej_q);
4508 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4509 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4514 if (skb_queue_empty(&chan->srej_q)) {
4515 chan->rx_state = L2CAP_RX_STATE_RECV;
4516 l2cap_send_ack(chan);
4522 static void l2cap_handle_srej(struct l2cap_chan *chan,
4523 struct l2cap_ctrl *control)
4525 struct sk_buff *skb;
4527 BT_DBG("chan %p, control %p", chan, control);
4529 if (control->reqseq == chan->next_tx_seq) {
4530 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4531 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4535 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4538 BT_DBG("Seq %d not available for retransmission",
4543 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4544 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4545 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4549 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4551 if (control->poll) {
4552 l2cap_pass_to_tx(chan, control);
4554 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4555 l2cap_retransmit(chan, control);
4556 l2cap_ertm_send(chan);
4558 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4559 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4560 chan->srej_save_reqseq = control->reqseq;
4563 l2cap_pass_to_tx_fbit(chan, control);
4565 if (control->final) {
4566 if (chan->srej_save_reqseq != control->reqseq ||
4567 !test_and_clear_bit(CONN_SREJ_ACT,
4569 l2cap_retransmit(chan, control);
4571 l2cap_retransmit(chan, control);
4572 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4573 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4574 chan->srej_save_reqseq = control->reqseq;
4580 static void l2cap_handle_rej(struct l2cap_chan *chan,
4581 struct l2cap_ctrl *control)
4583 struct sk_buff *skb;
4585 BT_DBG("chan %p, control %p", chan, control);
4587 if (control->reqseq == chan->next_tx_seq) {
4588 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4589 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4593 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4595 if (chan->max_tx && skb &&
4596 bt_cb(skb)->control.retries >= chan->max_tx) {
4597 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4598 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4602 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4604 l2cap_pass_to_tx(chan, control);
4606 if (control->final) {
4607 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4608 l2cap_retransmit_all(chan, control);
4610 l2cap_retransmit_all(chan, control);
4611 l2cap_ertm_send(chan);
4612 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4613 set_bit(CONN_REJ_ACT, &chan->conn_state);
4617 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4619 BT_DBG("chan %p, txseq %d", chan, txseq);
4621 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4622 chan->expected_tx_seq);
4624 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4625 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4627 /* See notes below regarding "double poll" and
4630 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4631 BT_DBG("Invalid/Ignore - after SREJ");
4632 return L2CAP_TXSEQ_INVALID_IGNORE;
4634 BT_DBG("Invalid - in window after SREJ sent");
4635 return L2CAP_TXSEQ_INVALID;
4639 if (chan->srej_list.head == txseq) {
4640 BT_DBG("Expected SREJ");
4641 return L2CAP_TXSEQ_EXPECTED_SREJ;
4644 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4645 BT_DBG("Duplicate SREJ - txseq already stored");
4646 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4649 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4650 BT_DBG("Unexpected SREJ - not requested");
4651 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4655 if (chan->expected_tx_seq == txseq) {
4656 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4658 BT_DBG("Invalid - txseq outside tx window");
4659 return L2CAP_TXSEQ_INVALID;
4662 return L2CAP_TXSEQ_EXPECTED;
4666 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4667 __seq_offset(chan, chan->expected_tx_seq,
4668 chan->last_acked_seq)){
4669 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4670 return L2CAP_TXSEQ_DUPLICATE;
4673 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4674 /* A source of invalid packets is a "double poll" condition,
4675 * where delays cause us to send multiple poll packets. If
4676 * the remote stack receives and processes both polls,
4677 * sequence numbers can wrap around in such a way that a
4678 * resent frame has a sequence number that looks like new data
4679 * with a sequence gap. This would trigger an erroneous SREJ
4682 * Fortunately, this is impossible with a tx window that's
4683 * less than half of the maximum sequence number, which allows
4684 * invalid frames to be safely ignored.
4686 * With tx window sizes greater than half of the tx window
4687 * maximum, the frame is invalid and cannot be ignored. This
4688 * causes a disconnect.
4691 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4692 BT_DBG("Invalid/Ignore - txseq outside tx window");
4693 return L2CAP_TXSEQ_INVALID_IGNORE;
4695 BT_DBG("Invalid - txseq outside tx window");
4696 return L2CAP_TXSEQ_INVALID;
4699 BT_DBG("Unexpected - txseq indicates missing frames");
4700 return L2CAP_TXSEQ_UNEXPECTED;
4704 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4705 struct l2cap_ctrl *control,
4706 struct sk_buff *skb, u8 event)
4709 bool skb_in_use = 0;
4711 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4715 case L2CAP_EV_RECV_IFRAME:
4716 switch (l2cap_classify_txseq(chan, control->txseq)) {
4717 case L2CAP_TXSEQ_EXPECTED:
4718 l2cap_pass_to_tx(chan, control);
4720 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4721 BT_DBG("Busy, discarding expected seq %d",
4726 chan->expected_tx_seq = __next_seq(chan,
4729 chan->buffer_seq = chan->expected_tx_seq;
4732 err = l2cap_reassemble_sdu(chan, skb, control);
4736 if (control->final) {
4737 if (!test_and_clear_bit(CONN_REJ_ACT,
4738 &chan->conn_state)) {
4740 l2cap_retransmit_all(chan, control);
4741 l2cap_ertm_send(chan);
4745 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4746 l2cap_send_ack(chan);
4748 case L2CAP_TXSEQ_UNEXPECTED:
4749 l2cap_pass_to_tx(chan, control);
4751 /* Can't issue SREJ frames in the local busy state.
4752 * Drop this frame, it will be seen as missing
4753 * when local busy is exited.
4755 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4756 BT_DBG("Busy, discarding unexpected seq %d",
4761 /* There was a gap in the sequence, so an SREJ
4762 * must be sent for each missing frame. The
4763 * current frame is stored for later use.
4765 skb_queue_tail(&chan->srej_q, skb);
4767 BT_DBG("Queued %p (queue len %d)", skb,
4768 skb_queue_len(&chan->srej_q));
4770 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4771 l2cap_seq_list_clear(&chan->srej_list);
4772 l2cap_send_srej(chan, control->txseq);
4774 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4776 case L2CAP_TXSEQ_DUPLICATE:
4777 l2cap_pass_to_tx(chan, control);
4779 case L2CAP_TXSEQ_INVALID_IGNORE:
4781 case L2CAP_TXSEQ_INVALID:
4783 l2cap_send_disconn_req(chan->conn, chan,
4788 case L2CAP_EV_RECV_RR:
4789 l2cap_pass_to_tx(chan, control);
4790 if (control->final) {
4791 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4793 if (!test_and_clear_bit(CONN_REJ_ACT,
4794 &chan->conn_state)) {
4796 l2cap_retransmit_all(chan, control);
4799 l2cap_ertm_send(chan);
4800 } else if (control->poll) {
4801 l2cap_send_i_or_rr_or_rnr(chan);
4803 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4804 &chan->conn_state) &&
4805 chan->unacked_frames)
4806 __set_retrans_timer(chan);
4808 l2cap_ertm_send(chan);
4811 case L2CAP_EV_RECV_RNR:
4812 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4813 l2cap_pass_to_tx(chan, control);
4814 if (control && control->poll) {
4815 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4816 l2cap_send_rr_or_rnr(chan, 0);
4818 __clear_retrans_timer(chan);
4819 l2cap_seq_list_clear(&chan->retrans_list);
4821 case L2CAP_EV_RECV_REJ:
4822 l2cap_handle_rej(chan, control);
4824 case L2CAP_EV_RECV_SREJ:
4825 l2cap_handle_srej(chan, control);
4831 if (skb && !skb_in_use) {
4832 BT_DBG("Freeing %p", skb);
4839 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4840 struct l2cap_ctrl *control,
4841 struct sk_buff *skb, u8 event)
4844 u16 txseq = control->txseq;
4845 bool skb_in_use = 0;
4847 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4851 case L2CAP_EV_RECV_IFRAME:
4852 switch (l2cap_classify_txseq(chan, txseq)) {
4853 case L2CAP_TXSEQ_EXPECTED:
4854 /* Keep frame for reassembly later */
4855 l2cap_pass_to_tx(chan, control);
4856 skb_queue_tail(&chan->srej_q, skb);
4858 BT_DBG("Queued %p (queue len %d)", skb,
4859 skb_queue_len(&chan->srej_q));
4861 chan->expected_tx_seq = __next_seq(chan, txseq);
4863 case L2CAP_TXSEQ_EXPECTED_SREJ:
4864 l2cap_seq_list_pop(&chan->srej_list);
4866 l2cap_pass_to_tx(chan, control);
4867 skb_queue_tail(&chan->srej_q, skb);
4869 BT_DBG("Queued %p (queue len %d)", skb,
4870 skb_queue_len(&chan->srej_q));
4872 err = l2cap_rx_queued_iframes(chan);
4877 case L2CAP_TXSEQ_UNEXPECTED:
4878 /* Got a frame that can't be reassembled yet.
4879 * Save it for later, and send SREJs to cover
4880 * the missing frames.
4882 skb_queue_tail(&chan->srej_q, skb);
4884 BT_DBG("Queued %p (queue len %d)", skb,
4885 skb_queue_len(&chan->srej_q));
4887 l2cap_pass_to_tx(chan, control);
4888 l2cap_send_srej(chan, control->txseq);
4890 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4891 /* This frame was requested with an SREJ, but
4892 * some expected retransmitted frames are
4893 * missing. Request retransmission of missing
4896 skb_queue_tail(&chan->srej_q, skb);
4898 BT_DBG("Queued %p (queue len %d)", skb,
4899 skb_queue_len(&chan->srej_q));
4901 l2cap_pass_to_tx(chan, control);
4902 l2cap_send_srej_list(chan, control->txseq);
4904 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4905 /* We've already queued this frame. Drop this copy. */
4906 l2cap_pass_to_tx(chan, control);
4908 case L2CAP_TXSEQ_DUPLICATE:
4909 /* Expecting a later sequence number, so this frame
4910 * was already received. Ignore it completely.
4913 case L2CAP_TXSEQ_INVALID_IGNORE:
4915 case L2CAP_TXSEQ_INVALID:
4917 l2cap_send_disconn_req(chan->conn, chan,
4922 case L2CAP_EV_RECV_RR:
4923 l2cap_pass_to_tx(chan, control);
4924 if (control->final) {
4925 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4927 if (!test_and_clear_bit(CONN_REJ_ACT,
4928 &chan->conn_state)) {
4930 l2cap_retransmit_all(chan, control);
4933 l2cap_ertm_send(chan);
4934 } else if (control->poll) {
4935 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4936 &chan->conn_state) &&
4937 chan->unacked_frames) {
4938 __set_retrans_timer(chan);
4941 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4942 l2cap_send_srej_tail(chan);
4944 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4945 &chan->conn_state) &&
4946 chan->unacked_frames)
4947 __set_retrans_timer(chan);
4949 l2cap_send_ack(chan);
4952 case L2CAP_EV_RECV_RNR:
4953 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4954 l2cap_pass_to_tx(chan, control);
4955 if (control->poll) {
4956 l2cap_send_srej_tail(chan);
4958 struct l2cap_ctrl rr_control;
4959 memset(&rr_control, 0, sizeof(rr_control));
4960 rr_control.sframe = 1;
4961 rr_control.super = L2CAP_SUPER_RR;
4962 rr_control.reqseq = chan->buffer_seq;
4963 l2cap_send_sframe(chan, &rr_control);
4967 case L2CAP_EV_RECV_REJ:
4968 l2cap_handle_rej(chan, control);
4970 case L2CAP_EV_RECV_SREJ:
4971 l2cap_handle_srej(chan, control);
4975 if (skb && !skb_in_use) {
4976 BT_DBG("Freeing %p", skb);
4983 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4985 /* Make sure reqseq is for a packet that has been sent but not acked */
4988 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4989 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4992 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4993 struct sk_buff *skb, u8 event)
4997 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4998 control, skb, event, chan->rx_state);
5000 if (__valid_reqseq(chan, control->reqseq)) {
5001 switch (chan->rx_state) {
5002 case L2CAP_RX_STATE_RECV:
5003 err = l2cap_rx_state_recv(chan, control, skb, event);
5005 case L2CAP_RX_STATE_SREJ_SENT:
5006 err = l2cap_rx_state_srej_sent(chan, control, skb,
5014 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5015 control->reqseq, chan->next_tx_seq,
5016 chan->expected_ack_seq);
5017 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5023 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5024 struct sk_buff *skb)
5028 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5031 if (l2cap_classify_txseq(chan, control->txseq) ==
5032 L2CAP_TXSEQ_EXPECTED) {
5033 l2cap_pass_to_tx(chan, control);
5035 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5036 __next_seq(chan, chan->buffer_seq));
5038 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5040 l2cap_reassemble_sdu(chan, skb, control);
5043 kfree_skb(chan->sdu);
5046 chan->sdu_last_frag = NULL;
5050 BT_DBG("Freeing %p", skb);
5055 chan->last_acked_seq = control->txseq;
5056 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5061 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5063 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5067 __unpack_control(chan, skb);
5072 * We can just drop the corrupted I-frame here.
5073 * Receiver will miss it and start proper recovery
5074 * procedures and ask for retransmission.
5076 if (l2cap_check_fcs(chan, skb))
5079 if (!control->sframe && control->sar == L2CAP_SAR_START)
5080 len -= L2CAP_SDULEN_SIZE;
5082 if (chan->fcs == L2CAP_FCS_CRC16)
5083 len -= L2CAP_FCS_SIZE;
5085 if (len > chan->mps) {
5086 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5090 if (!control->sframe) {
5093 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5094 control->sar, control->reqseq, control->final,
5097 /* Validate F-bit - F=0 always valid, F=1 only
5098 * valid in TX WAIT_F
5100 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5103 if (chan->mode != L2CAP_MODE_STREAMING) {
5104 event = L2CAP_EV_RECV_IFRAME;
5105 err = l2cap_rx(chan, control, skb, event);
5107 err = l2cap_stream_rx(chan, control, skb);
5111 l2cap_send_disconn_req(chan->conn, chan,
5114 const u8 rx_func_to_event[4] = {
5115 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5116 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5119 /* Only I-frames are expected in streaming mode */
5120 if (chan->mode == L2CAP_MODE_STREAMING)
5123 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5124 control->reqseq, control->final, control->poll,
5129 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5133 /* Validate F and P bits */
5134 if (control->final && (control->poll ||
5135 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5138 event = rx_func_to_event[control->super];
5139 if (l2cap_rx(chan, control, skb, event))
5140 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5150 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5151 struct sk_buff *skb)
5153 struct l2cap_chan *chan;
5155 chan = l2cap_get_chan_by_scid(conn, cid);
5157 if (cid == L2CAP_CID_A2MP) {
5158 chan = a2mp_channel_create(conn, skb);
5164 l2cap_chan_lock(chan);
5166 BT_DBG("unknown cid 0x%4.4x", cid);
5167 /* Drop packet and return */
5173 BT_DBG("chan %p, len %d", chan, skb->len);
5175 if (chan->state != BT_CONNECTED)
5178 switch (chan->mode) {
5179 case L2CAP_MODE_BASIC:
5180 /* If socket recv buffers overflows we drop data here
5181 * which is *bad* because L2CAP has to be reliable.
5182 * But we don't have any other choice. L2CAP doesn't
5183 * provide flow control mechanism. */
5185 if (chan->imtu < skb->len)
5188 if (!chan->ops->recv(chan, skb))
5192 case L2CAP_MODE_ERTM:
5193 case L2CAP_MODE_STREAMING:
5194 l2cap_data_rcv(chan, skb);
5198 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5206 l2cap_chan_unlock(chan);
5209 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5210 struct sk_buff *skb)
5212 struct l2cap_chan *chan;
5214 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5218 BT_DBG("chan %p, len %d", chan, skb->len);
5220 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5223 if (chan->imtu < skb->len)
5226 if (!chan->ops->recv(chan, skb))
5233 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5234 struct sk_buff *skb)
5236 struct l2cap_chan *chan;
5238 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5242 BT_DBG("chan %p, len %d", chan, skb->len);
5244 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5247 if (chan->imtu < skb->len)
5250 if (!chan->ops->recv(chan, skb))
5257 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5259 struct l2cap_hdr *lh = (void *) skb->data;
5263 skb_pull(skb, L2CAP_HDR_SIZE);
5264 cid = __le16_to_cpu(lh->cid);
5265 len = __le16_to_cpu(lh->len);
5267 if (len != skb->len) {
5272 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5275 case L2CAP_CID_LE_SIGNALING:
5276 case L2CAP_CID_SIGNALING:
5277 l2cap_sig_channel(conn, skb);
5280 case L2CAP_CID_CONN_LESS:
5281 psm = get_unaligned((__le16 *) skb->data);
5282 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5283 l2cap_conless_channel(conn, psm, skb);
5286 case L2CAP_CID_LE_DATA:
5287 l2cap_att_channel(conn, cid, skb);
5291 if (smp_sig_channel(conn, skb))
5292 l2cap_conn_del(conn->hcon, EACCES);
5296 l2cap_data_channel(conn, cid, skb);
5301 /* ---- L2CAP interface with lower layer (HCI) ---- */
5303 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5305 int exact = 0, lm1 = 0, lm2 = 0;
5306 struct l2cap_chan *c;
5308 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5310 /* Find listening sockets and check their link_mode */
5311 read_lock(&chan_list_lock);
5312 list_for_each_entry(c, &chan_list, global_l) {
5313 struct sock *sk = c->sk;
5315 if (c->state != BT_LISTEN)
5318 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5319 lm1 |= HCI_LM_ACCEPT;
5320 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5321 lm1 |= HCI_LM_MASTER;
5323 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5324 lm2 |= HCI_LM_ACCEPT;
5325 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5326 lm2 |= HCI_LM_MASTER;
5329 read_unlock(&chan_list_lock);
5331 return exact ? lm1 : lm2;
5334 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5336 struct l2cap_conn *conn;
5338 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5341 conn = l2cap_conn_add(hcon, status);
5343 l2cap_conn_ready(conn);
5345 l2cap_conn_del(hcon, bt_to_errno(status));
5350 int l2cap_disconn_ind(struct hci_conn *hcon)
5352 struct l2cap_conn *conn = hcon->l2cap_data;
5354 BT_DBG("hcon %p", hcon);
5357 return HCI_ERROR_REMOTE_USER_TERM;
5358 return conn->disc_reason;
5361 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5363 BT_DBG("hcon %p reason %d", hcon, reason);
5365 l2cap_conn_del(hcon, bt_to_errno(reason));
5369 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5371 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5374 if (encrypt == 0x00) {
5375 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5376 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5377 } else if (chan->sec_level == BT_SECURITY_HIGH)
5378 l2cap_chan_close(chan, ECONNREFUSED);
5380 if (chan->sec_level == BT_SECURITY_MEDIUM)
5381 __clear_chan_timer(chan);
5385 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5387 struct l2cap_conn *conn = hcon->l2cap_data;
5388 struct l2cap_chan *chan;
5393 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
5395 if (hcon->type == LE_LINK) {
5396 if (!status && encrypt)
5397 smp_distribute_keys(conn, 0);
5398 cancel_delayed_work(&conn->security_timer);
5401 mutex_lock(&conn->chan_lock);
5403 list_for_each_entry(chan, &conn->chan_l, list) {
5404 l2cap_chan_lock(chan);
5406 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5407 state_to_string(chan->state));
5409 if (chan->scid == L2CAP_CID_LE_DATA) {
5410 if (!status && encrypt) {
5411 chan->sec_level = hcon->sec_level;
5412 l2cap_chan_ready(chan);
5415 l2cap_chan_unlock(chan);
5419 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5420 l2cap_chan_unlock(chan);
5424 if (!status && (chan->state == BT_CONNECTED ||
5425 chan->state == BT_CONFIG)) {
5426 struct sock *sk = chan->sk;
5428 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5429 sk->sk_state_change(sk);
5431 l2cap_check_encryption(chan, encrypt);
5432 l2cap_chan_unlock(chan);
5436 if (chan->state == BT_CONNECT) {
5438 l2cap_send_conn_req(chan);
5440 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5442 } else if (chan->state == BT_CONNECT2) {
5443 struct sock *sk = chan->sk;
5444 struct l2cap_conn_rsp rsp;
5450 if (test_bit(BT_SK_DEFER_SETUP,
5451 &bt_sk(sk)->flags)) {
5452 struct sock *parent = bt_sk(sk)->parent;
5453 res = L2CAP_CR_PEND;
5454 stat = L2CAP_CS_AUTHOR_PEND;
5456 parent->sk_data_ready(parent, 0);
5458 __l2cap_state_change(chan, BT_CONFIG);
5459 res = L2CAP_CR_SUCCESS;
5460 stat = L2CAP_CS_NO_INFO;
5463 __l2cap_state_change(chan, BT_DISCONN);
5464 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5465 res = L2CAP_CR_SEC_BLOCK;
5466 stat = L2CAP_CS_NO_INFO;
5471 rsp.scid = cpu_to_le16(chan->dcid);
5472 rsp.dcid = cpu_to_le16(chan->scid);
5473 rsp.result = cpu_to_le16(res);
5474 rsp.status = cpu_to_le16(stat);
5475 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5478 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5479 res == L2CAP_CR_SUCCESS) {
5481 set_bit(CONF_REQ_SENT, &chan->conf_state);
5482 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5484 l2cap_build_conf_req(chan, buf),
5486 chan->num_conf_req++;
5490 l2cap_chan_unlock(chan);
5493 mutex_unlock(&conn->chan_lock);
5498 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5500 struct l2cap_conn *conn = hcon->l2cap_data;
5503 conn = l2cap_conn_add(hcon, 0);
5508 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5510 if (!(flags & ACL_CONT)) {
5511 struct l2cap_hdr *hdr;
5515 BT_ERR("Unexpected start frame (len %d)", skb->len);
5516 kfree_skb(conn->rx_skb);
5517 conn->rx_skb = NULL;
5519 l2cap_conn_unreliable(conn, ECOMM);
5522 /* Start fragment always begin with Basic L2CAP header */
5523 if (skb->len < L2CAP_HDR_SIZE) {
5524 BT_ERR("Frame is too short (len %d)", skb->len);
5525 l2cap_conn_unreliable(conn, ECOMM);
5529 hdr = (struct l2cap_hdr *) skb->data;
5530 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5532 if (len == skb->len) {
5533 /* Complete frame received */
5534 l2cap_recv_frame(conn, skb);
5538 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5540 if (skb->len > len) {
5541 BT_ERR("Frame is too long (len %d, expected len %d)",
5543 l2cap_conn_unreliable(conn, ECOMM);
5547 /* Allocate skb for the complete frame (with header) */
5548 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5552 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5554 conn->rx_len = len - skb->len;
5556 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5558 if (!conn->rx_len) {
5559 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5560 l2cap_conn_unreliable(conn, ECOMM);
5564 if (skb->len > conn->rx_len) {
5565 BT_ERR("Fragment is too long (len %d, expected %d)",
5566 skb->len, conn->rx_len);
5567 kfree_skb(conn->rx_skb);
5568 conn->rx_skb = NULL;
5570 l2cap_conn_unreliable(conn, ECOMM);
5574 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5576 conn->rx_len -= skb->len;
5578 if (!conn->rx_len) {
5579 /* Complete frame received */
5580 l2cap_recv_frame(conn, conn->rx_skb);
5581 conn->rx_skb = NULL;
5590 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5592 struct l2cap_chan *c;
5594 read_lock(&chan_list_lock);
5596 list_for_each_entry(c, &chan_list, global_l) {
5597 struct sock *sk = c->sk;
5599 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5600 batostr(&bt_sk(sk)->src),
5601 batostr(&bt_sk(sk)->dst),
5602 c->state, __le16_to_cpu(c->psm),
5603 c->scid, c->dcid, c->imtu, c->omtu,
5604 c->sec_level, c->mode);
5607 read_unlock(&chan_list_lock);
5612 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5614 return single_open(file, l2cap_debugfs_show, inode->i_private);
5617 static const struct file_operations l2cap_debugfs_fops = {
5618 .open = l2cap_debugfs_open,
5620 .llseek = seq_lseek,
5621 .release = single_release,
5624 static struct dentry *l2cap_debugfs;
5626 int __init l2cap_init(void)
5630 err = l2cap_init_sockets();
5635 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5636 bt_debugfs, NULL, &l2cap_debugfs_fops);
5638 BT_ERR("Failed to create L2CAP debug file");
5644 void l2cap_exit(void)
5646 debugfs_remove(l2cap_debugfs);
5647 l2cap_cleanup_sockets();
5650 module_param(disable_ertm, bool, 0644);
5651 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");