2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
/* Locally supported L2CAP feature mask, advertised in Information
 * responses; fixed-channel map with the signalling channel bit set. */
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of all L2CAP channels (listening and connected),
 * protected by chan_list_lock (read-mostly, hence rwlock). */
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for the signalling/TX helpers used before
 * their definitions below. */
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
/* Find a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock; returns NULL if not found
 * (full body truncated in this extract). */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
67 list_for_each_entry(c, &conn->chan_l, list) {
/* Find a channel on this connection by source CID.
 * Caller must hold conn->chan_lock. */
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
78 list_for_each_entry(c, &conn->chan_l, list) {
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
/* Takes conn->chan_lock around the unlocked lookup; the found
 * channel is presumably locked before the lock is dropped —
 * the locking step itself is not visible in this extract. */
91 mutex_lock(&conn->chan_lock);
92 c = __l2cap_get_chan_by_scid(conn, cid);
95 mutex_unlock(&conn->chan_lock);
/* Find a channel by the signalling command identifier it is
 * currently waiting on. Caller must hold conn->chan_lock. */
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
102 struct l2cap_chan *c;
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
/* Global lookup: any channel bound to this PSM (stored in sport)
 * and local source address. Caller must hold chan_list_lock. */
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
113 struct l2cap_chan *c;
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM for the given source address.
 * If psm is non-zero it must not already be taken; if zero, a free
 * dynamic PSM is auto-allocated from 0x1001..0x10ff.  Valid PSMs are
 * odd, hence the step of 2.  Serialized by chan_list_lock. */
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
126 write_lock(&chan_list_lock);
/* Explicit PSM requested: fail if another channel on this source
 * address already owns it (error path truncated in this extract). */
128 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
141 for (p = 0x1001; p < 0x1100; p += 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 chan->psm = cpu_to_le16(p);
144 chan->sport = cpu_to_le16(p);
151 write_unlock(&chan_list_lock);
/* Bind a channel to a fixed source CID (used for fixed channels).
 * The assignment itself is truncated in this extract. */
155 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
157 write_lock(&chan_list_lock);
161 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic source CID on this connection.
 * Caller must hold conn->chan_lock. */
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
168 u16 cid = L2CAP_CID_DYN_START;
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition the channel state machine, delegating the actual
 * state store (and any socket-state mirroring) to the ops hook.
 * Caller is expected to hold the required lock (sock lock in the
 * locked variant below). */
178 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
180 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
181 state_to_string(state));
184 chan->ops->state_change(chan, state);
/* Locked wrapper around __l2cap_state_change(); the lock/unlock of
 * chan->sk is truncated in this extract. */
187 static void l2cap_state_change(struct l2cap_chan *chan, int state)
189 struct sock *sk = chan->sk;
192 __l2cap_state_change(chan, state);
/* Record an error on the channel's socket (sk_err assignment is
 * truncated in this extract); unlocked variant. */
196 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
198 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(). */
203 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
205 struct sock *sk = chan->sk;
208 __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only if the monitor timer
 * is not already pending — monitor supersedes retransmit — and a
 * retransmission timeout has been negotiated. */
212 static void __set_retrans_timer(struct l2cap_chan *chan)
214 if (!delayed_work_pending(&chan->monitor_timer) &&
215 chan->retrans_timeout) {
216 l2cap_set_timer(chan, &chan->retrans_timer,
217 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first (the two are mutually exclusive). */
221 static void __set_monitor_timer(struct l2cap_chan *chan)
223 __clear_retrans_timer(chan);
224 if (chan->monitor_timeout) {
225 l2cap_set_timer(chan, &chan->monitor_timer,
226 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of a queue for the skb carrying the given ERTM TX
 * sequence number; returns NULL if absent (return paths truncated). */
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
/* Allocate the backing array for a sequence-number list.  The array
 * is sized to the next power of two >= size so that (seq & mask)
 * maps any 14-bit sequence number to a slot in O(1).  Every slot
 * starts CLEAR (i.e. "not a member").  Returns 0 on success; the
 * allocation-failure return is truncated in this extract. */
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a no-op). */
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove an arbitrary sequence number from the list.  Removing the
 * head is O(1); removing an interior element walks the singly-linked
 * chain to find the predecessor.  Returns the removed number, or
 * L2CAP_SEQ_LIST_CLEAR if the list is empty or seq is not present. */
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removed the last element: reset to the empty state. */
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
/* Pop the head of the list in O(1); empty list yields CLEAR. */
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: clear every slot (O(capacity)) and reset
 * head/tail.  No-op if already empty. */
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append seq at the tail in O(1).  Duplicates are silently ignored
 * (slot already non-CLEAR).  First element also becomes the head;
 * the new tail slot is marked with the TAIL sentinel. */
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the per-channel timer: close the channel
 * with a reason derived from its current state (ECONNREFUSED for
 * connected/connecting states shown here; other reasons truncated),
 * then drop the reference taken when the timer was armed. */
361 static void l2cap_chan_timeout(struct work_struct *work)
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 struct l2cap_conn *conn = chan->conn;
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
/* Lock order: conn->chan_lock, then the channel lock. */
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
381 l2cap_chan_close(chan, reason);
383 l2cap_chan_unlock(chan);
/* close() is called outside the channel lock but still under
 * conn->chan_lock. */
385 chan->ops->close(chan);
386 mutex_unlock(&conn->chan_lock);
388 l2cap_chan_put(chan);
/* Allocate and initialise a new channel: zeroed, refcount 1, state
 * BT_OPEN, registered on the global chan_list.  Returns the channel
 * (NULL-on-allocation-failure path truncated in this extract). */
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
409 atomic_set(&chan->refcnt, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);
/* Unregister from the global list and drop the creation reference;
 * the channel is freed when the refcount hits zero. */
419 void l2cap_chan_destroy(struct l2cap_chan *chan)
421 write_lock(&chan_list_lock);
422 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock);
425 l2cap_chan_put(chan);
/* Reset negotiable parameters to their spec defaults before a new
 * configuration round. */
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
435 chan->sec_level = BT_SECURITY_LOW;
437 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection: assign CIDs/MTUs according to
 * the channel type, set best-effort QoS defaults, take a reference
 * and link it on conn->chan_l.  Caller must hold conn->chan_lock. */
440 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
442 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
443 __le16_to_cpu(chan->psm), chan->dcid);
445 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
449 switch (chan->chan_type) {
450 case L2CAP_CHAN_CONN_ORIENTED:
/* LE links use the fixed LE data CID; BR/EDR allocates a
 * dynamic source CID. */
451 if (conn->hcon->type == LE_LINK) {
453 chan->omtu = L2CAP_DEFAULT_MTU;
454 chan->scid = L2CAP_CID_LE_DATA;
455 chan->dcid = L2CAP_CID_LE_DATA;
457 /* Alloc CID for connection-oriented socket */
458 chan->scid = l2cap_alloc_cid(conn);
459 chan->omtu = L2CAP_DEFAULT_MTU;
463 case L2CAP_CHAN_CONN_LESS:
464 /* Connectionless socket */
465 chan->scid = L2CAP_CID_CONN_LESS;
466 chan->dcid = L2CAP_CID_CONN_LESS;
467 chan->omtu = L2CAP_DEFAULT_MTU;
470 case L2CAP_CHAN_CONN_FIX_A2MP:
471 chan->scid = L2CAP_CID_A2MP;
472 chan->dcid = L2CAP_CID_A2MP;
473 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
474 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
478 /* Raw socket can send/recv signalling messages only */
479 chan->scid = L2CAP_CID_SIGNALING;
480 chan->dcid = L2CAP_CID_SIGNALING;
481 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default local QoS: best-effort service with spec defaults. */
484 chan->local_id = L2CAP_BESTEFFORT_ID;
485 chan->local_stype = L2CAP_SERV_BESTEFFORT;
486 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
487 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
488 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
489 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
491 l2cap_chan_hold(chan);
493 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: takes conn->chan_lock around __l2cap_chan_add(). */
496 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
498 mutex_lock(&conn->chan_lock);
499 __l2cap_chan_add(conn, chan);
500 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: cancel the channel timer,
 * unlink it, drop the list reference and the hci_conn reference
 * (A2MP fixed channels do not hold one), notify the owner via
 * ops->teardown, and tear down per-mode state (ERTM timers and
 * queues fall through into the streaming TX-queue purge). */
503 void l2cap_chan_del(struct l2cap_chan *chan, int err)
505 struct l2cap_conn *conn = chan->conn;
507 __clear_chan_timer(chan);
509 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
512 /* Delete from channel list */
513 list_del(&chan->list);
515 l2cap_chan_put(chan);
519 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
520 hci_conn_put(conn->hcon);
523 if (chan->ops->teardown)
524 chan->ops->teardown(chan, err);
/* Nothing mode-specific to free if configuration never finished. */
526 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
530 case L2CAP_MODE_BASIC:
533 case L2CAP_MODE_ERTM:
534 __clear_retrans_timer(chan);
535 __clear_monitor_timer(chan);
536 __clear_ack_timer(chan);
538 skb_queue_purge(&chan->srej_q);
540 l2cap_seq_list_free(&chan->srej_list);
541 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough: ERTM also purges the TX queue below */
545 case L2CAP_MODE_STREAMING:
546 skb_queue_purge(&chan->tx_q);
/* Close a channel according to its current state (the switch labels
 * themselves are truncated in this extract): connected/configuring
 * BR/EDR channels send a Disconnect request and arm the channel
 * timer; a BT_CONNECT2 channel that was never accepted answers the
 * pending Connect request with a reject; anything else is deleted
 * or simply torn down.  Caller holds the channel lock. */
553 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
555 struct l2cap_conn *conn = chan->conn;
556 struct sock *sk = chan->sk;
558 BT_DBG("chan %p state %s sk %p", chan,
559 state_to_string(chan->state), sk);
561 switch (chan->state) {
563 if (chan->ops->teardown)
564 chan->ops->teardown(chan, 0);
/* Connected/configuring on ACL: initiate a proper L2CAP
 * disconnect handshake rather than just dropping state. */
569 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
570 conn->hcon->type == ACL_LINK) {
571 __set_chan_timer(chan, sk->sk_sndtimeo);
572 l2cap_send_disconn_req(conn, chan, reason);
574 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: a remote Connect request is pending; answer it
 * with SEC_BLOCK (deferred setup) or BAD_PSM before deleting. */
578 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
579 conn->hcon->type == ACL_LINK) {
580 struct l2cap_conn_rsp rsp;
583 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
584 result = L2CAP_CR_SEC_BLOCK;
586 result = L2CAP_CR_BAD_PSM;
587 l2cap_state_change(chan, BT_DISCONN);
/* Response carries our dcid/scid swapped into the peer's
 * scid/dcid fields, as seen from the remote side. */
589 rsp.scid = cpu_to_le16(chan->dcid);
590 rsp.dcid = cpu_to_le16(chan->scid);
591 rsp.result = cpu_to_le16(result);
592 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
593 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
597 l2cap_chan_del(chan, reason);
602 l2cap_chan_del(chan, reason);
/* Default: no protocol action needed, just notify the owner. */
606 if (chan->ops->teardown)
607 chan->ops->teardown(chan, 0);
/* Map the channel type and security level to an HCI authentication
 * requirement: raw (signalling-only) channels use dedicated bonding,
 * SDP is special-cased to no bonding (and demoted from LOW to the
 * SDP security level), and everything else uses general bonding. */
612 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
614 if (chan->chan_type == L2CAP_CHAN_RAW) {
615 switch (chan->sec_level) {
616 case BT_SECURITY_HIGH:
617 return HCI_AT_DEDICATED_BONDING_MITM;
618 case BT_SECURITY_MEDIUM:
619 return HCI_AT_DEDICATED_BONDING;
621 return HCI_AT_NO_BONDING;
623 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
624 if (chan->sec_level == BT_SECURITY_LOW)
625 chan->sec_level = BT_SECURITY_SDP;
627 if (chan->sec_level == BT_SECURITY_HIGH)
628 return HCI_AT_NO_BONDING_MITM;
630 return HCI_AT_NO_BONDING;
632 switch (chan->sec_level) {
633 case BT_SECURITY_HIGH:
634 return HCI_AT_GENERAL_BONDING_MITM;
635 case BT_SECURITY_MEDIUM:
636 return HCI_AT_GENERAL_BONDING;
638 return HCI_AT_NO_BONDING;
643 /* Service level security */
644 int l2cap_chan_check_security(struct l2cap_chan *chan)
646 struct l2cap_conn *conn = chan->conn;
/* Delegate to the HCI layer with the derived auth requirement. */
649 auth_type = l2cap_get_auth_type(chan);
651 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for this
 * connection, wrapping within the kernel-owned 1..128 range. */
654 static u8 l2cap_get_ident(struct l2cap_conn *conn)
658 /* Get next available identificator.
659 * 1 - 128 are used by kernel.
660 * 129 - 199 are reserved.
661 * 200 - 254 are used by utilities like l2ping, etc.
664 spin_lock(&conn->lock);
/* Wrap back to the start of the kernel range (reset to 1 is
 * truncated in this extract). */
666 if (++conn->tx_ident > 128)
671 spin_unlock(&conn->lock);
/* Build and transmit one signalling command on the connection's HCI
 * channel.  Signalling frames are sent non-flushable when the
 * controller supports it, forced active, and at maximum priority. */
676 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
678 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
681 BT_DBG("code 0x%2.2x", code);
686 if (lmp_no_flush_capable(conn->hcon->hdev))
687 flags = ACL_START_NO_FLUSH;
691 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
692 skb->priority = HCI_PRIO_MAX;
694 hci_send_acl(conn->hchan, skb, flags);
/* Transmit one data skb for a channel.  The frame is sent
 * non-flushable unless the channel is explicitly flushable, and
 * force_active follows the channel's FORCE_ACTIVE flag. */
697 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
699 struct hci_conn *hcon = chan->conn->hcon;
702 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
705 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
706 lmp_no_flush_capable(hcon->hdev))
707 flags = ACL_START_NO_FLUSH;
711 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
712 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into struct
 * l2cap_ctrl.  The frame-type bit selects S-frame (poll/supervise)
 * vs I-frame (sar/txseq) decoding. */
715 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
717 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
718 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
720 if (enh & L2CAP_CTRL_FRAME_TYPE) {
/* S-frame */
723 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
724 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
/* I-frame */
731 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
732 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field (extended window mode);
 * same structure as the enhanced variant with wider fields. */
739 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
741 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
742 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
744 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
747 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
748 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
755 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
756 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field at the front of an incoming
 * skb, choosing extended vs enhanced form from FLAG_EXT_CTRL. */
763 static inline void __unpack_control(struct l2cap_chan *chan,
766 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
767 __unpack_extended_control(get_unaligned_le32(skb->data),
768 &bt_cb(skb)->control);
769 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
771 __unpack_enhanced_control(get_unaligned_le16(skb->data),
772 &bt_cb(skb)->control);
773 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control: encode a struct l2cap_ctrl
 * into the 32-bit extended control word. */
777 static u32 __pack_extended_control(struct l2cap_ctrl *control)
781 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
782 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
784 if (control->sframe) {
785 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
786 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
787 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
789 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
790 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control: encode into the 16-bit
 * enhanced control word. */
796 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
800 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
801 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
803 if (control->sframe) {
804 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
805 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
806 packed |= L2CAP_CTRL_FRAME_TYPE;
808 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
809 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the (possibly unaligned) control field into an outgoing skb
 * immediately after the basic L2CAP header. */
815 static inline void __pack_control(struct l2cap_chan *chan,
816 struct l2cap_ctrl *control,
819 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
820 put_unaligned_le32(__pack_extended_control(control),
821 skb->data + L2CAP_HDR_SIZE);
823 put_unaligned_le16(__pack_enhanced_control(control),
824 skb->data + L2CAP_HDR_SIZE);
/* ERTM header size: extended (32-bit control) vs enhanced (16-bit). */
828 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
830 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
831 return L2CAP_EXT_HDR_SIZE;
833 return L2CAP_ENH_HDR_SIZE;
/* Build an ERTM S-frame PDU: basic header + 16/32-bit control field
 * + optional CRC16 FCS computed over everything built so far.
 * Returns the skb, or ERR_PTR(-ENOMEM) on allocation failure. */
836 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
840 struct l2cap_hdr *lh;
841 int hlen = __ertm_hdr_size(chan);
843 if (chan->fcs == L2CAP_FCS_CRC16)
844 hlen += L2CAP_FCS_SIZE;
846 skb = bt_skb_alloc(hlen, GFP_KERNEL);
849 return ERR_PTR(-ENOMEM);
/* len excludes the basic header itself. */
851 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
852 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
853 lh->cid = cpu_to_le16(chan->dcid);
855 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
856 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
858 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
860 if (chan->fcs == L2CAP_FCS_CRC16) {
861 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
862 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
865 skb->priority = HCI_PRIO_MAX;
/* Send one supervisory frame, updating ERTM bookkeeping: a pending
 * F-bit is consumed, RR/RNR toggles the RNR_SENT state, and any
 * non-SREJ S-frame acknowledges up to reqseq (so the ack timer can
 * be cancelled).  Non-S-frame controls are rejected up front. */
869 static void l2cap_send_sframe(struct l2cap_chan *chan,
870 struct l2cap_ctrl *control)
875 BT_DBG("chan %p, control %p", chan, control);
877 if (!control->sframe)
/* Consume a pending final-bit obligation (condition partially
 * truncated in this extract). */
880 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
884 if (control->super == L2CAP_SUPER_RR)
885 clear_bit(CONN_RNR_SENT, &chan->conn_state);
886 else if (control->super == L2CAP_SUPER_RNR)
887 set_bit(CONN_RNR_SENT, &chan->conn_state);
889 if (control->super != L2CAP_SUPER_SREJ) {
890 chan->last_acked_seq = control->reqseq;
891 __clear_ack_timer(chan);
894 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
895 control->final, control->poll, control->super);
897 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
898 control_field = __pack_extended_control(control);
900 control_field = __pack_enhanced_control(control);
902 skb = l2cap_create_sframe_pdu(chan, control_field);
904 l2cap_do_send(chan, skb);
/* Convenience: send RR, or RNR when we are locally busy, with the
 * given poll bit and the current buffer_seq as reqseq. */
907 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
909 struct l2cap_ctrl control;
911 BT_DBG("chan %p, poll %d", chan, poll);
913 memset(&control, 0, sizeof(control));
917 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
918 control.super = L2CAP_SUPER_RNR;
920 control.super = L2CAP_SUPER_RR;
922 control.reqseq = chan->buffer_seq;
923 l2cap_send_sframe(chan, &control);
/* True when no Connect request is currently outstanding. */
926 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
928 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP Connect request for this channel using a freshly
 * allocated ident, and mark the request as pending. */
931 static void l2cap_send_conn_req(struct l2cap_chan *chan)
933 struct l2cap_conn *conn = chan->conn;
934 struct l2cap_conn_req req;
936 req.scid = cpu_to_le16(chan->scid);
939 chan->ident = l2cap_get_ident(conn);
941 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
943 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Finalize channel setup: clear all configuration flags, stop the
 * channel timer, mark connected, and notify the owner. */
946 static void l2cap_chan_ready(struct l2cap_chan *chan)
948 /* This clears all conf flags, including CONF_NOT_COMPLETE */
949 chan->conf_state = 0;
950 __clear_chan_timer(chan);
952 chan->state = BT_CONNECTED;
954 chan->ops->ready(chan);
/* Kick off channel establishment: LE links are ready immediately;
 * on BR/EDR, either send a Connect request (if the feature-mask
 * exchange is done and security allows) or first issue an
 * Information request for the feature mask. */
957 static void l2cap_do_start(struct l2cap_chan *chan)
959 struct l2cap_conn *conn = chan->conn;
961 if (conn->hcon->type == LE_LINK) {
962 l2cap_chan_ready(chan);
966 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
967 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
970 if (l2cap_chan_check_security(chan) &&
971 __l2cap_no_conn_pending(chan))
972 l2cap_send_conn_req(chan);
974 struct l2cap_info_req req;
975 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
977 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
978 conn->info_ident = l2cap_get_ident(conn);
/* Bound the feature exchange with the info timer. */
980 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
982 l2cap_send_cmd(conn, conn->info_ident,
983 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether a given L2CAP mode is supported both locally and by
 * the remote feature mask (basic mode handling truncated here). */
987 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
989 u32 local_feat_mask = l2cap_feat_mask;
991 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
994 case L2CAP_MODE_ERTM:
995 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
996 case L2CAP_MODE_STREAMING:
997 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate a disconnect: stop ERTM timers, short-circuit for A2MP
 * fixed channels (no signalling), otherwise send a Disconnect
 * request and move the channel to BT_DISCONN with the given error. */
1003 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1005 struct sock *sk = chan->sk;
1006 struct l2cap_disconn_req req;
1011 if (chan->mode == L2CAP_MODE_ERTM) {
1012 __clear_retrans_timer(chan);
1013 __clear_monitor_timer(chan);
1014 __clear_ack_timer(chan);
1017 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1018 __l2cap_state_change(chan, BT_DISCONN);
1022 req.dcid = cpu_to_le16(chan->dcid);
1023 req.scid = cpu_to_le16(chan->scid);
1024 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1025 L2CAP_DISCONN_REQ, sizeof(req), &req);
1028 __l2cap_state_change(chan, BT_DISCONN);
1029 __l2cap_chan_set_err(chan, err);
1033 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on the link and advance
 * its setup: BT_CONNECT channels (re)send Connect requests once
 * security passes (closing those whose mode the peer cannot
 * support); BT_CONNECT2 channels answer their pending Connect
 * request and, on success, start configuration. */
1034 static void l2cap_conn_start(struct l2cap_conn *conn)
1036 struct l2cap_chan *chan, *tmp;
1038 BT_DBG("conn %p", conn);
1040 mutex_lock(&conn->chan_lock);
/* _safe iteration: l2cap_chan_close() below can unlink chan. */
1042 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1043 struct sock *sk = chan->sk;
1045 l2cap_chan_lock(chan);
1047 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1048 l2cap_chan_unlock(chan);
1052 if (chan->state == BT_CONNECT) {
1053 if (!l2cap_chan_check_security(chan) ||
1054 !__l2cap_no_conn_pending(chan)) {
1055 l2cap_chan_unlock(chan);
/* State-2 device with an unsupported mode cannot fall
 * back during config: close it now. */
1059 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1060 && test_bit(CONF_STATE2_DEVICE,
1061 &chan->conf_state)) {
1062 l2cap_chan_close(chan, ECONNRESET);
1063 l2cap_chan_unlock(chan);
1067 l2cap_send_conn_req(chan);
1069 } else if (chan->state == BT_CONNECT2) {
1070 struct l2cap_conn_rsp rsp;
1072 rsp.scid = cpu_to_le16(chan->dcid);
1073 rsp.dcid = cpu_to_le16(chan->scid);
1075 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: report PENDING/AUTHOR_PEND and wake
 * the listening parent to accept the connection. */
1077 if (test_bit(BT_SK_DEFER_SETUP,
1078 &bt_sk(sk)->flags)) {
1079 struct sock *parent = bt_sk(sk)->parent;
1080 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1081 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1083 parent->sk_data_ready(parent, 0);
1086 __l2cap_state_change(chan, BT_CONFIG);
1087 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1088 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: authentication pending. */
1092 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1093 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1096 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only proceed to configuration after a SUCCESS response and
 * if we have not already sent a Configure request. */
1099 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1100 rsp.result != L2CAP_CR_SUCCESS) {
1101 l2cap_chan_unlock(chan);
1105 set_bit(CONF_REQ_SENT, &chan->conf_state);
1106 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1107 l2cap_build_conf_req(chan, buf), buf);
1108 chan->num_conf_req++;
1111 l2cap_chan_unlock(chan);
1114 mutex_unlock(&conn->chan_lock);
1117 /* Find socket with cid and source/destination bdaddr.
1118 * Returns closest match, locked.
/* Global lookup by fixed CID: an exact src+dst address match wins
 * immediately; otherwise the best wildcard (BDADDR_ANY) match is
 * remembered in c1 and returned after the scan (the remembering
 * and final return are truncated in this extract). */
1120 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1124 struct l2cap_chan *c, *c1 = NULL;
1126 read_lock(&chan_list_lock);
1128 list_for_each_entry(c, &chan_list, global_l) {
1129 struct sock *sk = c->sk;
/* Optional state filter (e.g. BT_LISTEN only). */
1131 if (state && c->state != state)
1134 if (c->scid == cid) {
1135 int src_match, dst_match;
1136 int src_any, dst_any;
1139 src_match = !bacmp(&bt_sk(sk)->src, src);
1140 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1141 if (src_match && dst_match) {
1142 read_unlock(&chan_list_lock);
1147 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1148 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1149 if ((src_match && dst_any) || (src_any && dst_match) ||
1150 (src_any && dst_any))
1155 read_unlock(&chan_list_lock);
/* LE link came up as slave: if a socket is listening on the LE data
 * CID, spawn a child channel for the incoming connection, copy the
 * link addresses into it, queue it on the parent's accept queue and
 * mark it ready. */
1160 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1162 struct sock *parent, *sk;
1163 struct l2cap_chan *chan, *pchan;
1167 /* Check if we have socket listening on cid */
1168 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1169 conn->src, conn->dst);
1177 chan = pchan->ops->new_connection(pchan);
/* Keep the ACL alive while the child channel exists. */
1183 hci_conn_hold(conn->hcon);
1184 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1186 bacpy(&bt_sk(sk)->src, conn->src);
1187 bacpy(&bt_sk(sk)->dst, conn->dst);
1189 bt_accept_enqueue(parent, sk);
1191 l2cap_chan_add(conn, chan);
1193 l2cap_chan_ready(chan);
1196 release_sock(parent);
/* Link-level "connection ready" notification: handle LE setup and
 * security elevation, then walk all channels — fixed A2MP channels
 * are skipped, LE channels become ready once SMP security passes,
 * non-connection-oriented channels go straight to BT_CONNECTED, and
 * BT_CONNECT channels are started. */
1199 static void l2cap_conn_ready(struct l2cap_conn *conn)
1201 struct l2cap_chan *chan;
1203 BT_DBG("conn %p", conn);
1205 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1206 l2cap_le_conn_ready(conn);
1208 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1209 smp_conn_security(conn, conn->hcon->pending_sec_level);
1211 mutex_lock(&conn->chan_lock);
1213 list_for_each_entry(chan, &conn->chan_l, list) {
1215 l2cap_chan_lock(chan);
1217 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1218 l2cap_chan_unlock(chan);
1222 if (conn->hcon->type == LE_LINK) {
1223 if (smp_conn_security(conn, chan->sec_level))
1224 l2cap_chan_ready(chan);
1226 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1227 struct sock *sk = chan->sk;
1228 __clear_chan_timer(chan);
1230 __l2cap_state_change(chan, BT_CONNECTED);
1231 sk->sk_state_change(sk);
1234 } else if (chan->state == BT_CONNECT)
1235 l2cap_do_start(chan);
1237 l2cap_chan_unlock(chan);
1240 mutex_unlock(&conn->chan_lock);
1243 /* Notify sockets that we cannot guaranty reliability anymore */
1244 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1246 struct l2cap_chan *chan;
1248 BT_DBG("conn %p", conn);
1250 mutex_lock(&conn->chan_lock);
/* Only channels that insisted on reliability get the error. */
1252 list_for_each_entry(chan, &conn->chan_l, list) {
1253 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1254 __l2cap_chan_set_err(chan, err);
1257 mutex_unlock(&conn->chan_lock);
/* Information-request timer expired: treat the feature exchange as
 * done (with whatever we have) and proceed with channel setup. */
1260 static void l2cap_info_timeout(struct work_struct *work)
1262 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1265 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1266 conn->info_ident = 0;
1268 l2cap_conn_start(conn);
/* Tear down an L2CAP connection: delete every channel with the
 * given error, release the HCI channel, cancel outstanding timers,
 * destroy any pending SMP context and detach from the hci_conn. */
1271 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1273 struct l2cap_conn *conn = hcon->l2cap_data;
1274 struct l2cap_chan *chan, *l;
1279 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled PDU. */
1281 kfree_skb(conn->rx_skb);
1283 mutex_lock(&conn->chan_lock);
/* Hold each channel across del/close so the final put below is
 * what may free it. */
1286 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1287 l2cap_chan_hold(chan);
1288 l2cap_chan_lock(chan);
1290 l2cap_chan_del(chan, err);
1292 l2cap_chan_unlock(chan);
1294 chan->ops->close(chan);
1295 l2cap_chan_put(chan);
1298 mutex_unlock(&conn->chan_lock);
1300 hci_chan_del(conn->hchan);
1302 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1303 cancel_delayed_work_sync(&conn->info_timer);
1305 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1306 cancel_delayed_work_sync(&conn->security_timer);
1307 smp_chan_destroy(conn);
1310 hcon->l2cap_data = NULL;
/* SMP security procedure timed out on an LE link: destroy the SMP
 * context and drop the whole connection with ETIMEDOUT. */
1314 static void security_timeout(struct work_struct *work)
1316 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1317 security_timer.work);
1319 BT_DBG("conn %p", conn);
1321 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1322 smp_chan_destroy(conn);
1323 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for an hci_conn: make
 * an HCI channel, allocate and wire up the conn structure, pick the
 * MTU from the link type, and arm the appropriate timer (security
 * timer for LE, info timer otherwise). */
1327 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1329 struct l2cap_conn *conn = hcon->l2cap_data;
1330 struct hci_chan *hchan;
1335 hchan = hci_chan_create(hcon);
/* Allocation failure: undo the hci_chan (NULL-return truncated). */
1339 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1341 hci_chan_del(hchan);
1345 hcon->l2cap_data = conn;
1347 conn->hchan = hchan;
1349 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1351 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1352 conn->mtu = hcon->hdev->le_mtu;
1354 conn->mtu = hcon->hdev->acl_mtu;
1356 conn->src = &hcon->hdev->bdaddr;
1357 conn->dst = &hcon->dst;
1359 conn->feat_mask = 0;
1361 spin_lock_init(&conn->lock);
1362 mutex_init(&conn->chan_lock);
1364 INIT_LIST_HEAD(&conn->chan_l);
1366 if (hcon->type == LE_LINK)
1367 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1369 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1371 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1376 /* ---- Socket interface ---- */
1378 /* Find socket with psm and source / destination bdaddr.
1379 * Returns closest match.
/* Same closest-match scheme as l2cap_global_chan_by_scid() but
 * keyed on PSM: exact src+dst match returns immediately, wildcard
 * matches are remembered in c1 (remember/return truncated here). */
1381 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1385 struct l2cap_chan *c, *c1 = NULL;
1387 read_lock(&chan_list_lock);
1389 list_for_each_entry(c, &chan_list, global_l) {
1390 struct sock *sk = c->sk;
1392 if (state && c->state != state)
1395 if (c->psm == psm) {
1396 int src_match, dst_match;
1397 int src_any, dst_any;
1400 src_match = !bacmp(&bt_sk(sk)->src, src);
1401 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1402 if (src_match && dst_match) {
1403 read_unlock(&chan_list_lock);
1408 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1409 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1410 if ((src_match && dst_any) || (src_any && dst_match) ||
1411 (src_any && dst_any))
1416 read_unlock(&chan_list_lock);
/* Establish an outgoing L2CAP channel to (dst, psm/cid): validate
 * the PSM and mode, route to a local adapter, create or reuse the
 * ACL/LE link, attach the channel to the connection and either
 * complete immediately (link already up) or wait for connect. */
1421 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1422 bdaddr_t *dst, u8 dst_type)
1424 struct sock *sk = chan->sk;
1425 bdaddr_t *src = &bt_sk(sk)->src;
1426 struct l2cap_conn *conn;
1427 struct hci_conn *hcon;
1428 struct hci_dev *hdev;
1432 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1433 dst_type, __le16_to_cpu(chan->psm));
1435 hdev = hci_get_route(dst, src);
1437 return -EHOSTUNREACH;
1441 l2cap_chan_lock(chan);
1443 /* PSM must be odd and lsb of upper byte must be 0 */
1444 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1445 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1450 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1455 switch (chan->mode) {
1456 case L2CAP_MODE_BASIC:
1458 case L2CAP_MODE_ERTM:
1459 case L2CAP_MODE_STREAMING:
1468 switch (chan->state) {
1472 /* Already connecting */
1477 /* Already connected */
1491 /* Set destination address and psm */
1493 bacpy(&bt_sk(sk)->dst, dst);
1499 auth_type = l2cap_get_auth_type(chan);
/* LE data CID selects an LE link; everything else uses ACL. */
1501 if (chan->dcid == L2CAP_CID_LE_DATA)
1502 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1503 chan->sec_level, auth_type);
1505 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1506 chan->sec_level, auth_type);
1509 err = PTR_ERR(hcon);
1513 conn = l2cap_conn_add(hcon, 0);
/* LE links support only one data channel at a time. */
1520 if (hcon->type == LE_LINK) {
1523 if (!list_empty(&conn->chan_l)) {
1532 /* Update source addr of the socket */
1533 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock; drop the channel lock around
 * it to respect the conn-then-chan lock order. */
1535 l2cap_chan_unlock(chan);
1536 l2cap_chan_add(conn, chan);
1537 l2cap_chan_lock(chan);
1539 l2cap_state_change(chan, BT_CONNECT);
1540 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link already up: finish setup now instead of waiting for the
 * connect-complete event. */
1542 if (hcon->state == BT_CONNECTED) {
1543 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1544 __clear_chan_timer(chan);
1545 if (l2cap_chan_check_security(chan))
1546 l2cap_state_change(chan, BT_CONNECTED);
1548 l2cap_do_start(chan);
1554 l2cap_chan_unlock(chan);
1555 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the connection goes away.  Honors pending signals,
 * the wait timeout and socket errors.  Returns 0 or a negative errno.
 */
1560 int __l2cap_wait_ack(struct sock *sk)
1562 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1563 DECLARE_WAITQUEUE(wait, current);
1567 add_wait_queue(sk_sleep(sk), &wait);
1568 set_current_state(TASK_INTERRUPTIBLE);
/* Loop while unacked frames remain and the conn is still attached. */
1569 while (chan->unacked_frames > 0 && chan->conn) {
1573 if (signal_pending(current)) {
1574 err = sock_intr_errno(timeo);
1579 timeo = schedule_timeout(timeo);
1581 set_current_state(TASK_INTERRUPTIBLE);
1583 err = sock_error(sk);
1587 set_current_state(TASK_RUNNING);
1588 remove_wait_queue(sk_sleep(sk), &wait);
/* Delayed-work handler for the ERTM monitor timer: feeds the
 * L2CAP_EV_MONITOR_TO event into the TX state machine, then drops the
 * channel reference taken when the timer was armed.
 */
1592 static void l2cap_monitor_timeout(struct work_struct *work)
1594 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1595 monitor_timer.work);
1597 BT_DBG("chan %p", chan);
1599 l2cap_chan_lock(chan);
/* Early-exit path (elided condition in this extract): unlock and put. */
1602 l2cap_chan_unlock(chan);
1603 l2cap_chan_put(chan);
1607 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1609 l2cap_chan_unlock(chan);
1610 l2cap_chan_put(chan);
/* Delayed-work handler for the ERTM retransmission timer: feeds the
 * L2CAP_EV_RETRANS_TO event into the TX state machine, then drops the
 * channel reference taken when the timer was armed.
 */
1613 static void l2cap_retrans_timeout(struct work_struct *work)
1615 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1616 retrans_timer.work);
1618 BT_DBG("chan %p", chan);
1620 l2cap_chan_lock(chan);
/* Early-exit path (elided condition in this extract): unlock and put. */
1623 l2cap_chan_unlock(chan);
1624 l2cap_chan_put(chan);
1628 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1629 l2cap_chan_unlock(chan);
1630 l2cap_chan_put(chan);
/* Transmit queued PDUs in streaming mode: stamp each frame with the next
 * TX sequence number, pack the control field, append the FCS when CRC16
 * is configured, and hand the frame to l2cap_do_send().  Streaming mode
 * keeps no copy for retransmission.
 */
1633 static void l2cap_streaming_send(struct l2cap_chan *chan,
1634 struct sk_buff_head *skbs)
1636 struct sk_buff *skb;
1637 struct l2cap_ctrl *control;
1639 BT_DBG("chan %p, skbs %p", chan, skbs);
1641 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1643 while (!skb_queue_empty(&chan->tx_q)) {
1645 skb = skb_dequeue(&chan->tx_q);
1647 bt_cb(skb)->control.retries = 1;
1648 control = &bt_cb(skb)->control;
/* Streaming frames never acknowledge anything: reqseq is always 0. */
1650 control->reqseq = 0;
1651 control->txseq = chan->next_tx_seq;
1653 __pack_control(chan, control, skb);
1655 if (chan->fcs == L2CAP_FCS_CRC16) {
1656 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1657 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1660 l2cap_do_send(chan, skb);
1662 BT_DBG("Sent txseq %u", control->txseq);
1664 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1665 chan->frames_sent++;
/* Transmit pending ERTM I-frames from tx_q, as long as the channel is
 * connected, the remote is not busy, and the remote TX window has room.
 * Each sent frame piggybacks an ack (reqseq = buffer_seq) and a clone is
 * sent so the original stays queued for retransmission.
 * Returns the number of frames sent (per the trailing BT_DBG; the return
 * line itself is elided in this extract).
 */
1669 static int l2cap_ertm_send(struct l2cap_chan *chan)
1671 struct sk_buff *skb, *tx_skb;
1672 struct l2cap_ctrl *control;
1675 BT_DBG("chan %p", chan);
1677 if (chan->state != BT_CONNECTED)
1680 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
/* Stop when the send head is exhausted, the window is full, or the TX
 * state machine has left the XMIT state.
 */
1683 while (chan->tx_send_head &&
1684 chan->unacked_frames < chan->remote_tx_win &&
1685 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1687 skb = chan->tx_send_head;
1689 bt_cb(skb)->control.retries = 1;
1690 control = &bt_cb(skb)->control;
1692 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggyback an acknowledgment of everything received so far. */
1695 control->reqseq = chan->buffer_seq;
1696 chan->last_acked_seq = chan->buffer_seq;
1697 control->txseq = chan->next_tx_seq;
1699 __pack_control(chan, control, skb);
1701 if (chan->fcs == L2CAP_FCS_CRC16) {
1702 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1703 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1706 /* Clone after data has been modified. Data is assumed to be
1707 read-only (for locking purposes) on cloned sk_buffs.
1709 tx_skb = skb_clone(skb, GFP_KERNEL);
1714 __set_retrans_timer(chan);
1716 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1717 chan->unacked_frames++;
1718 chan->frames_sent++;
/* Advance tx_send_head; NULL once the end of tx_q is reached. */
1721 if (skb_queue_is_last(&chan->tx_q, skb))
1722 chan->tx_send_head = NULL;
1724 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1726 l2cap_do_send(chan, tx_skb);
1727 BT_DBG("Sent txseq %u", control->txseq);
1730 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1731 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every frame whose sequence number sits on retrans_list.
 * Enforces the max_tx retry limit (disconnecting when exceeded), rebuilds
 * the control field and FCS in a fresh copy/clone, and sends it.
 */
1738 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1739 struct l2cap_ctrl control;
1740 struct sk_buff *skb;
1741 struct sk_buff *tx_skb;
1743 BT_DBG("chan %p", chan);
1745 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1748 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1749 seq = l2cap_seq_list_pop(&chan->retrans_list);
/* The frame must still be sitting unacked on tx_q. */
1751 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1753 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1758 bt_cb(skb)->control.retries++;
1759 control = bt_cb(skb)->control;
/* Too many retries on one frame: tear the connection down. */
1761 if (chan->max_tx != 0 &&
1762 bt_cb(skb)->control.retries > chan->max_tx) {
1763 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1764 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1765 l2cap_seq_list_clear(&chan->retrans_list);
1769 control.reqseq = chan->buffer_seq;
1770 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1775 if (skb_cloned(skb)) {
1776 /* Cloned sk_buffs are read-only, so we need a
1779 tx_skb = skb_copy(skb, GFP_ATOMIC);
1781 tx_skb = skb_clone(skb, GFP_ATOMIC);
1785 l2cap_seq_list_clear(&chan->retrans_list);
1789 /* Update skb contents */
1790 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1791 put_unaligned_le32(__pack_extended_control(&control),
1792 tx_skb->data + L2CAP_HDR_SIZE);
1794 put_unaligned_le16(__pack_enhanced_control(&control),
1795 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the rewritten frame. */
1798 if (chan->fcs == L2CAP_FCS_CRC16) {
1799 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1800 put_unaligned_le16(fcs, skb_put(tx_skb,
1804 l2cap_do_send(chan, tx_skb);
1806 BT_DBG("Resent txseq %d", control.txseq);
1808 chan->last_acked_seq = chan->buffer_seq;
1812 static void l2cap_retransmit(struct l2cap_chan *chan,
1813 struct l2cap_ctrl *control)
1815 BT_DBG("chan %p, control %p", chan, control);
1817 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1818 l2cap_ertm_resend(chan);
/* Schedule retransmission of every unacked frame starting at
 * control->reqseq (a REJ-style "go back" operation), then run the resend
 * engine.  Skipped entirely while the remote reports busy.
 */
1821 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1822 struct l2cap_ctrl *control)
1824 struct sk_buff *skb;
1826 BT_DBG("chan %p, control %p", chan, control);
1829 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Rebuild retrans_list from scratch. */
1831 l2cap_seq_list_clear(&chan->retrans_list);
1833 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1836 if (chan->unacked_frames) {
/* Find the first frame at or after reqseq (or the send head). */
1837 skb_queue_walk(&chan->tx_q, skb) {
1838 if (bt_cb(skb)->control.txseq == control->reqseq ||
1839 skb == chan->tx_send_head)
/* Queue everything from that point up to the send head. */
1843 skb_queue_walk_from(&chan->tx_q, skb) {
1844 if (skb == chan->tx_send_head)
1847 l2cap_seq_list_append(&chan->retrans_list,
1848 bt_cb(skb)->control.txseq);
1851 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends RNR when locally busy, otherwise
 * tries to piggyback the ack on outgoing I-frames; if frames remain
 * unacked, sends an explicit RR once the receive window is 3/4 full,
 * or (re)arms the ack timer to batch acknowledgments.
 */
1857 static void l2cap_send_ack(struct l2cap_chan *chan)
1858 struct l2cap_ctrl control;
1859 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1860 chan->last_acked_seq);
1862 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1863 chan, chan->last_acked_seq, chan->buffer_seq);
1865 memset(&control, 0, sizeof(control));
/* Locally busy: tell the peer to hold off with an RNR S-frame. */
1868 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1869 chan->rx_state == L2CAP_RX_STATE_RECV) {
1870 __clear_ack_timer(chan);
1871 control.super = L2CAP_SUPER_RNR;
1872 control.reqseq = chan->buffer_seq;
1873 l2cap_send_sframe(chan, &control);
1875 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1876 l2cap_ertm_send(chan);
1877 /* If any i-frames were sent, they included an ack */
1878 if (chan->buffer_seq == chan->last_acked_seq)
1882 /* Ack now if the window is 3/4ths full.
1883 * Calculate without mul or div
1885 threshold = chan->ack_win;
1886 threshold += threshold << 1;
1889 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1892 if (frames_to_ack >= threshold) {
1893 __clear_ack_timer(chan);
1894 control.super = L2CAP_SUPER_RR;
1895 control.reqseq = chan->buffer_seq;
1896 l2cap_send_sframe(chan, &control);
/* Not enough pending acks yet: defer via the ack timer. */
1901 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb, allocating fragment
 * skbs (chained via frag_list) for data beyond the first @count bytes so
 * that no single buffer exceeds the connection MTU.  Returns 0 on
 * success or a negative errno from allocation/copy failure.
 */
1905 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1906 struct msghdr *msg, int len,
1907 int count, struct sk_buff *skb)
1909 struct l2cap_conn *conn = chan->conn;
1910 struct sk_buff **frag;
/* First chunk goes straight into the head skb. */
1913 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1919 /* Continuation fragments (no L2CAP header) */
1920 frag = &skb_shinfo(skb)->frag_list;
1922 struct sk_buff *tmp;
1924 count = min_t(unsigned int, conn->mtu, len);
/* Fragment allocation is delegated to the channel's ops. */
1926 tmp = chan->ops->alloc_skb(chan, count,
1927 msg->msg_flags & MSG_DONTWAIT);
1929 return PTR_ERR(tmp);
1933 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1936 (*frag)->priority = skb->priority;
/* Account the fragment against the head skb's totals. */
1941 skb->len += (*frag)->len;
1942 skb->data_len += (*frag)->len;
1944 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header plus PSM, followed
 * by the user payload copied from @msg.  Returns the skb or an ERR_PTR
 * on allocation/copy failure.
 */
1950 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1951 struct msghdr *msg, size_t len,
1954 struct l2cap_conn *conn = chan->conn;
1955 struct sk_buff *skb;
1956 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1957 struct l2cap_hdr *lh;
1959 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
/* First buffer holds at most one MTU's worth including the header. */
1961 count = min_t(unsigned int, (conn->mtu - hlen), len);
1963 skb = chan->ops->alloc_skb(chan, count + hlen,
1964 msg->msg_flags & MSG_DONTWAIT);
1968 skb->priority = priority;
1970 /* Create L2CAP header */
1971 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1972 lh->cid = cpu_to_le16(chan->dcid);
1973 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1974 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1976 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1977 if (unlikely(err < 0)) {
1979 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload copied from @msg.  Returns the skb or an ERR_PTR on
 * allocation/copy failure.
 */
1984 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1985 struct msghdr *msg, size_t len,
1988 struct l2cap_conn *conn = chan->conn;
1989 struct sk_buff *skb;
1991 struct l2cap_hdr *lh;
1993 BT_DBG("chan %p len %zu", chan, len);
1995 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1997 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1998 msg->msg_flags & MSG_DONTWAIT);
2002 skb->priority = priority;
2004 /* Create L2CAP header */
2005 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2006 lh->cid = cpu_to_le16(chan->dcid);
2007 lh->len = cpu_to_le16(len);
2009 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2010 if (unlikely(err < 0)) {
2012 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at send time), optional SDU length field for START
 * frames, the payload, and room reserved for the FCS.  Returns the skb
 * or an ERR_PTR on failure (-ENOTCONN when the channel has no conn).
 */
2017 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2018 struct msghdr *msg, size_t len,
2021 struct l2cap_conn *conn = chan->conn;
2022 struct sk_buff *skb;
2023 int err, count, hlen;
2024 struct l2cap_hdr *lh;
2026 BT_DBG("chan %p len %zu", chan, len);
2029 return ERR_PTR(-ENOTCONN);
/* Header size depends on enhanced vs extended control field... */
2031 hlen = __ertm_hdr_size(chan);
/* ...plus an SDU length field on segmented START frames... */
2034 hlen += L2CAP_SDULEN_SIZE;
/* ...plus the trailing FCS when CRC16 is in use. */
2036 if (chan->fcs == L2CAP_FCS_CRC16)
2037 hlen += L2CAP_FCS_SIZE;
2039 count = min_t(unsigned int, (conn->mtu - hlen), len);
2041 skb = chan->ops->alloc_skb(chan, count + hlen,
2042 msg->msg_flags & MSG_DONTWAIT);
2046 /* Create L2CAP header */
2047 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2048 lh->cid = cpu_to_le16(chan->dcid);
2049 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2051 /* Control header is populated later */
2052 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2053 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2055 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2058 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2060 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2061 if (unlikely(err < 0)) {
2063 return ERR_PTR(err);
2066 bt_cb(skb)->control.fcs = chan->fcs;
2067 bt_cb(skb)->control.retries = 0;
/* Split an SDU from @msg into ERTM/streaming PDUs sized to fit a single
 * HCI fragment and the remote MPS, tagging each with the proper SAR
 * value (UNSEGMENTED, or START/CONTINUE/END).  Segments are appended to
 * @seg_queue; on PDU-creation failure the queue is purged and the error
 * returned.
 */
2071 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2072 struct sk_buff_head *seg_queue,
2073 struct msghdr *msg, size_t len)
2075 struct sk_buff *skb;
2080 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2082 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2083 * so fragmented skbs are not used. The HCI layer's handling
2084 * of fragmented skbs is not compatible with ERTM's queueing.
2087 /* PDU size is derived from the HCI MTU */
2088 pdu_len = chan->conn->mtu;
2090 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2092 /* Adjust for largest possible L2CAP overhead. */
2094 pdu_len -= L2CAP_FCS_SIZE;
2096 pdu_len -= __ertm_hdr_size(chan);
2098 /* Remote device may have requested smaller PDUs */
2099 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2101 if (len <= pdu_len) {
2102 sar = L2CAP_SAR_UNSEGMENTED;
/* Multi-PDU SDU: the START frame carries the SDU length field. */
2106 sar = L2CAP_SAR_START;
2108 pdu_len -= L2CAP_SDULEN_SIZE;
2112 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2115 __skb_queue_purge(seg_queue);
2116 return PTR_ERR(skb);
2119 bt_cb(skb)->control.sar = sar;
2120 __skb_queue_tail(seg_queue, skb);
/* After START, later PDUs no longer carry the SDU length field. */
2125 pdu_len += L2CAP_SDULEN_SIZE;
2128 if (len <= pdu_len) {
2129 sar = L2CAP_SAR_END;
2132 sar = L2CAP_SAR_CONTINUE;
/* Entry point for sending user data on @chan.  Dispatches on channel
 * type/mode: connectionless channels send a single G-frame, basic mode a
 * single B-frame, and ERTM/streaming modes segment the SDU and hand the
 * segments to the TX state machine or the streaming sender.
 * Returns bytes sent (per convention) or a negative errno; checks the
 * outgoing MTU in every mode.
 */
2139 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2142 struct sk_buff *skb;
2144 struct sk_buff_head seg_queue;
2146 /* Connectionless channel */
2147 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2148 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2150 return PTR_ERR(skb);
2152 l2cap_do_send(chan, skb);
2156 switch (chan->mode) {
2157 case L2CAP_MODE_BASIC:
2158 /* Check outgoing MTU */
2159 if (len > chan->omtu)
2162 /* Create a basic PDU */
2163 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2165 return PTR_ERR(skb);
2167 l2cap_do_send(chan, skb);
2171 case L2CAP_MODE_ERTM:
2172 case L2CAP_MODE_STREAMING:
2173 /* Check outgoing MTU */
2174 if (len > chan->omtu) {
2179 __skb_queue_head_init(&seg_queue);
2181 /* Do segmentation before calling in to the state machine,
2182 * since it's possible to block while waiting for memory
2185 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2187 /* The channel could have been closed while segmenting,
2188 * check that it is still connected.
2190 if (chan->state != BT_CONNECTED) {
2191 __skb_queue_purge(&seg_queue);
/* ERTM routes through the TX state machine; streaming sends directly. */
2198 if (chan->mode == L2CAP_MODE_ERTM)
2199 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST)2;
2201 l2cap_streaming_send(chan, &seg_queue);
2205 /* If the skbs were not queued for sending, they'll still be in
2206 * seg_queue and need to be purged.
2208 __skb_queue_purge(&seg_queue);
2212 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every sequence number from expected_tx_seq up
 * to (but not including) @txseq that is not already buffered in srej_q,
 * recording each on srej_list, then advance expected_tx_seq past @txseq.
 */
2219 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2221 struct l2cap_ctrl control;
2224 BT_DBG("chan %p, txseq %u", chan, txseq);
2226 memset(&control, 0, sizeof(control));
2228 control.super = L2CAP_SUPER_SREJ;
2230 for (seq = chan->expected_tx_seq; seq != txseq;
2231 seq = __next_seq(chan, seq)) {
/* Frames already sitting in srej_q need no new SREJ. */
2232 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2233 control.reqseq = seq;
2234 l2cap_send_sframe(chan, &control);
2235 l2cap_seq_list_append(&chan->srej_list, seq);
2239 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) missing
 * sequence number; no-op when the SREJ list is empty.
 */
2242 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2244 struct l2cap_ctrl control;
2246 BT_DBG("chan %p", chan);
2248 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2251 memset(&control, 0, sizeof(control));
2253 control.super = L2CAP_SUPER_SREJ;
2254 control.reqseq = chan->srej_list.tail;
2255 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding entry on srej_list except @txseq,
 * cycling each popped entry back onto the list.  The initial head is
 * captured so the list is traversed at most once.
 */
2258 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2260 struct l2cap_ctrl control;
2264 BT_DBG("chan %p, txseq %u", chan, txseq);
2266 memset(&control, 0, sizeof(control));
2268 control.super = L2CAP_SUPER_SREJ;
2270 /* Capture initial list head to allow only one pass through the list. */
2271 initial_head = chan->srej_list.head;
2274 seq = l2cap_seq_list_pop(&chan->srej_list);
/* @txseq just arrived, so it no longer needs an SREJ. */
2275 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2278 control.reqseq = seq;
2279 l2cap_send_sframe(chan, &control);
2280 l2cap_seq_list_append(&chan->srej_list, seq);
2281 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgment: free every TX-queued frame with a
 * sequence number before @reqseq, update expected_ack_seq, and stop the
 * retransmission timer once nothing remains unacked.
 */
2284 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2286 struct sk_buff *acked_skb;
2289 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or a duplicate ack: nothing to do. */
2291 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2294 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2295 chan->expected_ack_seq, chan->unacked_frames);
2297 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2298 ackseq = __next_seq(chan, ackseq)) {
2300 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2302 skb_unlink(acked_skb, &chan->tx_q);
2303 kfree_skb(acked_skb);
2304 chan->unacked_frames--;
2308 chan->expected_ack_seq = reqseq;
2310 if (chan->unacked_frames == 0)
2311 __clear_retrans_timer(chan);
2313 BT_DBG("unacked_frames %u", chan->unacked_frames);
2316 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2318 BT_DBG("chan %p", chan);
2320 chan->expected_tx_seq = chan->buffer_seq;
2321 l2cap_seq_list_clear(&chan->srej_list);
2322 skb_queue_purge(&chan->srej_q);
2323 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine handler for the XMIT state.  Handles new data
 * requests, local-busy transitions (RNR/RR signalling), incoming acks,
 * explicit polls and retransmission timeouts; poll events arm the
 * monitor timer and move the machine to WAIT_F.
 */
2326 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2327 struct l2cap_ctrl *control,
2328 struct sk_buff_head *skbs, u8 event)
2330 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2334 case L2CAP_EV_DATA_REQUEST:
2335 if (chan->tx_send_head == NULL)
2336 chan->tx_send_head = skb_peek(skbs);
/* Queue the new frames and transmit immediately. */
2338 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2339 l2cap_ertm_send(chan);
2341 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2342 BT_DBG("Enter LOCAL_BUSY");
2343 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2345 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2346 /* The SREJ_SENT state must be aborted if we are to
2347 * enter the LOCAL_BUSY state.
2349 l2cap_abort_rx_srej_sent(chan);
2352 l2cap_send_ack(chan);
2355 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2356 BT_DBG("Exit LOCAL_BUSY");
2357 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we previously sent RNR, poll the peer with RR (P=1). */
2359 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2360 struct l2cap_ctrl local_control;
2362 memset(&local_control, 0, sizeof(local_control));
2363 local_control.sframe = 1;
2364 local_control.super = L2CAP_SUPER_RR;
2365 local_control.poll = 1;
2366 local_control.reqseq = chan->buffer_seq;
2367 l2cap_send_sframe(chan, &local_control);
2369 chan->retry_count = 1;
2370 __set_monitor_timer(chan);
2371 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2374 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2375 l2cap_process_reqseq(chan, control->reqseq);
2377 case L2CAP_EV_EXPLICIT_POLL:
2378 l2cap_send_rr_or_rnr(chan, 1);
2379 chan->retry_count = 1;
2380 __set_monitor_timer(chan);
2381 __clear_ack_timer(chan);
2382 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
/* Retransmission timeout: poll the peer and await the F-bit. */
2384 case L2CAP_EV_RETRANS_TO:
2385 l2cap_send_rr_or_rnr(chan, 1);
2386 chan->retry_count = 1;
2387 __set_monitor_timer(chan);
2388 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2390 case L2CAP_EV_RECV_FBIT:
2391 /* Nothing to process */
2398 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2399 struct l2cap_ctrl *control,
2400 struct sk_buff_head *skbs, u8 event)
2402 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2406 case L2CAP_EV_DATA_REQUEST:
2407 if (chan->tx_send_head == NULL)
2408 chan->tx_send_head = skb_peek(skbs);
2409 /* Queue data, but don't send. */
2410 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2412 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2413 BT_DBG("Enter LOCAL_BUSY");
2414 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2416 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2417 /* The SREJ_SENT state must be aborted if we are to
2418 * enter the LOCAL_BUSY state.
2420 l2cap_abort_rx_srej_sent(chan);
2423 l2cap_send_ack(chan);
2426 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2427 BT_DBG("Exit LOCAL_BUSY");
2428 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2430 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2431 struct l2cap_ctrl local_control;
2432 memset(&local_control, 0, sizeof(local_control));
2433 local_control.sframe = 1;
2434 local_control.super = L2CAP_SUPER_RR;
2435 local_control.poll = 1;
2436 local_control.reqseq = chan->buffer_seq;
2437 l2cap_send_sframe(chan, &local_control);
2439 chan->retry_count = 1;
2440 __set_monitor_timer(chan);
2441 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2444 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2445 l2cap_process_reqseq(chan, control->reqseq);
2449 case L2CAP_EV_RECV_FBIT:
2450 if (control && control->final) {
2451 __clear_monitor_timer(chan);
2452 if (chan->unacked_frames > 0)
2453 __set_retrans_timer(chan);
2454 chan->retry_count = 0;
2455 chan->tx_state = L2CAP_TX_STATE_XMIT;
2456 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2459 case L2CAP_EV_EXPLICIT_POLL:
2462 case L2CAP_EV_MONITOR_TO:
2463 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2464 l2cap_send_rr_or_rnr(chan, 1);
2465 __set_monitor_timer(chan);
2466 chan->retry_count++;
2468 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2476 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2477 struct sk_buff_head *skbs, u8 event)
2479 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2480 chan, control, skbs, event, chan->tx_state);
2482 switch (chan->tx_state) {
2483 case L2CAP_TX_STATE_XMIT:
2484 l2cap_tx_state_xmit(chan, control, skbs, event);
2486 case L2CAP_TX_STATE_WAIT_F:
2487 l2cap_tx_state_wait_f(chan, control, skbs, event);
2495 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2496 struct l2cap_ctrl *control)
2498 BT_DBG("chan %p, control %p", chan, control);
2499 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2502 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2503 struct l2cap_ctrl *control)
2505 BT_DBG("chan %p, control %p", chan, control);
2506 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
/* Copy frame to all raw sockets on that connection */
/* Deliver a clone of @skb to every raw (L2CAP_CHAN_RAW) channel on
 * @conn, except the socket the frame originated from.  Clones that the
 * channel's recv op rejects are freed by the elided error path.
 */
2510 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2512 struct sk_buff *nskb;
2513 struct l2cap_chan *chan;
2515 BT_DBG("conn %p", conn);
2517 mutex_lock(&conn->chan_lock);
2519 list_for_each_entry(chan, &conn->chan_l, list) {
2520 struct sock *sk = chan->sk;
2521 if (chan->chan_type != L2CAP_CHAN_RAW)
2524 /* Don't send frame to the socket it came from */
2527 nskb = skb_clone(skb, GFP_ATOMIC);
2531 if (chan->ops->recv(chan, nskb))
2535 mutex_unlock(&conn->chan_lock);
2538 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel PDU: L2CAP header (CID 0x0001 for ACL or
 * 0x0005 for LE), command header (@code/@ident/@dlen) and @dlen bytes of
 * @data, spilling overflow beyond the connection MTU into frag_list
 * fragments.  Returns the skb or NULL on allocation failure (elided).
 */
2539 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2540 u8 ident, u16 dlen, void *data)
2542 struct sk_buff *skb, **frag;
2543 struct l2cap_cmd_hdr *cmd;
2544 struct l2cap_hdr *lh;
2547 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2548 conn, code, ident, dlen);
2550 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2551 count = min_t(unsigned int, conn->mtu, len);
2553 skb = bt_skb_alloc(count, GFP_ATOMIC);
2557 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2558 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
2560 if (conn->hcon->type == LE_LINK)
2561 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2563 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2565 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2568 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload fits in the head skb after the headers. */
2571 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2572 memcpy(skb_put(skb, count), data, count);
2578 /* Continuation fragments (no L2CAP header) */
2579 frag = &skb_shinfo(skb)->frag_list;
2581 count = min_t(unsigned int, conn->mtu, len);
2583 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2587 memcpy(skb_put(*frag, count), data, count);
2592 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: output its type and length,
 * and its value as an unsigned long for 1/2/4-byte options, or a pointer
 * to the raw bytes for larger ones.  Returns the total size consumed
 * (header + value) so callers can walk the option list.
 */
2602 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2604 struct l2cap_conf_opt *opt = *ptr;
2607 len = L2CAP_CONF_OPT_SIZE + opt->len;
2615 *val = *((u8 *) opt->val);
2619 *val = get_unaligned_le16(opt->val);
2623 *val = get_unaligned_le32(opt->val);
/* Larger options are returned by reference rather than by value. */
2627 *val = (unsigned long) opt->val;
2631 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option to the buffer at *ptr and advance the
 * cursor past it.  1/2/4-byte values are stored little-endian; for any
 * other length @val is treated as a pointer to @len raw bytes.
 */
2635 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2637 struct l2cap_conf_opt *opt = *ptr;
2639 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val)2;
2646 *((u8 *) opt->val) = val;
2650 put_unaligned_le16(val, opt->val);
2654 put_unaligned_le32(val, opt->val);
/* Other lengths: @val is a pointer to the option payload. */
2658 memcpy(opt->val, (void *) val, len);
2662 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local parameters.  ERTM uses the channel's service type and
 * default access latency/flush timeout; streaming mode forces
 * best-effort service.
 */
2665 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2667 struct l2cap_conf_efs efs;
2669 switch (chan->mode) {
2670 case L2CAP_MODE_ERTM:
2671 efs.id = chan->local_id;
2672 efs.stype = chan->local_stype;
2673 efs.msdu = cpu_to_le16(chan->local_msdu);
2674 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2675 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2676 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2679 case L2CAP_MODE_STREAMING:
/* Streaming mode always advertises best-effort service. */
2681 efs.stype = L2CAP_SERV_BESTEFFORT;
2682 efs.msdu = cpu_to_le16(chan->local_msdu);
2683 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2692 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2693 (unsigned long) &efs);
/* Delayed-work handler for the ack timer: if frames are pending
 * acknowledgment, send an RR/RNR S-frame, then drop the channel
 * reference taken when the timer was armed.
 */
2696 static void l2cap_ack_timeout(struct work_struct *work)
2698 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2702 BT_DBG("chan %p", chan);
2704 l2cap_chan_lock(chan);
/* Only acknowledge when buffer_seq has moved past last_acked_seq. */
2706 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2707 chan->last_acked_seq);
2710 l2cap_send_rr_or_rnr(chan, 0);
2712 l2cap_chan_unlock(chan);
2713 l2cap_chan_put(chan);
/* Reset all ERTM sequence counters and queues on @chan and, for ERTM
 * mode, initialise the RX/TX state machines, the three delayed-work
 * timers and the SREJ/retransmission sequence lists.  Returns 0 or a
 * negative errno from sequence-list allocation.
 */
2716 int l2cap_ertm_init(struct l2cap_chan *chan)
2720 chan->next_tx_seq = 0;
2721 chan->expected_tx_seq = 0;
2722 chan->expected_ack_seq = 0;
2723 chan->unacked_frames = 0;
2724 chan->buffer_seq = 0;
2725 chan->frames_sent = 0;
2726 chan->last_acked_seq = 0;
2728 chan->sdu_last_frag = NULL;
2731 skb_queue_head_init(&chan->tx_q);
/* Streaming mode needs only the counters and tx_q reset above. */
2733 if (chan->mode != L2CAP_MODE_ERTM)
2736 chan->rx_state = L2CAP_RX_STATE_RECV;
2737 chan->tx_state = L2CAP_TX_STATE_XMIT;
2739 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2740 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2741 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2743 skb_queue_head_init(&chan->srej_q);
2745 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* On failure of the second list, free the first before returning. */
2749 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2751 l2cap_seq_list_free(&chan->srej_list);
2756 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2759 case L2CAP_MODE_STREAMING:
2760 case L2CAP_MODE_ERTM:
2761 if (l2cap_mode_supported(mode, remote_feat_mask))
2765 return L2CAP_MODE_BASIC;
2769 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2771 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2774 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2776 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2779 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2781 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2782 __l2cap_ews_supported(chan)) {
2783 /* use extended control field */
2784 set_bit(FLAG_EXT_CTRL, &chan->flags);
2785 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2787 chan->tx_win = min_t(u16, chan->tx_win,
2788 L2CAP_DEFAULT_TX_WINDOW);
2789 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2791 chan->ack_win = chan->tx_win;
/* Build an outgoing Configure Request into @data for @chan: selects the
 * channel mode against the remote feature mask, then emits MTU, RFC,
 * EFS, FCS and EWS options as appropriate for basic/ERTM/streaming
 * mode.  Returns the request length (return line elided in this
 * extract).
 */
2794 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2796 struct l2cap_conf_req *req = data;
2797 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2798 void *ptr = req->data;
2801 BT_DBG("chan %p", chan);
2803 if (chan->num_conf_req || chan->num_conf_rsp)
2806 switch (chan->mode) {
2807 case L2CAP_MODE_STREAMING:
2808 case L2CAP_MODE_ERTM:
/* A state-2 device keeps its configured mode as-is. */
2809 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2812 if (__l2cap_efs_supported(chan))
2813 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2817 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only include the MTU option when it differs from the default. */
2822 if (chan->imtu != L2CAP_DEFAULT_MTU)
2823 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2825 switch (chan->mode) {
2826 case L2CAP_MODE_BASIC:
/* Skip the RFC option if the remote supports neither ERTM nor
 * streaming - basic mode is then implicit.
 */
2827 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2828 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2831 rfc.mode = L2CAP_MODE_BASIC;
2833 rfc.max_transmit = 0;
2834 rfc.retrans_timeout = 0;
2835 rfc.monitor_timeout = 0;
2836 rfc.max_pdu_size = 0;
2838 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2839 (unsigned long) &rfc);
2842 case L2CAP_MODE_ERTM:
2843 rfc.mode = L2CAP_MODE_ERTM;
2844 rfc.max_transmit = chan->max_tx;
2845 rfc.retrans_timeout = 0;
2846 rfc.monitor_timeout = 0;
/* Cap the PDU size so a frame plus headers fits the link MTU. */
2848 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2849 L2CAP_EXT_HDR_SIZE -
2852 rfc.max_pdu_size = cpu_to_le16(size);
2854 l2cap_txwin_setup(chan);
2856 rfc.txwin_size = min_t(u16, chan->tx_win,
2857 L2CAP_DEFAULT_TX_WINDOW);
2859 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2860 (unsigned long) &rfc);
2862 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2863 l2cap_add_opt_efs(&ptr, chan);
2865 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to drop the FCS when allowed on both sides. */
2868 if (chan->fcs == L2CAP_FCS_NONE ||
2869 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2870 chan->fcs = L2CAP_FCS_NONE;
2871 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2874 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2875 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2879 case L2CAP_MODE_STREAMING:
2880 l2cap_txwin_setup(chan);
2881 rfc.mode = L2CAP_MODE_STREAMING;
2883 rfc.max_transmit = 0;
2884 rfc.retrans_timeout = 0;
2885 rfc.monitor_timeout = 0;
2887 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2888 L2CAP_EXT_HDR_SIZE -
2891 rfc.max_pdu_size = cpu_to_le16(size);
2893 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2894 (unsigned long) &rfc);
2896 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2897 l2cap_add_opt_efs(&ptr, chan);
2899 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2902 if (chan->fcs == L2CAP_FCS_NONE ||
2903 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2904 chan->fcs = L2CAP_FCS_NONE;
2905 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2910 req->dcid = cpu_to_le16(chan->dcid);
2911 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request options
 * (chan->conf_req / chan->conf_len) and build our Configure Response
 * payload into @data.  Returns the length of the response on success,
 * or -ECONNREFUSED when the requested configuration is unacceptable.
 *
 * NOTE(review): this chunk is an excerpt — several upstream lines
 * (braces, breaks, a few statements) are elided, so control flow is
 * documented only as far as it is visible here.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;		/* response options appended here */
	void *req = chan->conf_req;	/* accumulated request options */
	int len = chan->conf_len;
	int type, hint, olen;
	/* Defaults in case the peer omits the corresponding option. */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	BT_DBG("chan %p", chan);
	/* First pass: walk every option present in the request. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		/* Hint options may be silently ignored; others must be
		 * understood or rejected as unknown. */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;
		case L2CAP_CONF_MTU:
		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
		case L2CAP_CONF_QOS:
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
		case L2CAP_CONF_FCS:
			/* Remember that the peer can do without FCS. */
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
		case L2CAP_CONF_EWS:
			/* NOTE(review): guard condition for this refusal is
			 * elided in this excerpt. */
			return -ECONNREFUSED;
			/* Peer requested extended (14-bit) sequence numbers. */
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			/* Unknown non-hint option: echo its type back. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode not locked down yet: negotiate based on what the
		 * remote offered and what the link supports. */
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);
		return -ECONNREFUSED;
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;
	if (chan->mode != rfc.mode) {
		/* Counter-propose our own mode once; refuse on retry. */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */
		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		set_bit(CONF_MTU_DONE, &chan->conf_state);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
		/* EFS service type must match ours unless either side
		 * declares "no traffic". */
		if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
		    efs.stype != L2CAP_SERV_NOTRAFIC &&
		    efs.stype != chan->local_stype) {
			result = L2CAP_CONF_UNACCEPT;
			if (chan->num_conf_req >= 1)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					   (unsigned long) &efs);
			/* Send PENDING Conf Rsp */
			result = L2CAP_CONF_PENDING;
			set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
			chan->remote_max_tx = rfc.max_transmit;
			/* Clamp remote MPS so a full PDU fits the HCI MTU. */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     L2CAP_EXT_HDR_SIZE -
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;
			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			/* Record the peer's EFS parameters and echo them. */
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs), (unsigned long) &efs);
		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     L2CAP_EXT_HDR_SIZE -
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			/* Fallback: unacceptable mode, reply with ours. */
			result = L2CAP_CONF_UNACCEPT;
			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
	if (result == L2CAP_CONF_SUCCESS)
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);
/* Parse the peer's Configure Response options in @rsp (length @len) and
 * build a follow-up Configure Request into @data, adjusting our channel
 * parameters to what was accepted.  @result may be updated (e.g. to
 * UNACCEPT when the proposed MTU is too small).  Returns the length of
 * the new request, or -ECONNREFUSED on an unacceptable response.
 *
 * NOTE(review): excerpt — some upstream lines (braces, breaks, option
 * values) are elided here.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		case L2CAP_CONF_MTU:
			/* Never accept an MTU below the spec minimum. */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			/* State-2 devices may not renegotiate the mode. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);
			/* Peer may not switch us to a foreign service type. */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					   sizeof(efs), (unsigned long) &efs);
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;
	chan->mode = rfc.mode;
	/* On SUCCESS/PENDING, commit the negotiated timers and sizes. */
	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);
3237 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3239 struct l2cap_conf_rsp *rsp = data;
3240 void *ptr = rsp->data;
3242 BT_DBG("chan %p", chan);
3244 rsp->scid = cpu_to_le16(chan->dcid);
3245 rsp->result = cpu_to_le16(result);
3246 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (success) for a channel whose
 * acceptance was delayed (e.g. by BT_SK_DEFER_SETUP), then kick off
 * configuration with a Configure Request if one was not already sent.
 *
 * NOTE(review): excerpt — the declaration of `buf` (the Configure
 * Request scratch buffer) and the early-return after test_and_set_bit
 * are elided from this view.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
	/* chan->ident is the ident of the original Connect Request. */
	l2cap_send_cmd(conn, chan->ident,
		       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
	/* Only send the first Configure Request once per channel. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
/* Extract the final RFC (and extended window) parameters from a
 * successful Configure Response @rsp of length @len and commit them to
 * @chan.  Only meaningful for ERTM/streaming channels.
 *
 * NOTE(review): excerpt — switch/loop braces and the EWS option value
 * handling are elided from this view.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
	/* Nothing to extract for basic-mode channels. */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
		case L2CAP_CONF_EWS:
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			/* Extended control uses the EWS window, otherwise the
			 * (smaller) RFC txwin_size applies. */
			if (test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
			chan->ack_win = min_t(u16, chan->ack_win,
		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
3323 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3325 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3327 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3330 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3331 cmd->ident == conn->info_ident) {
3332 cancel_delayed_work(&conn->info_timer);
3334 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3335 conn->info_ident = 0;
3337 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening channel for the
 * requested PSM, security-check the link, create and register the new
 * child channel, then reply with a Connect Response (success, pending,
 * or failure) and, when features are known, begin configuration.
 *
 * NOTE(review): excerpt — several upstream lines (goto labels/targets,
 * parent socket locking, dcid assignment, `buf` declaration, braces)
 * are elided from this view; the error paths below rely on them.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;
	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;
	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	result = L2CAP_CR_BAD_PSM;
	mutex_lock(&conn->chan_lock);
	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
	result = L2CAP_CR_NO_MEM;
	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
	chan = pchan->ops->new_connection(pchan);
	/* Keep the ACL alive while the child channel exists. */
	hci_conn_hold(conn->hcon);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	bt_accept_enqueue(parent, sk);
	__l2cap_chan_add(conn, chan);
	__set_chan_timer(chan, sk->sk_sndtimeo);
	/* Remember the request ident for a possibly deferred response. */
	chan->ident = cmd->ident;
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must accept: answer PENDING. */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			/* Security procedure still running. */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		/* Feature mask not yet known: hold in CONNECT2. */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
	/* Kick off feature discovery if we answered PEND/NO_INFO. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(info), &info);
	/* Immediately start configuration on a successful connect. */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
/* Handle an incoming Connect Response.  Look the channel up by our scid
 * (or by the request ident while still unassigned), then either move to
 * BT_CONFIG and send the first Configure Request (success), stay
 * pending, or tear the channel down (failure).
 *
 * NOTE(review): excerpt — error/unlock paths, the `req` buffer
 * declaration, and several braces are elided from this view.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);
	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);
	mutex_lock(&conn->chan_lock);
	chan = __l2cap_get_chan_by_scid(conn, scid);
	/* scid == 0 responses are matched by the request ident instead. */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	l2cap_chan_lock(chan);
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
		/* Only the first Configure Request is sent from here. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		/* Any other result: connection refused. */
		l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
3532 static inline void set_default_fcs(struct l2cap_chan *chan)
3534 /* FCS is enabled only in ERTM or streaming mode, if one or both
3537 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3538 chan->fcs = L2CAP_FCS_NONE;
3539 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3540 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: accumulate option fragments in
 * chan->conf_req, and once the request is complete, parse it, send our
 * Configure Response, and advance the configuration state machine
 * (possibly initializing ERTM and marking the channel ready).
 *
 * NOTE(review): excerpt — unlock/return paths, some declarations
 * (rsp/buf buffers, err), goto labels and braces are elided from this
 * view.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	struct l2cap_chan *chan;
	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);
	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
	chan = l2cap_get_chan_by_scid(conn, dcid);
	/* Configuration is only legal in CONFIG/CONNECT2. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;
		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_REJECT, flags), rsp);
	/* Append this fragment to the accumulated request. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;
	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_SUCCESS, flags), rsp);
	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	l2cap_send_disconn_req(conn, chan, ECONNRESET);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;
	/* Reset config buffer. */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
	/* Both directions configured: bring the channel up. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);
		l2cap_send_disconn_req(chan->conn, chan, -err);
		l2cap_chan_ready(chan);
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
		/* check compatibility */
		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
						    L2CAP_CONF_SUCCESS, flags), rsp);
	l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response.  On SUCCESS commit the
 * negotiated RFC parameters; on PENDING/UNACCEPT renegotiate via
 * l2cap_parse_conf_rsp(); otherwise disconnect.  When both directions
 * are done, initialize ERTM/streaming and mark the channel ready.
 *
 * NOTE(review): excerpt — switch braces, goto labels/targets, and some
 * declarations (buf/req buffers, err) are elided from this view.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);
	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	chan = l2cap_get_chan_by_scid(conn, scid);
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
			l2cap_send_disconn_req(conn, chan, ECONNRESET);
			/* check compatibility */
			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				       l2cap_build_conf_rsp(chan, buf,
							    L2CAP_CONF_SUCCESS, 0x0000), buf);
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, up to the retry limit. */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
			l2cap_send_disconn_req(conn, chan, ECONNRESET);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
		/* Any other result: give up on this channel. */
		l2cap_chan_set_err(chan, ECONNRESET);
		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
	set_bit(CONF_INPUT_DONE, &chan->conf_state);
	/* Both directions configured: bring the channel up. */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);
		l2cap_send_disconn_req(chan->conn, chan, -err);
		l2cap_chan_ready(chan);
	l2cap_chan_unlock(chan);
3757 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3759 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3760 struct l2cap_disconn_rsp rsp;
3762 struct l2cap_chan *chan;
3765 scid = __le16_to_cpu(req->scid);
3766 dcid = __le16_to_cpu(req->dcid);
3768 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3770 mutex_lock(&conn->chan_lock);
3772 chan = __l2cap_get_chan_by_scid(conn, dcid);
3774 mutex_unlock(&conn->chan_lock);
3778 l2cap_chan_lock(chan);
3782 rsp.dcid = cpu_to_le16(chan->scid);
3783 rsp.scid = cpu_to_le16(chan->dcid);
3784 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3787 sk->sk_shutdown = SHUTDOWN_MASK;
3790 l2cap_chan_hold(chan);
3791 l2cap_chan_del(chan, ECONNRESET);
3793 l2cap_chan_unlock(chan);
3795 chan->ops->close(chan);
3796 l2cap_chan_put(chan);
3798 mutex_unlock(&conn->chan_lock);
3803 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3805 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3807 struct l2cap_chan *chan;
3809 scid = __le16_to_cpu(rsp->scid);
3810 dcid = __le16_to_cpu(rsp->dcid);
3812 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3814 mutex_lock(&conn->chan_lock);
3816 chan = __l2cap_get_chan_by_scid(conn, scid);
3818 mutex_unlock(&conn->chan_lock);
3822 l2cap_chan_lock(chan);
3824 l2cap_chan_hold(chan);
3825 l2cap_chan_del(chan, 0);
3827 l2cap_chan_unlock(chan);
3829 chan->ops->close(chan);
3830 l2cap_chan_put(chan);
3832 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request: answer with our feature mask,
 * our fixed-channel map, or NOTSUPP for unknown types.
 *
 * NOTE(review): excerpt — the `buf` declarations, the `disable_ertm` /
 * high-speed conditionals guarding the feature bits, and some braces
 * are elided from this view.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	type = __le16_to_cpu(req->type);
	BT_DBG("type 0x%4.4x", type);
	if (type == L2CAP_IT_FEAT_MASK) {
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming (and ext flow/window) support. */
		feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
		feat_mask |= L2CAP_FEAT_EXT_FLOW
			| L2CAP_FEAT_EXT_WINDOW;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		/* A2MP fixed channel advertised only when HS is enabled. */
		l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(buf), buf);
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
			       L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response to our feature-discovery
 * request.  On a feature-mask answer, optionally follow up with a
 * fixed-channel query; once discovery is complete, start pending
 * connections on this ACL.
 *
 * NOTE(review): excerpt — early returns, req.flags initialization and
 * some braces are elided from this view.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);
	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
	cancel_delayed_work(&conn->info_timer);
	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: treat discovery as done and move on. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;
		l2cap_conn_start(conn);
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel-map query. */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
			conn->info_ident = l2cap_get_ident(conn);
			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;
			l2cap_conn_start(conn);
	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;
		l2cap_conn_start(conn);
3945 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3946 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3949 struct l2cap_create_chan_req *req = data;
3950 struct l2cap_create_chan_rsp rsp;
3953 if (cmd_len != sizeof(*req))
3959 psm = le16_to_cpu(req->psm);
3960 scid = le16_to_cpu(req->scid);
3962 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3964 /* Placeholder: Always reject */
3966 rsp.scid = cpu_to_le16(scid);
3967 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3968 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3970 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response: identical semantics to a
 * regular Connect Response, so delegate to that handler.
 *
 * Fix: restored the missing function braces; tokens otherwise match
 * the visible code.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3984 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3985 u16 icid, u16 result)
3987 struct l2cap_move_chan_rsp rsp;
3989 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3991 rsp.icid = cpu_to_le16(icid);
3992 rsp.result = cpu_to_le16(result);
3994 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3997 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3998 struct l2cap_chan *chan,
3999 u16 icid, u16 result)
4001 struct l2cap_move_chan_cfm cfm;
4004 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4006 ident = l2cap_get_ident(conn);
4008 chan->ident = ident;
4010 cfm.icid = cpu_to_le16(icid);
4011 cfm.result = cpu_to_le16(result);
4013 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4016 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4019 struct l2cap_move_chan_cfm_rsp rsp;
4021 BT_DBG("icid 0x%4.4x", icid);
4023 rsp.icid = cpu_to_le16(icid);
4024 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4027 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4028 struct l2cap_cmd_hdr *cmd,
4029 u16 cmd_len, void *data)
4031 struct l2cap_move_chan_req *req = data;
4033 u16 result = L2CAP_MR_NOT_ALLOWED;
4035 if (cmd_len != sizeof(*req))
4038 icid = le16_to_cpu(req->icid);
4040 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4045 /* Placeholder: Always refuse */
4046 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4051 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4052 struct l2cap_cmd_hdr *cmd,
4053 u16 cmd_len, void *data)
4055 struct l2cap_move_chan_rsp *rsp = data;
4058 if (cmd_len != sizeof(*rsp))
4061 icid = le16_to_cpu(rsp->icid);
4062 result = le16_to_cpu(rsp->result);
4064 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4066 /* Placeholder: Always unconfirmed */
4067 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4072 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4073 struct l2cap_cmd_hdr *cmd,
4074 u16 cmd_len, void *data)
4076 struct l2cap_move_chan_cfm *cfm = data;
4079 if (cmd_len != sizeof(*cfm))
4082 icid = le16_to_cpu(cfm->icid);
4083 result = le16_to_cpu(cfm->result);
4085 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4087 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4092 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4093 struct l2cap_cmd_hdr *cmd,
4094 u16 cmd_len, void *data)
4096 struct l2cap_move_chan_cfm_rsp *rsp = data;
4099 if (cmd_len != sizeof(*rsp))
4102 icid = le16_to_cpu(rsp->icid);
4104 BT_DBG("icid 0x%4.4x", icid);
4109 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4114 if (min > max || min < 6 || max > 3200)
4117 if (to_multiplier < 10 || to_multiplier > 3200)
4120 if (max >= to_multiplier * 8)
4123 max_latency = (to_multiplier * 8 / max) - 1;
4124 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed parameters, answer accepted/rejected, and on
 * acceptance ask the controller to apply them.
 *
 * NOTE(review): excerpt — the early `return 0;`s after the master and
 * length checks, the `int err;` declaration, the `if (err)` branch and
 * braces are elided from this view.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd, u8 *data)
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	/* Only the master may grant a parameter update. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);
	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);
	memset(&rsp, 0, sizeof(rsp));
	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
	/* Apply the parameters on the controller when accepted. */
	hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler and return the
 * handler's error code (0 for commands handled inline, like echo).
 *
 * NOTE(review): excerpt — `int err = 0;`, the `break;` statements and
 * closing braces are elided from this view.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
	case L2CAP_ECHO_RSP:
	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4254 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4255 struct l2cap_cmd_hdr *cmd, u8 *data)
4257 switch (cmd->code) {
4258 case L2CAP_COMMAND_REJ:
4261 case L2CAP_CONN_PARAM_UPDATE_REQ:
4262 return l2cap_conn_param_update_req(conn, cmd, data);
4264 case L2CAP_CONN_PARAM_UPDATE_RSP:
4268 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the signaling channel payload of @skb: iterate over each
 * command header, dispatch to the LE or BR/EDR handler, and send a
 * Command Reject for handler errors.  A raw copy also goes to any raw
 * sockets for sniffing.
 *
 * NOTE(review): excerpt — `int len = skb->len;`, `u16 cmd_len`,
 * `int err`, the loop's data/len advance past the payload, `break` on
 * corruption and closing braces are elided from this view.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
	u8 *data = skb->data;
	struct l2cap_cmd_hdr cmd;
	l2cap_raw_recv(conn, skb);
	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;
		cmd_len = le16_to_cpu(cmd.len);
		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
		/* A command must fit in the remaining payload and carry a
		 * non-zero ident. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		/* Handler failed: tell the peer we did not understand. */
		struct l2cap_cmd_rej_unk rej;
		BT_ERR("Wrong link type (%d)", err);
		/* FIXME: Map err to a valid reason */
		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4320 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4322 u16 our_fcs, rcv_fcs;
4325 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4326 hdr_size = L2CAP_EXT_HDR_SIZE;
4328 hdr_size = L2CAP_ENH_HDR_SIZE;
4330 if (chan->fcs == L2CAP_FCS_CRC16) {
4331 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4332 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4333 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4335 if (our_fcs != rcv_fcs)
/* After local busy or a poll, send whatever frame carries our current
 * state with the F-bit set: RNR when still locally busy, otherwise
 * pending I-frames, falling back to an RR if no frame carried the
 * F-bit.
 *
 * NOTE(review): excerpt — a few statements and closing braces are
 * elided from this view (e.g. the full F-bit comment).
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	BT_DBG("chan %p", chan);
	memset(&control, 0, sizeof(control));
	control.reqseq = chan->buffer_seq;
	/* The next outgoing frame must carry the final bit. */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	/* Remote just left busy state: resume the retransmit timer. */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);
	/* Send pending iframes */
	l2cap_ertm_send(chan);
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send an RR to carry it. */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
/* Link @new_frag onto @skb's frag_list tail (tracked via *last_frag)
 * and account its length/truesize in the head skb's totals.
 */
4375 static void append_skb_frag(struct sk_buff *skb,
4376 struct sk_buff *new_frag, struct sk_buff **last_frag)
4378 /* skb->len reflects data in skb as well as all fragments
4379 * skb->data_len reflects only data in fragments
4381 if (!skb_has_frag_list(skb))
4382 skb_shinfo(skb)->frag_list = new_frag;
4384 new_frag->next = NULL;
4386 (*last_frag)->next = new_frag;
4387 *last_frag = new_frag;
/* Keep the aggregate counters consistent with the new tail fragment. */
4389 skb->len += new_frag->len;
4390 skb->data_len += new_frag->len;
4391 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-segmented I-frames and deliver complete
 * SDUs via chan->ops->recv().  Unsegmented frames are passed straight
 * through; START frames carry a 2-byte SDU length prefix; CONTINUE and
 * END frames are appended to chan->sdu.  On error the partial SDU is
 * freed (cleanup path at the bottom).
 */
4394 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4395 struct l2cap_ctrl *control)
4399 switch (control->sar) {
4400 case L2CAP_SAR_UNSEGMENTED:
4404 err = chan->ops->recv(chan, skb);
4407 case L2CAP_SAR_START:
/* First segment: SDU length prefix precedes the payload. */
4411 chan->sdu_len = get_unaligned_le16(skb->data);
4412 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Reject SDUs larger than our negotiated receive MTU. */
4414 if (chan->sdu_len > chan->imtu) {
4419 if (skb->len >= chan->sdu_len)
4423 chan->sdu_last_frag = skb;
4429 case L2CAP_SAR_CONTINUE:
4433 append_skb_frag(chan->sdu, skb,
4434 &chan->sdu_last_frag);
/* A middle segment must not already complete the SDU. */
4437 if (chan->sdu->len >= chan->sdu_len)
4447 append_skb_frag(chan->sdu, skb,
4448 &chan->sdu_last_frag);
/* END segment: total length must match the advertised SDU length. */
4451 if (chan->sdu->len != chan->sdu_len)
4454 err = chan->ops->recv(chan, chan->sdu);
4457 /* Reassembly complete */
4459 chan->sdu_last_frag = NULL;
/* Error path: discard the partially reassembled SDU. */
4467 kfree_skb(chan->sdu);
4469 chan->sdu_last_frag = NULL;
/* Feed a local-busy transition into the ERTM tx state machine.
 * No-op for channels not in ERTM mode.
 */
4476 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4480 if (chan->mode != L2CAP_MODE_ERTM)
4483 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4484 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver sequential frames to the reassembler
 * until a sequence gap (or local busy) stops us.  When the queue is
 * fully drained, return to the RECV state and ack the peer.
 */
4487 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4490 /* Pass sequential frames to l2cap_reassemble_sdu()
4491 * until a gap is encountered.
4494 BT_DBG("chan %p", chan);
4496 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4497 struct sk_buff *skb;
4498 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4499 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4501 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4506 skb_unlink(skb, &chan->srej_q);
4507 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4508 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All out-of-order frames recovered: resume normal reception. */
4513 if (skb_queue_empty(&chan->srej_q)) {
4514 chan->rx_state = L2CAP_RX_STATE_RECV;
4515 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame.  Disconnects on an invalid reqseq or when the frame's retry
 * budget (chan->max_tx) is exhausted.  P/F bit handling follows the
 * ERTM spec: a poll forces an F-bit reply; a final bit is matched
 * against a previously saved SREJ reqseq to avoid double retransmits.
 */
4521 static void l2cap_handle_srej(struct l2cap_chan *chan,
4522 struct l2cap_ctrl *control)
4524 struct sk_buff *skb;
4526 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq references a frame never sent. */
4528 if (control->reqseq == chan->next_tx_seq) {
4529 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4530 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4534 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4537 BT_DBG("Seq %d not available for retransmission",
/* Give up the link if this frame already hit its retry limit. */
4542 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4543 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4544 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4548 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4550 if (control->poll) {
4551 l2cap_pass_to_tx(chan, control);
/* Poll demands an F-bit in our retransmission. */
4553 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4554 l2cap_retransmit(chan, control);
4555 l2cap_ertm_send(chan);
4557 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4558 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4559 chan->srej_save_reqseq = control->reqseq;
4562 l2cap_pass_to_tx_fbit(chan, control);
4564 if (control->final) {
/* Only skip the retransmit if this F-bit answers the exact
 * SREJ we previously recorded. */
4565 if (chan->srej_save_reqseq != control->reqseq ||
4566 !test_and_clear_bit(CONN_SREJ_ACT,
4568 l2cap_retransmit(chan, control);
4570 l2cap_retransmit(chan, control);
4571 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4572 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4573 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit all unacked I-frames from
 * reqseq onward.  Disconnects on an invalid reqseq or exhausted retry
 * budget.  An F-bit only suppresses the retransmit when it answers a
 * REJ we already acted on (CONN_REJ_ACT).
 */
4579 static void l2cap_handle_rej(struct l2cap_chan *chan,
4580 struct l2cap_ctrl *control)
4582 struct sk_buff *skb;
4584 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq references a frame never sent. */
4586 if (control->reqseq == chan->next_tx_seq) {
4587 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4588 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4592 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4594 if (chan->max_tx && skb &&
4595 bt_cb(skb)->control.retries >= chan->max_tx) {
4596 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4597 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4601 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4603 l2cap_pass_to_tx(chan, control);
4605 if (control->final) {
4606 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4607 l2cap_retransmit_all(chan, control);
4609 l2cap_retransmit_all(chan, control);
4610 l2cap_ertm_send(chan);
/* Remember we acted on a REJ while awaiting the F-bit reply. */
4611 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4612 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window:
 * expected, duplicate, unexpected (gap), or invalid.  Extra SREJ-state
 * categories distinguish frames that answer outstanding SREJs.  The
 * "double poll" comment below explains why some out-of-window frames
 * are ignorable rather than fatal.
 */
4616 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4618 BT_DBG("chan %p, txseq %d", chan, txseq);
4620 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4621 chan->expected_tx_seq);
4623 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4624 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4626 /* See notes below regarding "double poll" and
4629 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4630 BT_DBG("Invalid/Ignore - after SREJ");
4631 return L2CAP_TXSEQ_INVALID_IGNORE;
4633 BT_DBG("Invalid - in window after SREJ sent");
4634 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we want next. */
4638 if (chan->srej_list.head == txseq) {
4639 BT_DBG("Expected SREJ");
4640 return L2CAP_TXSEQ_EXPECTED_SREJ;
4643 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4644 BT_DBG("Duplicate SREJ - txseq already stored");
4645 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4648 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4649 BT_DBG("Unexpected SREJ - not requested");
4650 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4654 if (chan->expected_tx_seq == txseq) {
4655 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4657 BT_DBG("Invalid - txseq outside tx window");
4658 return L2CAP_TXSEQ_INVALID;
4661 return L2CAP_TXSEQ_EXPECTED;
/* txseq earlier than expected_tx_seq means already received. */
4665 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4666 __seq_offset(chan, chan->expected_tx_seq,
4667 chan->last_acked_seq)){
4668 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4669 return L2CAP_TXSEQ_DUPLICATE;
4672 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4673 /* A source of invalid packets is a "double poll" condition,
4674 * where delays cause us to send multiple poll packets. If
4675 * the remote stack receives and processes both polls,
4676 * sequence numbers can wrap around in such a way that a
4677 * resent frame has a sequence number that looks like new data
4678 * with a sequence gap. This would trigger an erroneous SREJ
4681 * Fortunately, this is impossible with a tx window that's
4682 * less than half of the maximum sequence number, which allows
4683 * invalid frames to be safely ignored.
4685 * With tx window sizes greater than half of the tx window
4686 * maximum, the frame is invalid and cannot be ignored. This
4687 * causes a disconnect.
4690 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4691 BT_DBG("Invalid/Ignore - txseq outside tx window");
4692 return L2CAP_TXSEQ_INVALID_IGNORE;
4694 BT_DBG("Invalid - txseq outside tx window");
4695 return L2CAP_TXSEQ_INVALID;
4698 BT_DBG("Unexpected - txseq indicates missing frames");
4699 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, RECV state.  Dispatches on the rx event:
 * I-frames are classified by txseq and either reassembled, queued for
 * SREJ recovery, dropped (duplicate/local busy), or cause a disconnect
 * (invalid).  RR/RNR/REJ/SREJ S-frames drive the transmit side.  Any
 * skb not consumed (skb_in_use) is freed at the bottom.
 */
4703 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4704 struct l2cap_ctrl *control,
4705 struct sk_buff *skb, u8 event)
4708 bool skb_in_use = 0;
4710 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4714 case L2CAP_EV_RECV_IFRAME:
4715 switch (l2cap_classify_txseq(chan, control->txseq)) {
4716 case L2CAP_TXSEQ_EXPECTED:
4717 l2cap_pass_to_tx(chan, control);
/* While locally busy we drop in-sequence frames; the peer will
 * retransmit once we clear busy. */
4719 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4720 BT_DBG("Busy, discarding expected seq %d",
4725 chan->expected_tx_seq = __next_seq(chan,
4728 chan->buffer_seq = chan->expected_tx_seq;
4731 err = l2cap_reassemble_sdu(chan, skb, control);
4735 if (control->final) {
4736 if (!test_and_clear_bit(CONN_REJ_ACT,
4737 &chan->conn_state)) {
4739 l2cap_retransmit_all(chan, control);
4740 l2cap_ertm_send(chan);
4744 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4745 l2cap_send_ack(chan);
4747 case L2CAP_TXSEQ_UNEXPECTED:
4748 l2cap_pass_to_tx(chan, control);
4750 /* Can't issue SREJ frames in the local busy state.
4751 * Drop this frame, it will be seen as missing
4752 * when local busy is exited.
4754 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4755 BT_DBG("Busy, discarding unexpected seq %d",
4760 /* There was a gap in the sequence, so an SREJ
4761 * must be sent for each missing frame. The
4762 * current frame is stored for later use.
4764 skb_queue_tail(&chan->srej_q, skb);
4766 BT_DBG("Queued %p (queue len %d)", skb,
4767 skb_queue_len(&chan->srej_q));
4769 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4770 l2cap_seq_list_clear(&chan->srej_list);
4771 l2cap_send_srej(chan, control->txseq);
/* Enter SREJ recovery until the gap is filled. */
4773 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4775 case L2CAP_TXSEQ_DUPLICATE:
4776 l2cap_pass_to_tx(chan, control);
4778 case L2CAP_TXSEQ_INVALID_IGNORE:
4780 case L2CAP_TXSEQ_INVALID:
4782 l2cap_send_disconn_req(chan->conn, chan,
4787 case L2CAP_EV_RECV_RR:
4788 l2cap_pass_to_tx(chan, control);
4789 if (control->final) {
4790 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4792 if (!test_and_clear_bit(CONN_REJ_ACT,
4793 &chan->conn_state)) {
4795 l2cap_retransmit_all(chan, control);
4798 l2cap_ertm_send(chan);
4799 } else if (control->poll) {
4800 l2cap_send_i_or_rr_or_rnr(chan);
4802 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4803 &chan->conn_state) &&
4804 chan->unacked_frames)
4805 __set_retrans_timer(chan);
4807 l2cap_ertm_send(chan);
4810 case L2CAP_EV_RECV_RNR:
/* Peer is busy: hold retransmissions until it recovers. */
4811 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4812 l2cap_pass_to_tx(chan, control);
4813 if (control && control->poll) {
4814 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4815 l2cap_send_rr_or_rnr(chan, 0);
4817 __clear_retrans_timer(chan);
4818 l2cap_seq_list_clear(&chan->retrans_list);
4820 case L2CAP_EV_RECV_REJ:
4821 l2cap_handle_rej(chan, control);
4823 case L2CAP_EV_RECV_SREJ:
4824 l2cap_handle_srej(chan, control);
/* Free any skb that no branch above took ownership of. */
4830 if (skb && !skb_in_use) {
4831 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine, SREJ_SENT state (gap-recovery).  Every
 * usable I-frame is parked on srej_q; frames answering the head of the
 * SREJ list trigger a drain via l2cap_rx_queued_iframes().  New gaps
 * extend the SREJ list.  S-frames are handled much as in RECV state,
 * except polls are answered with l2cap_send_srej_tail().  Unconsumed
 * skbs are freed at the bottom.
 */
4838 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4839 struct l2cap_ctrl *control,
4840 struct sk_buff *skb, u8 event)
4843 u16 txseq = control->txseq;
4844 bool skb_in_use = 0;
4846 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4850 case L2CAP_EV_RECV_IFRAME:
4851 switch (l2cap_classify_txseq(chan, txseq)) {
4852 case L2CAP_TXSEQ_EXPECTED:
4853 /* Keep frame for reassembly later */
4854 l2cap_pass_to_tx(chan, control);
4855 skb_queue_tail(&chan->srej_q, skb);
4857 BT_DBG("Queued %p (queue len %d)", skb,
4858 skb_queue_len(&chan->srej_q));
4860 chan->expected_tx_seq = __next_seq(chan, txseq);
4862 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This frame answers the oldest outstanding SREJ. */
4863 l2cap_seq_list_pop(&chan->srej_list);
4865 l2cap_pass_to_tx(chan, control);
4866 skb_queue_tail(&chan->srej_q, skb);
4868 BT_DBG("Queued %p (queue len %d)", skb,
4869 skb_queue_len(&chan->srej_q));
/* Try to deliver everything now in sequence. */
4871 err = l2cap_rx_queued_iframes(chan);
4876 case L2CAP_TXSEQ_UNEXPECTED:
4877 /* Got a frame that can't be reassembled yet.
4878 * Save it for later, and send SREJs to cover
4879 * the missing frames.
4881 skb_queue_tail(&chan->srej_q, skb);
4883 BT_DBG("Queued %p (queue len %d)", skb,
4884 skb_queue_len(&chan->srej_q));
4886 l2cap_pass_to_tx(chan, control);
4887 l2cap_send_srej(chan, control->txseq);
4889 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4890 /* This frame was requested with an SREJ, but
4891 * some expected retransmitted frames are
4892 * missing. Request retransmission of missing
4895 skb_queue_tail(&chan->srej_q, skb);
4897 BT_DBG("Queued %p (queue len %d)", skb,
4898 skb_queue_len(&chan->srej_q));
4900 l2cap_pass_to_tx(chan, control);
4901 l2cap_send_srej_list(chan, control->txseq);
4903 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4904 /* We've already queued this frame. Drop this copy. */
4905 l2cap_pass_to_tx(chan, control);
4907 case L2CAP_TXSEQ_DUPLICATE:
4908 /* Expecting a later sequence number, so this frame
4909 * was already received. Ignore it completely.
4912 case L2CAP_TXSEQ_INVALID_IGNORE:
4914 case L2CAP_TXSEQ_INVALID:
4916 l2cap_send_disconn_req(chan->conn, chan,
4921 case L2CAP_EV_RECV_RR:
4922 l2cap_pass_to_tx(chan, control);
4923 if (control->final) {
4924 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4926 if (!test_and_clear_bit(CONN_REJ_ACT,
4927 &chan->conn_state)) {
4929 l2cap_retransmit_all(chan, control);
4932 l2cap_ertm_send(chan);
4933 } else if (control->poll) {
4934 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4935 &chan->conn_state) &&
4936 chan->unacked_frames) {
4937 __set_retrans_timer(chan);
/* Answer the poll with the tail of our SREJ list. */
4940 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4941 l2cap_send_srej_tail(chan);
4943 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4944 &chan->conn_state) &&
4945 chan->unacked_frames)
4946 __set_retrans_timer(chan);
4948 l2cap_send_ack(chan);
4951 case L2CAP_EV_RECV_RNR:
4952 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4953 l2cap_pass_to_tx(chan, control);
4954 if (control->poll) {
4955 l2cap_send_srej_tail(chan);
/* Unpolled RNR: acknowledge with a plain RR S-frame. */
4957 struct l2cap_ctrl rr_control;
4958 memset(&rr_control, 0, sizeof(rr_control));
4959 rr_control.sframe = 1;
4960 rr_control.super = L2CAP_SUPER_RR;
4961 rr_control.reqseq = chan->buffer_seq;
4962 l2cap_send_sframe(chan, &rr_control);
4966 case L2CAP_EV_RECV_REJ:
4967 l2cap_handle_rej(chan, control);
4969 case L2CAP_EV_RECV_SREJ:
4970 l2cap_handle_srej(chan, control);
/* Free any skb that no branch above took ownership of. */
4974 if (skb && !skb_in_use) {
4975 BT_DBG("Freeing %p", skb);
/* True if @reqseq acknowledges a frame in the sent-but-unacked range,
 * i.e. it lies between expected_ack_seq and next_tx_seq (modulo the
 * sequence space).
 */
4982 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4984 /* Make sure reqseq is for a packet that has been sent but not acked */
4987 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4988 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM rx dispatch: validate the frame's reqseq, then route
 * the event to the handler for the current rx state.  An invalid
 * reqseq is a protocol violation and tears the channel down.
 */
4991 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4992 struct sk_buff *skb, u8 event)
4996 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4997 control, skb, event, chan->rx_state);
4999 if (__valid_reqseq(chan, control->reqseq)) {
5000 switch (chan->rx_state) {
5001 case L2CAP_RX_STATE_RECV:
5002 err = l2cap_rx_state_recv(chan, control, skb, event);
5004 case L2CAP_RX_STATE_SREJ_SENT:
5005 err = l2cap_rx_state_srej_sent(chan, control, skb,
5013 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5014 control->reqseq, chan->next_tx_seq,
5015 chan->expected_ack_seq);
5016 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Streaming-mode receive: deliver in-sequence frames, silently skip
 * over gaps (streaming mode never retransmits).  A partially
 * reassembled SDU interrupted by a gap is discarded.  Sequence state
 * is always advanced past the received txseq.
 */
5022 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5023 struct sk_buff *skb)
5027 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5030 if (l2cap_classify_txseq(chan, control->txseq) ==
5031 L2CAP_TXSEQ_EXPECTED) {
5032 l2cap_pass_to_tx(chan, control);
5034 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5035 __next_seq(chan, chan->buffer_seq));
5037 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5039 l2cap_reassemble_sdu(chan, skb, control);
/* Gap detected: drop any in-progress SDU reassembly. */
5042 kfree_skb(chan->sdu);
5045 chan->sdu_last_frag = NULL;
5049 BT_DBG("Freeing %p", skb);
/* Track the peer's sequence regardless of whether we delivered. */
5054 chan->last_acked_seq = control->txseq;
5055 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames on a channel: unpack the
 * control field, verify FCS (corrupt I-frames are silently dropped so
 * normal recovery kicks in), enforce MPS, then feed I-frames or
 * S-frames into the appropriate rx path.  Protocol violations
 * (oversized payload, bad F/P bit combinations, S-frames in streaming
 * mode) disconnect the channel.
 */
5060 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5062 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5066 __unpack_control(chan, skb);
5071 * We can just drop the corrupted I-frame here.
5072 * Receiver will miss it and start proper recovery
5073 * procedures and ask for retransmission.
5075 if (l2cap_check_fcs(chan, skb))
/* Compute payload length net of the SDU-length prefix and FCS. */
5078 if (!control->sframe && control->sar == L2CAP_SAR_START)
5079 len -= L2CAP_SDULEN_SIZE;
5081 if (chan->fcs == L2CAP_FCS_CRC16)
5082 len -= L2CAP_FCS_SIZE;
5084 if (len > chan->mps) {
5085 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5089 if (!control->sframe) {
5092 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5093 control->sar, control->reqseq, control->final,
5096 /* Validate F-bit - F=0 always valid, F=1 only
5097 * valid in TX WAIT_F
5099 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5102 if (chan->mode != L2CAP_MODE_STREAMING) {
5103 event = L2CAP_EV_RECV_IFRAME;
5104 err = l2cap_rx(chan, control, skb, event);
5106 err = l2cap_stream_rx(chan, control, skb);
5110 l2cap_send_disconn_req(chan->conn, chan,
/* Map the 2-bit supervisory function to an rx event. */
5113 const u8 rx_func_to_event[4] = {
5114 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5115 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5118 /* Only I-frames are expected in streaming mode */
5119 if (chan->mode == L2CAP_MODE_STREAMING)
5122 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5123 control->reqseq, control->final, control->poll,
5128 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5132 /* Validate F and P bits */
5133 if (control->final && (control->poll ||
5134 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5137 event = rx_func_to_event[control->super];
5138 if (l2cap_rx(chan, control, skb, event))
5139 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Route an incoming data frame to the channel identified by @cid.
 * Unknown CIDs are dropped (with a special case creating an A2MP
 * channel on demand).  Dispatches on the channel mode: basic mode
 * delivers directly (dropping oversized frames), ERTM/streaming go
 * through l2cap_data_rcv().
 */
5149 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5150 struct sk_buff *skb)
5152 struct l2cap_chan *chan;
5154 chan = l2cap_get_chan_by_scid(conn, cid);
5156 if (cid == L2CAP_CID_A2MP) {
5157 chan = a2mp_channel_create(conn, skb);
5163 l2cap_chan_lock(chan);
5165 BT_DBG("unknown cid 0x%4.4x", cid);
5166 /* Drop packet and return */
5172 BT_DBG("chan %p, len %d", chan, skb->len);
5174 if (chan->state != BT_CONNECTED)
5177 switch (chan->mode) {
5178 case L2CAP_MODE_BASIC:
5179 /* If socket recv buffers overflows we drop data here
5180 * which is *bad* because L2CAP has to be reliable.
5181 * But we don't have any other choice. L2CAP doesn't
5182 * provide flow control mechanism. */
5184 if (chan->imtu < skb->len)
5187 if (!chan->ops->recv(chan, skb))
5191 case L2CAP_MODE_ERTM:
5192 case L2CAP_MODE_STREAMING:
5193 l2cap_data_rcv(chan, skb);
5197 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5205 l2cap_chan_unlock(chan);
/* Deliver a connectionless (PSM-addressed) frame to a matching global
 * channel.  Frames are dropped if no channel matches, the channel is
 * not bound/connected, or the payload exceeds its MTU.
 */
5208 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5209 struct sk_buff *skb)
5211 struct l2cap_chan *chan;
5213 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5217 BT_DBG("chan %p, len %d", chan, skb->len);
5219 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5222 if (chan->imtu < skb->len)
5225 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT (LE fixed-CID) frame to a matching global channel.
 * Same drop conditions as the connectionless path: no channel, wrong
 * state, or payload larger than the channel MTU.
 */
5232 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5233 struct sk_buff *skb)
5235 struct l2cap_chan *chan;
5237 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5241 BT_DBG("chan %p, len %d", chan, skb->len);
5243 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5246 if (chan->imtu < skb->len)
5249 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by CID after stripping the basic
 * header: signalling, connectionless (PSM prefix), LE ATT, SMP, or a
 * regular data channel.  A length mismatch with the skb drops the
 * frame; a bad SMP frame tears down the whole connection.
 */
5256 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5258 struct l2cap_hdr *lh = (void *) skb->data;
5262 skb_pull(skb, L2CAP_HDR_SIZE);
5263 cid = __le16_to_cpu(lh->cid);
5264 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length exactly. */
5266 if (len != skb->len) {
5271 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5274 case L2CAP_CID_LE_SIGNALING:
5275 case L2CAP_CID_SIGNALING:
5276 l2cap_sig_channel(conn, skb);
5279 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a 2-byte PSM before the payload. */
5280 psm = get_unaligned((__le16 *) skb->data);
5281 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5282 l2cap_conless_channel(conn, psm, skb);
5285 case L2CAP_CID_LE_DATA:
5286 l2cap_att_channel(conn, cid, skb);
5290 if (smp_sig_channel(conn, skb))
5291 l2cap_conn_del(conn->hcon, EACCES);
5295 l2cap_data_channel(conn, cid, skb);
5300 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connect: scan listening channels
 * and build the link-mode mask (accept + optional master role).
 * Channels bound to this adapter's address take precedence (lm1/exact)
 * over wildcard BDADDR_ANY listeners (lm2).
 */
5302 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5304 int exact = 0, lm1 = 0, lm2 = 0;
5305 struct l2cap_chan *c;
5307 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5309 /* Find listening sockets and check their link_mode */
5310 read_lock(&chan_list_lock);
5311 list_for_each_entry(c, &chan_list, global_l) {
5312 struct sock *sk = c->sk;
5314 if (c->state != BT_LISTEN)
5317 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5318 lm1 |= HCI_LM_ACCEPT;
5319 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5320 lm1 |= HCI_LM_MASTER;
5322 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5323 lm2 |= HCI_LM_ACCEPT;
5324 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5325 lm2 |= HCI_LM_MASTER;
5328 read_unlock(&chan_list_lock);
5330 return exact ? lm1 : lm2;
/* HCI callback on ACL connect completion: on success attach an L2CAP
 * connection and mark it ready; on failure tear it down with the
 * translated errno.
 */
5333 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5335 struct l2cap_conn *conn;
5337 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5340 conn = l2cap_conn_add(hcon, status);
5342 l2cap_conn_ready(conn);
5344 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking for the disconnect reason to report; falls back
 * to "remote user terminated" when no L2CAP connection state exists.
 */
5349 int l2cap_disconn_ind(struct hci_conn *hcon)
5351 struct l2cap_conn *conn = hcon->l2cap_data;
5353 BT_DBG("hcon %p", hcon);
5356 return HCI_ERROR_REMOTE_USER_TERM;
5357 return conn->disc_reason;
/* HCI callback on ACL disconnect: tear down the L2CAP connection,
 * translating the HCI reason code to an errno.
 */
5360 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5362 BT_DBG("hcon %p reason %d", hcon, reason);
5364 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a timeout (medium security) or closes the
 * channel outright (high security); regaining it clears the timer.
 */
5368 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5370 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5373 if (encrypt == 0x00) {
5374 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5375 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5376 } else if (chan->sec_level == BT_SECURITY_HIGH)
5377 l2cap_chan_close(chan, ECONNREFUSED);
5379 if (chan->sec_level == BT_SECURITY_MEDIUM)
5380 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) completion callback.  For
 * LE links, kick off SMP key distribution.  For each channel on the
 * connection: LE data channels become ready on success; established
 * BR/EDR channels get their suspend flag cleared and encryption state
 * rechecked; channels mid-connect either proceed (send connect req /
 * connect rsp, possibly followed by a config req) or are torn down,
 * depending on @status and deferred-setup policy.
 */
5384 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5386 struct l2cap_conn *conn = hcon->l2cap_data;
5387 struct l2cap_chan *chan;
5392 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
5394 if (hcon->type == LE_LINK) {
5395 if (!status && encrypt)
5396 smp_distribute_keys(conn, 0);
5397 cancel_delayed_work(&conn->security_timer);
5400 mutex_lock(&conn->chan_lock);
5402 list_for_each_entry(chan, &conn->chan_l, list) {
5403 l2cap_chan_lock(chan);
5405 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5406 state_to_string(chan->state));
5408 if (chan->scid == L2CAP_CID_LE_DATA) {
5409 if (!status && encrypt) {
5410 chan->sec_level = hcon->sec_level;
5411 l2cap_chan_ready(chan);
5414 l2cap_chan_unlock(chan);
/* Channels still waiting on this security procedure are handled
 * via the connect paths below, not here. */
5418 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5419 l2cap_chan_unlock(chan);
5423 if (!status && (chan->state == BT_CONNECTED ||
5424 chan->state == BT_CONFIG)) {
5425 struct sock *sk = chan->sk;
5427 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5428 sk->sk_state_change(sk);
5430 l2cap_check_encryption(chan, encrypt);
5431 l2cap_chan_unlock(chan);
5435 if (chan->state == BT_CONNECT) {
5437 l2cap_send_conn_req(chan);
5439 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5441 } else if (chan->state == BT_CONNECT2) {
5442 struct sock *sk = chan->sk;
5443 struct l2cap_conn_rsp rsp;
/* Deferred setup keeps the response pending until userspace
 * authorizes; otherwise move straight to BT_CONFIG. */
5449 if (test_bit(BT_SK_DEFER_SETUP,
5450 &bt_sk(sk)->flags)) {
5451 struct sock *parent = bt_sk(sk)->parent;
5452 res = L2CAP_CR_PEND;
5453 stat = L2CAP_CS_AUTHOR_PEND;
5455 parent->sk_data_ready(parent, 0);
5457 __l2cap_state_change(chan, BT_CONFIG);
5458 res = L2CAP_CR_SUCCESS;
5459 stat = L2CAP_CS_NO_INFO;
/* Security failed: reject the pending connect request. */
5462 __l2cap_state_change(chan, BT_DISCONN);
5463 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5464 res = L2CAP_CR_SEC_BLOCK;
5465 stat = L2CAP_CS_NO_INFO;
5470 rsp.scid = cpu_to_le16(chan->dcid);
5471 rsp.dcid = cpu_to_le16(chan->scid);
5472 rsp.result = cpu_to_le16(res);
5473 rsp.status = cpu_to_le16(stat);
5474 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Follow a successful response with our config request. */
5477 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5478 res == L2CAP_CR_SUCCESS) {
5480 set_bit(CONF_REQ_SENT, &chan->conf_state);
5481 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5483 l2cap_build_conf_req(chan, buf),
5485 chan->num_conf_req++;
5489 l2cap_chan_unlock(chan);
5492 mutex_unlock(&conn->chan_lock);
/* HCI ACL data entry point: reassemble L2CAP frames from ACL
 * fragments.  A start fragment (no ACL_CONT) must carry at least the
 * basic L2CAP header; if it holds the whole frame it is processed
 * immediately, otherwise an rx_skb is allocated and continuation
 * fragments are appended until rx_len reaches zero.  Length
 * inconsistencies mark the connection unreliable (ECOMM).
 */
5497 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5499 struct l2cap_conn *conn = hcon->l2cap_data;
5502 conn = l2cap_conn_add(hcon, 0);
5507 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5509 if (!(flags & ACL_CONT)) {
5510 struct l2cap_hdr *hdr;
/* A new start fragment while one is pending means we lost data. */
5514 BT_ERR("Unexpected start frame (len %d)", skb->len);
5515 kfree_skb(conn->rx_skb);
5516 conn->rx_skb = NULL;
5518 l2cap_conn_unreliable(conn, ECOMM);
5521 /* Start fragment always begin with Basic L2CAP header */
5522 if (skb->len < L2CAP_HDR_SIZE) {
5523 BT_ERR("Frame is too short (len %d)", skb->len);
5524 l2cap_conn_unreliable(conn, ECOMM);
5528 hdr = (struct l2cap_hdr *) skb->data;
5529 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5531 if (len == skb->len) {
5532 /* Complete frame received */
5533 l2cap_recv_frame(conn, skb);
5537 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5539 if (skb->len > len) {
5540 BT_ERR("Frame is too long (len %d, expected len %d)",
5542 l2cap_conn_unreliable(conn, ECOMM);
5546 /* Allocate skb for the complete frame (with header) */
5547 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5551 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remember how many bytes are still expected. */
5553 conn->rx_len = len - skb->len;
5555 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5557 if (!conn->rx_len) {
5558 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5559 l2cap_conn_unreliable(conn, ECOMM);
5563 if (skb->len > conn->rx_len) {
5564 BT_ERR("Fragment is too long (len %d, expected %d)",
5565 skb->len, conn->rx_len);
5566 kfree_skb(conn->rx_skb);
5567 conn->rx_skb = NULL;
5569 l2cap_conn_unreliable(conn, ECOMM);
5573 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5575 conn->rx_len -= skb->len;
5577 if (!conn->rx_len) {
5578 /* Complete frame received */
5579 l2cap_recv_frame(conn, conn->rx_skb);
5580 conn->rx_skb = NULL;
/* debugfs seq_file show: dump one line per global L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode) under
 * the chan_list read lock.
 */
5589 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5591 struct l2cap_chan *c;
5593 read_lock(&chan_list_lock);
5595 list_for_each_entry(c, &chan_list, global_l) {
5596 struct sock *sk = c->sk;
5598 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5599 batostr(&bt_sk(sk)->src),
5600 batostr(&bt_sk(sk)->dst),
5601 c->state, __le16_to_cpu(c->psm),
5602 c->scid, c->dcid, c->imtu, c->omtu,
5603 c->sec_level, c->mode);
5606 read_unlock(&chan_list_lock);
/* debugfs open hook: standard single_open() wrapper around the show
 * function.
 */
5611 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5613 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
5616 static const struct file_operations l2cap_debugfs_fops = {
5617 .open = l2cap_debugfs_open,
5619 .llseek = seq_lseek,
5620 .release = single_release,
5623 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the debugfs
 * entry (failure to create the latter is only logged, not fatal).
 */
5625 int __init l2cap_init(void)
5629 err = l2cap_init_sockets();
5634 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5635 bt_debugfs, NULL, &l2cap_debugfs_fops);
5637 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs entry and unregister sockets,
 * reversing l2cap_init().
 */
5643 void l2cap_exit(void)
5645 debugfs_remove(l2cap_debugfs);
5646 l2cap_cleanup_sockets();
5649 module_param(disable_ertm, bool, 0644);
5650 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");