2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
43 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
44 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
46 static LIST_HEAD(chan_list);
47 static DEFINE_RWLOCK(chan_list_lock);
49 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
50 u8 code, u8 ident, u16 dlen, void *data);
51 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
54 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
55 struct l2cap_chan *chan, int err);
57 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
58 struct sk_buff_head *skbs, u8 event);
60 /* ---- L2CAP channels ---- */
62 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
66 list_for_each_entry(c, &conn->chan_l, list) {
73 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
77 list_for_each_entry(c, &conn->chan_l, list) {
84 /* Find channel with given SCID.
85 * Returns locked channel. */
86 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 mutex_lock(&conn->chan_lock);
91 c = __l2cap_get_chan_by_scid(conn, cid);
94 mutex_unlock(&conn->chan_lock);
99 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 struct l2cap_chan *c;
103 list_for_each_entry(c, &conn->chan_l, list) {
104 if (c->ident == ident)
110 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112 struct l2cap_chan *c;
114 list_for_each_entry(c, &chan_list, global_l) {
115 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
121 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
125 write_lock(&chan_list_lock);
127 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
140 for (p = 0x1001; p < 0x1100; p += 2)
141 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
142 chan->psm = cpu_to_le16(p);
143 chan->sport = cpu_to_le16(p);
150 write_unlock(&chan_list_lock);
154 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
156 write_lock(&chan_list_lock);
160 write_unlock(&chan_list_lock);
165 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 u16 cid = L2CAP_CID_DYN_START;
169 for (; cid < L2CAP_CID_DYN_END; cid++) {
170 if (!__l2cap_get_chan_by_scid(conn, cid))
177 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
179 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
180 state_to_string(state));
183 chan->ops->state_change(chan, state);
186 static void l2cap_state_change(struct l2cap_chan *chan, int state)
188 struct sock *sk = chan->sk;
191 __l2cap_state_change(chan, state);
195 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
197 struct sock *sk = chan->sk;
202 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
204 struct sock *sk = chan->sk;
207 __l2cap_chan_set_err(chan, err);
211 static void __set_retrans_timer(struct l2cap_chan *chan)
213 if (!delayed_work_pending(&chan->monitor_timer) &&
214 chan->retrans_timeout) {
215 l2cap_set_timer(chan, &chan->retrans_timer,
216 msecs_to_jiffies(chan->retrans_timeout));
220 static void __set_monitor_timer(struct l2cap_chan *chan)
222 __clear_retrans_timer(chan);
223 if (chan->monitor_timeout) {
224 l2cap_set_timer(chan, &chan->monitor_timer,
225 msecs_to_jiffies(chan->monitor_timeout));
229 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
234 skb_queue_walk(head, skb) {
235 if (bt_cb(skb)->control.txseq == seq)
242 /* ---- L2CAP sequence number lists ---- */
244 /* For ERTM, ordered lists of sequence numbers must be tracked for
245 * SREJ requests that are received and for frames that are to be
246 * retransmitted. These seq_list functions implement a singly-linked
247 * list in an array, where membership in the list can also be checked
248 * in constant time. Items can also be added to the tail of the list
249 * and removed from the head in constant time, without further memory
253 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
255 size_t alloc_size, i;
257 /* Allocated size is a power of 2 to map sequence numbers
258 * (which may be up to 14 bits) in to a smaller array that is
259 * sized for the negotiated ERTM transmit windows.
261 alloc_size = roundup_pow_of_two(size);
263 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
267 seq_list->mask = alloc_size - 1;
268 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
269 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
270 for (i = 0; i < alloc_size; i++)
271 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
276 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
278 kfree(seq_list->list);
281 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
284 /* Constant-time check for list membership */
285 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
288 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
290 u16 mask = seq_list->mask;
292 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
293 /* In case someone tries to pop the head of an empty list */
294 return L2CAP_SEQ_LIST_CLEAR;
295 } else if (seq_list->head == seq) {
296 /* Head can be removed in constant time */
297 seq_list->head = seq_list->list[seq & mask];
298 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
300 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
301 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
302 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
305 /* Walk the list to find the sequence number */
306 u16 prev = seq_list->head;
307 while (seq_list->list[prev & mask] != seq) {
308 prev = seq_list->list[prev & mask];
309 if (prev == L2CAP_SEQ_LIST_TAIL)
310 return L2CAP_SEQ_LIST_CLEAR;
313 /* Unlink the number from the list and clear it */
314 seq_list->list[prev & mask] = seq_list->list[seq & mask];
315 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
316 if (seq_list->tail == seq)
317 seq_list->tail = prev;
322 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
324 /* Remove the head in constant time */
325 return l2cap_seq_list_remove(seq_list, seq_list->head);
328 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
332 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
335 for (i = 0; i <= seq_list->mask; i++)
336 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
342 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
344 u16 mask = seq_list->mask;
346 /* All appends happen in constant time */
348 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
351 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
352 seq_list->head = seq;
354 seq_list->list[seq_list->tail & mask] = seq;
356 seq_list->tail = seq;
357 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
360 static void l2cap_chan_timeout(struct work_struct *work)
362 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
364 struct l2cap_conn *conn = chan->conn;
367 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
369 mutex_lock(&conn->chan_lock);
370 l2cap_chan_lock(chan);
372 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
373 reason = ECONNREFUSED;
374 else if (chan->state == BT_CONNECT &&
375 chan->sec_level != BT_SECURITY_SDP)
376 reason = ECONNREFUSED;
380 l2cap_chan_close(chan, reason);
382 l2cap_chan_unlock(chan);
384 chan->ops->close(chan);
385 mutex_unlock(&conn->chan_lock);
387 l2cap_chan_put(chan);
390 struct l2cap_chan *l2cap_chan_create(void)
392 struct l2cap_chan *chan;
394 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
398 mutex_init(&chan->lock);
400 write_lock(&chan_list_lock);
401 list_add(&chan->global_l, &chan_list);
402 write_unlock(&chan_list_lock);
404 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
406 chan->state = BT_OPEN;
408 atomic_set(&chan->refcnt, 1);
410 /* This flag is cleared in l2cap_chan_ready() */
411 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
413 BT_DBG("chan %p", chan);
418 void l2cap_chan_destroy(struct l2cap_chan *chan)
420 write_lock(&chan_list_lock);
421 list_del(&chan->global_l);
422 write_unlock(&chan_list_lock);
424 l2cap_chan_put(chan);
427 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
429 chan->fcs = L2CAP_FCS_CRC16;
430 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
431 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
432 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
433 chan->sec_level = BT_SECURITY_LOW;
435 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
438 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
440 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
441 __le16_to_cpu(chan->psm), chan->dcid);
443 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
447 switch (chan->chan_type) {
448 case L2CAP_CHAN_CONN_ORIENTED:
449 if (conn->hcon->type == LE_LINK) {
451 chan->omtu = L2CAP_LE_DEFAULT_MTU;
452 chan->scid = L2CAP_CID_LE_DATA;
453 chan->dcid = L2CAP_CID_LE_DATA;
455 /* Alloc CID for connection-oriented socket */
456 chan->scid = l2cap_alloc_cid(conn);
457 chan->omtu = L2CAP_DEFAULT_MTU;
461 case L2CAP_CHAN_CONN_LESS:
462 /* Connectionless socket */
463 chan->scid = L2CAP_CID_CONN_LESS;
464 chan->dcid = L2CAP_CID_CONN_LESS;
465 chan->omtu = L2CAP_DEFAULT_MTU;
469 /* Raw socket can send/recv signalling messages only */
470 chan->scid = L2CAP_CID_SIGNALING;
471 chan->dcid = L2CAP_CID_SIGNALING;
472 chan->omtu = L2CAP_DEFAULT_MTU;
475 chan->local_id = L2CAP_BESTEFFORT_ID;
476 chan->local_stype = L2CAP_SERV_BESTEFFORT;
477 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
478 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
479 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
480 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
482 l2cap_chan_hold(chan);
484 list_add(&chan->list, &conn->chan_l);
487 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
489 mutex_lock(&conn->chan_lock);
490 __l2cap_chan_add(conn, chan);
491 mutex_unlock(&conn->chan_lock);
494 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
496 struct sock *sk = chan->sk;
497 struct l2cap_conn *conn = chan->conn;
498 struct sock *parent = bt_sk(sk)->parent;
500 __clear_chan_timer(chan);
502 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
505 /* Delete from channel list */
506 list_del(&chan->list);
508 l2cap_chan_put(chan);
511 hci_conn_put(conn->hcon);
516 __l2cap_state_change(chan, BT_CLOSED);
517 sock_set_flag(sk, SOCK_ZAPPED);
520 __l2cap_chan_set_err(chan, err);
523 bt_accept_unlink(sk);
524 parent->sk_data_ready(parent, 0);
526 sk->sk_state_change(sk);
530 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
534 case L2CAP_MODE_BASIC:
537 case L2CAP_MODE_ERTM:
538 __clear_retrans_timer(chan);
539 __clear_monitor_timer(chan);
540 __clear_ack_timer(chan);
542 skb_queue_purge(&chan->srej_q);
544 l2cap_seq_list_free(&chan->srej_list);
545 l2cap_seq_list_free(&chan->retrans_list);
549 case L2CAP_MODE_STREAMING:
550 skb_queue_purge(&chan->tx_q);
557 static void l2cap_chan_cleanup_listen(struct sock *parent)
561 BT_DBG("parent %p", parent);
563 /* Close not yet accepted channels */
564 while ((sk = bt_accept_dequeue(parent, NULL))) {
565 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
567 l2cap_chan_lock(chan);
568 __clear_chan_timer(chan);
569 l2cap_chan_close(chan, ECONNRESET);
570 l2cap_chan_unlock(chan);
572 chan->ops->close(chan);
576 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
578 struct l2cap_conn *conn = chan->conn;
579 struct sock *sk = chan->sk;
581 BT_DBG("chan %p state %s sk %p", chan,
582 state_to_string(chan->state), sk);
584 switch (chan->state) {
587 l2cap_chan_cleanup_listen(sk);
589 __l2cap_state_change(chan, BT_CLOSED);
590 sock_set_flag(sk, SOCK_ZAPPED);
596 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
597 conn->hcon->type == ACL_LINK) {
598 __set_chan_timer(chan, sk->sk_sndtimeo);
599 l2cap_send_disconn_req(conn, chan, reason);
601 l2cap_chan_del(chan, reason);
605 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
606 conn->hcon->type == ACL_LINK) {
607 struct l2cap_conn_rsp rsp;
610 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
611 result = L2CAP_CR_SEC_BLOCK;
613 result = L2CAP_CR_BAD_PSM;
614 l2cap_state_change(chan, BT_DISCONN);
616 rsp.scid = cpu_to_le16(chan->dcid);
617 rsp.dcid = cpu_to_le16(chan->scid);
618 rsp.result = cpu_to_le16(result);
619 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
620 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
624 l2cap_chan_del(chan, reason);
629 l2cap_chan_del(chan, reason);
634 sock_set_flag(sk, SOCK_ZAPPED);
640 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
642 if (chan->chan_type == L2CAP_CHAN_RAW) {
643 switch (chan->sec_level) {
644 case BT_SECURITY_HIGH:
645 return HCI_AT_DEDICATED_BONDING_MITM;
646 case BT_SECURITY_MEDIUM:
647 return HCI_AT_DEDICATED_BONDING;
649 return HCI_AT_NO_BONDING;
651 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
652 if (chan->sec_level == BT_SECURITY_LOW)
653 chan->sec_level = BT_SECURITY_SDP;
655 if (chan->sec_level == BT_SECURITY_HIGH)
656 return HCI_AT_NO_BONDING_MITM;
658 return HCI_AT_NO_BONDING;
660 switch (chan->sec_level) {
661 case BT_SECURITY_HIGH:
662 return HCI_AT_GENERAL_BONDING_MITM;
663 case BT_SECURITY_MEDIUM:
664 return HCI_AT_GENERAL_BONDING;
666 return HCI_AT_NO_BONDING;
671 /* Service level security */
672 int l2cap_chan_check_security(struct l2cap_chan *chan)
674 struct l2cap_conn *conn = chan->conn;
677 auth_type = l2cap_get_auth_type(chan);
679 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
682 static u8 l2cap_get_ident(struct l2cap_conn *conn)
686 /* Get next available identificator.
687 * 1 - 128 are used by kernel.
688 * 129 - 199 are reserved.
689 * 200 - 254 are used by utilities like l2ping, etc.
692 spin_lock(&conn->lock);
694 if (++conn->tx_ident > 128)
699 spin_unlock(&conn->lock);
704 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
706 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
709 BT_DBG("code 0x%2.2x", code);
714 if (lmp_no_flush_capable(conn->hcon->hdev))
715 flags = ACL_START_NO_FLUSH;
719 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
720 skb->priority = HCI_PRIO_MAX;
722 hci_send_acl(conn->hchan, skb, flags);
725 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
727 struct hci_conn *hcon = chan->conn->hcon;
730 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
733 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
734 lmp_no_flush_capable(hcon->hdev))
735 flags = ACL_START_NO_FLUSH;
739 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
740 hci_send_acl(chan->conn->hchan, skb, flags);
743 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
745 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
746 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
748 if (enh & L2CAP_CTRL_FRAME_TYPE) {
751 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
752 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
759 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
760 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
767 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
769 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
770 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
772 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
775 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
776 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
783 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
784 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
791 static inline void __unpack_control(struct l2cap_chan *chan,
794 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
795 __unpack_extended_control(get_unaligned_le32(skb->data),
796 &bt_cb(skb)->control);
797 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
799 __unpack_enhanced_control(get_unaligned_le16(skb->data),
800 &bt_cb(skb)->control);
801 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
805 static u32 __pack_extended_control(struct l2cap_ctrl *control)
809 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
810 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
812 if (control->sframe) {
813 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
814 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
815 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
817 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
818 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
824 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
828 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
829 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
831 if (control->sframe) {
832 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
833 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
834 packed |= L2CAP_CTRL_FRAME_TYPE;
836 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
837 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
843 static inline void __pack_control(struct l2cap_chan *chan,
844 struct l2cap_ctrl *control,
847 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
848 put_unaligned_le32(__pack_extended_control(control),
849 skb->data + L2CAP_HDR_SIZE);
851 put_unaligned_le16(__pack_enhanced_control(control),
852 skb->data + L2CAP_HDR_SIZE);
856 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
860 struct l2cap_hdr *lh;
863 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
864 hlen = L2CAP_EXT_HDR_SIZE;
866 hlen = L2CAP_ENH_HDR_SIZE;
868 if (chan->fcs == L2CAP_FCS_CRC16)
869 hlen += L2CAP_FCS_SIZE;
871 skb = bt_skb_alloc(hlen, GFP_KERNEL);
874 return ERR_PTR(-ENOMEM);
876 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
877 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
878 lh->cid = cpu_to_le16(chan->dcid);
880 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
881 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
883 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
885 if (chan->fcs == L2CAP_FCS_CRC16) {
886 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
887 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
890 skb->priority = HCI_PRIO_MAX;
894 static void l2cap_send_sframe(struct l2cap_chan *chan,
895 struct l2cap_ctrl *control)
900 BT_DBG("chan %p, control %p", chan, control);
902 if (!control->sframe)
905 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
909 if (control->super == L2CAP_SUPER_RR)
910 clear_bit(CONN_RNR_SENT, &chan->conn_state);
911 else if (control->super == L2CAP_SUPER_RNR)
912 set_bit(CONN_RNR_SENT, &chan->conn_state);
914 if (control->super != L2CAP_SUPER_SREJ) {
915 chan->last_acked_seq = control->reqseq;
916 __clear_ack_timer(chan);
919 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
920 control->final, control->poll, control->super);
922 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
923 control_field = __pack_extended_control(control);
925 control_field = __pack_enhanced_control(control);
927 skb = l2cap_create_sframe_pdu(chan, control_field);
929 l2cap_do_send(chan, skb);
932 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
934 struct l2cap_ctrl control;
936 BT_DBG("chan %p, poll %d", chan, poll);
938 memset(&control, 0, sizeof(control));
942 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
943 control.super = L2CAP_SUPER_RNR;
945 control.super = L2CAP_SUPER_RR;
947 control.reqseq = chan->buffer_seq;
948 l2cap_send_sframe(chan, &control);
951 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
953 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
956 static void l2cap_send_conn_req(struct l2cap_chan *chan)
958 struct l2cap_conn *conn = chan->conn;
959 struct l2cap_conn_req req;
961 req.scid = cpu_to_le16(chan->scid);
964 chan->ident = l2cap_get_ident(conn);
966 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
968 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
971 static void l2cap_chan_ready(struct l2cap_chan *chan)
973 struct sock *sk = chan->sk;
978 parent = bt_sk(sk)->parent;
980 BT_DBG("sk %p, parent %p", sk, parent);
982 /* This clears all conf flags, including CONF_NOT_COMPLETE */
983 chan->conf_state = 0;
984 __clear_chan_timer(chan);
986 __l2cap_state_change(chan, BT_CONNECTED);
987 sk->sk_state_change(sk);
990 parent->sk_data_ready(parent, 0);
995 static void l2cap_do_start(struct l2cap_chan *chan)
997 struct l2cap_conn *conn = chan->conn;
999 if (conn->hcon->type == LE_LINK) {
1000 l2cap_chan_ready(chan);
1004 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1005 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1008 if (l2cap_chan_check_security(chan) &&
1009 __l2cap_no_conn_pending(chan))
1010 l2cap_send_conn_req(chan);
1012 struct l2cap_info_req req;
1013 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1015 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1016 conn->info_ident = l2cap_get_ident(conn);
1018 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1020 l2cap_send_cmd(conn, conn->info_ident,
1021 L2CAP_INFO_REQ, sizeof(req), &req);
1025 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1027 u32 local_feat_mask = l2cap_feat_mask;
1029 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1032 case L2CAP_MODE_ERTM:
1033 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1034 case L2CAP_MODE_STREAMING:
1035 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1041 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1043 struct sock *sk = chan->sk;
1044 struct l2cap_disconn_req req;
1049 if (chan->mode == L2CAP_MODE_ERTM) {
1050 __clear_retrans_timer(chan);
1051 __clear_monitor_timer(chan);
1052 __clear_ack_timer(chan);
1055 req.dcid = cpu_to_le16(chan->dcid);
1056 req.scid = cpu_to_le16(chan->scid);
1057 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1058 L2CAP_DISCONN_REQ, sizeof(req), &req);
1061 __l2cap_state_change(chan, BT_DISCONN);
1062 __l2cap_chan_set_err(chan, err);
1066 /* ---- L2CAP connections ---- */
1067 static void l2cap_conn_start(struct l2cap_conn *conn)
1069 struct l2cap_chan *chan, *tmp;
1071 BT_DBG("conn %p", conn);
1073 mutex_lock(&conn->chan_lock);
1075 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1076 struct sock *sk = chan->sk;
1078 l2cap_chan_lock(chan);
1080 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1081 l2cap_chan_unlock(chan);
1085 if (chan->state == BT_CONNECT) {
1086 if (!l2cap_chan_check_security(chan) ||
1087 !__l2cap_no_conn_pending(chan)) {
1088 l2cap_chan_unlock(chan);
1092 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1093 && test_bit(CONF_STATE2_DEVICE,
1094 &chan->conf_state)) {
1095 l2cap_chan_close(chan, ECONNRESET);
1096 l2cap_chan_unlock(chan);
1100 l2cap_send_conn_req(chan);
1102 } else if (chan->state == BT_CONNECT2) {
1103 struct l2cap_conn_rsp rsp;
1105 rsp.scid = cpu_to_le16(chan->dcid);
1106 rsp.dcid = cpu_to_le16(chan->scid);
1108 if (l2cap_chan_check_security(chan)) {
1110 if (test_bit(BT_SK_DEFER_SETUP,
1111 &bt_sk(sk)->flags)) {
1112 struct sock *parent = bt_sk(sk)->parent;
1113 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1114 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1116 parent->sk_data_ready(parent, 0);
1119 __l2cap_state_change(chan, BT_CONFIG);
1120 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1121 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1125 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1126 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1129 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1132 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1133 rsp.result != L2CAP_CR_SUCCESS) {
1134 l2cap_chan_unlock(chan);
1138 set_bit(CONF_REQ_SENT, &chan->conf_state);
1139 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1140 l2cap_build_conf_req(chan, buf), buf);
1141 chan->num_conf_req++;
1144 l2cap_chan_unlock(chan);
1147 mutex_unlock(&conn->chan_lock);
1150 /* Find socket with cid and source/destination bdaddr.
1151 * Returns closest match, locked.
1153 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1157 struct l2cap_chan *c, *c1 = NULL;
1159 read_lock(&chan_list_lock);
1161 list_for_each_entry(c, &chan_list, global_l) {
1162 struct sock *sk = c->sk;
1164 if (state && c->state != state)
1167 if (c->scid == cid) {
1168 int src_match, dst_match;
1169 int src_any, dst_any;
1172 src_match = !bacmp(&bt_sk(sk)->src, src);
1173 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1174 if (src_match && dst_match) {
1175 read_unlock(&chan_list_lock);
1180 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1181 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1182 if ((src_match && dst_any) || (src_any && dst_match) ||
1183 (src_any && dst_any))
1188 read_unlock(&chan_list_lock);
1193 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1195 struct sock *parent, *sk;
1196 struct l2cap_chan *chan, *pchan;
1200 /* Check if we have socket listening on cid */
1201 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1202 conn->src, conn->dst);
1210 /* Check for backlog size */
1211 if (sk_acceptq_is_full(parent)) {
1212 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1216 chan = pchan->ops->new_connection(pchan);
1222 hci_conn_hold(conn->hcon);
1224 bacpy(&bt_sk(sk)->src, conn->src);
1225 bacpy(&bt_sk(sk)->dst, conn->dst);
1227 bt_accept_enqueue(parent, sk);
1229 l2cap_chan_add(conn, chan);
1231 l2cap_chan_ready(chan);
1234 release_sock(parent);
1237 static void l2cap_conn_ready(struct l2cap_conn *conn)
1239 struct l2cap_chan *chan;
1241 BT_DBG("conn %p", conn);
1243 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1244 l2cap_le_conn_ready(conn);
1246 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1247 smp_conn_security(conn, conn->hcon->pending_sec_level);
1249 mutex_lock(&conn->chan_lock);
1251 list_for_each_entry(chan, &conn->chan_l, list) {
1253 l2cap_chan_lock(chan);
1255 if (conn->hcon->type == LE_LINK) {
1256 if (smp_conn_security(conn, chan->sec_level))
1257 l2cap_chan_ready(chan);
1259 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1260 struct sock *sk = chan->sk;
1261 __clear_chan_timer(chan);
1263 __l2cap_state_change(chan, BT_CONNECTED);
1264 sk->sk_state_change(sk);
1267 } else if (chan->state == BT_CONNECT)
1268 l2cap_do_start(chan);
1270 l2cap_chan_unlock(chan);
1273 mutex_unlock(&conn->chan_lock);
1276 /* Notify sockets that we cannot guaranty reliability anymore */
1277 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1279 struct l2cap_chan *chan;
1281 BT_DBG("conn %p", conn);
1283 mutex_lock(&conn->chan_lock);
1285 list_for_each_entry(chan, &conn->chan_l, list) {
1286 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1287 __l2cap_chan_set_err(chan, err);
1290 mutex_unlock(&conn->chan_lock);
1293 static void l2cap_info_timeout(struct work_struct *work)
1295 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1298 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1299 conn->info_ident = 0;
1301 l2cap_conn_start(conn);
1304 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1306 struct l2cap_conn *conn = hcon->l2cap_data;
1307 struct l2cap_chan *chan, *l;
1312 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1314 kfree_skb(conn->rx_skb);
1316 mutex_lock(&conn->chan_lock);
1319 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1320 l2cap_chan_hold(chan);
1321 l2cap_chan_lock(chan);
1323 l2cap_chan_del(chan, err);
1325 l2cap_chan_unlock(chan);
1327 chan->ops->close(chan);
1328 l2cap_chan_put(chan);
1331 mutex_unlock(&conn->chan_lock);
1333 hci_chan_del(conn->hchan);
1335 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1336 cancel_delayed_work_sync(&conn->info_timer);
1338 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1339 cancel_delayed_work_sync(&conn->security_timer);
1340 smp_chan_destroy(conn);
1343 hcon->l2cap_data = NULL;
1347 static void security_timeout(struct work_struct *work)
1349 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1350 security_timer.work);
1352 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1355 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1357 struct l2cap_conn *conn = hcon->l2cap_data;
1358 struct hci_chan *hchan;
1363 hchan = hci_chan_create(hcon);
1367 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1369 hci_chan_del(hchan);
1373 hcon->l2cap_data = conn;
1375 conn->hchan = hchan;
1377 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1379 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1380 conn->mtu = hcon->hdev->le_mtu;
1382 conn->mtu = hcon->hdev->acl_mtu;
1384 conn->src = &hcon->hdev->bdaddr;
1385 conn->dst = &hcon->dst;
1387 conn->feat_mask = 0;
1389 spin_lock_init(&conn->lock);
1390 mutex_init(&conn->chan_lock);
1392 INIT_LIST_HEAD(&conn->chan_l);
1394 if (hcon->type == LE_LINK)
1395 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1397 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1399 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1404 /* ---- Socket interface ---- */
1406 /* Find socket with psm and source / destination bdaddr.
1407 * Returns closest match.
1409 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1413 struct l2cap_chan *c, *c1 = NULL;
1415 read_lock(&chan_list_lock);
1417 list_for_each_entry(c, &chan_list, global_l) {
1418 struct sock *sk = c->sk;
1420 if (state && c->state != state)
1423 if (c->psm == psm) {
1424 int src_match, dst_match;
1425 int src_any, dst_any;
1428 src_match = !bacmp(&bt_sk(sk)->src, src);
1429 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1430 if (src_match && dst_match) {
1431 read_unlock(&chan_list_lock);
1436 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1437 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1438 if ((src_match && dst_any) || (src_any && dst_match) ||
1439 (src_any && dst_any))
1444 read_unlock(&chan_list_lock);
1449 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1450 bdaddr_t *dst, u8 dst_type)
1452 struct sock *sk = chan->sk;
1453 bdaddr_t *src = &bt_sk(sk)->src;
1454 struct l2cap_conn *conn;
1455 struct hci_conn *hcon;
1456 struct hci_dev *hdev;
1460 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1461 dst_type, __le16_to_cpu(chan->psm));
1463 hdev = hci_get_route(dst, src);
1465 return -EHOSTUNREACH;
1469 l2cap_chan_lock(chan);
1471 /* PSM must be odd and lsb of upper byte must be 0 */
1472 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1473 chan->chan_type != L2CAP_CHAN_RAW) {
1478 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1483 switch (chan->mode) {
1484 case L2CAP_MODE_BASIC:
1486 case L2CAP_MODE_ERTM:
1487 case L2CAP_MODE_STREAMING:
1498 switch (sk->sk_state) {
1502 /* Already connecting */
1508 /* Already connected */
1524 /* Set destination address and psm */
1525 bacpy(&bt_sk(sk)->dst, dst);
1532 auth_type = l2cap_get_auth_type(chan);
1534 if (chan->dcid == L2CAP_CID_LE_DATA)
1535 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1536 chan->sec_level, auth_type);
1538 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1539 chan->sec_level, auth_type);
1542 err = PTR_ERR(hcon);
1546 conn = l2cap_conn_add(hcon, 0);
1553 if (hcon->type == LE_LINK) {
1556 if (!list_empty(&conn->chan_l)) {
1565 /* Update source addr of the socket */
1566 bacpy(src, conn->src);
1568 l2cap_chan_unlock(chan);
1569 l2cap_chan_add(conn, chan);
1570 l2cap_chan_lock(chan);
1572 l2cap_state_change(chan, BT_CONNECT);
1573 __set_chan_timer(chan, sk->sk_sndtimeo);
1575 if (hcon->state == BT_CONNECTED) {
1576 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1577 __clear_chan_timer(chan);
1578 if (l2cap_chan_check_security(chan))
1579 l2cap_state_change(chan, BT_CONNECTED);
1581 l2cap_do_start(chan);
1587 l2cap_chan_unlock(chan);
1588 hci_dev_unlock(hdev);
1593 int __l2cap_wait_ack(struct sock *sk)
1595 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1596 DECLARE_WAITQUEUE(wait, current);
1600 add_wait_queue(sk_sleep(sk), &wait);
1601 set_current_state(TASK_INTERRUPTIBLE);
1602 while (chan->unacked_frames > 0 && chan->conn) {
1606 if (signal_pending(current)) {
1607 err = sock_intr_errno(timeo);
1612 timeo = schedule_timeout(timeo);
1614 set_current_state(TASK_INTERRUPTIBLE);
1616 err = sock_error(sk);
1620 set_current_state(TASK_RUNNING);
1621 remove_wait_queue(sk_sleep(sk), &wait);
1625 static void l2cap_monitor_timeout(struct work_struct *work)
1627 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1628 monitor_timer.work);
1630 BT_DBG("chan %p", chan);
1632 l2cap_chan_lock(chan);
1635 l2cap_chan_unlock(chan);
1636 l2cap_chan_put(chan);
1640 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1642 l2cap_chan_unlock(chan);
1643 l2cap_chan_put(chan);
1646 static void l2cap_retrans_timeout(struct work_struct *work)
1648 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1649 retrans_timer.work);
1651 BT_DBG("chan %p", chan);
1653 l2cap_chan_lock(chan);
1656 l2cap_chan_unlock(chan);
1657 l2cap_chan_put(chan);
1661 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1662 l2cap_chan_unlock(chan);
1663 l2cap_chan_put(chan);
1666 static void l2cap_streaming_send(struct l2cap_chan *chan,
1667 struct sk_buff_head *skbs)
1669 struct sk_buff *skb;
1670 struct l2cap_ctrl *control;
1672 BT_DBG("chan %p, skbs %p", chan, skbs);
1674 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1676 while (!skb_queue_empty(&chan->tx_q)) {
1678 skb = skb_dequeue(&chan->tx_q);
1680 bt_cb(skb)->control.retries = 1;
1681 control = &bt_cb(skb)->control;
1683 control->reqseq = 0;
1684 control->txseq = chan->next_tx_seq;
1686 __pack_control(chan, control, skb);
1688 if (chan->fcs == L2CAP_FCS_CRC16) {
1689 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1690 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1693 l2cap_do_send(chan, skb);
1695 BT_DBG("Sent txseq %d", (int)control->txseq);
1697 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1698 chan->frames_sent++;
1702 static int l2cap_ertm_send(struct l2cap_chan *chan)
1704 struct sk_buff *skb, *tx_skb;
1705 struct l2cap_ctrl *control;
1708 BT_DBG("chan %p", chan);
1710 if (chan->state != BT_CONNECTED)
1713 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1716 while (chan->tx_send_head &&
1717 chan->unacked_frames < chan->remote_tx_win &&
1718 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1720 skb = chan->tx_send_head;
1722 bt_cb(skb)->control.retries = 1;
1723 control = &bt_cb(skb)->control;
1725 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1728 control->reqseq = chan->buffer_seq;
1729 chan->last_acked_seq = chan->buffer_seq;
1730 control->txseq = chan->next_tx_seq;
1732 __pack_control(chan, control, skb);
1734 if (chan->fcs == L2CAP_FCS_CRC16) {
1735 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1736 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1739 /* Clone after data has been modified. Data is assumed to be
1740 read-only (for locking purposes) on cloned sk_buffs.
1742 tx_skb = skb_clone(skb, GFP_KERNEL);
1747 __set_retrans_timer(chan);
1749 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1750 chan->unacked_frames++;
1751 chan->frames_sent++;
1754 if (skb_queue_is_last(&chan->tx_q, skb))
1755 chan->tx_send_head = NULL;
1757 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1759 l2cap_do_send(chan, tx_skb);
1760 BT_DBG("Sent txseq %d", (int)control->txseq);
1763 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1764 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
1769 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1771 struct l2cap_ctrl control;
1772 struct sk_buff *skb;
1773 struct sk_buff *tx_skb;
1776 BT_DBG("chan %p", chan);
1778 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1781 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1782 seq = l2cap_seq_list_pop(&chan->retrans_list);
1784 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1786 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1791 bt_cb(skb)->control.retries++;
1792 control = bt_cb(skb)->control;
1794 if (chan->max_tx != 0 &&
1795 bt_cb(skb)->control.retries > chan->max_tx) {
1796 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1797 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1798 l2cap_seq_list_clear(&chan->retrans_list);
1802 control.reqseq = chan->buffer_seq;
1803 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1808 if (skb_cloned(skb)) {
1809 /* Cloned sk_buffs are read-only, so we need a
1812 tx_skb = skb_copy(skb, GFP_ATOMIC);
1814 tx_skb = skb_clone(skb, GFP_ATOMIC);
1818 l2cap_seq_list_clear(&chan->retrans_list);
1822 /* Update skb contents */
1823 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1824 put_unaligned_le32(__pack_extended_control(&control),
1825 tx_skb->data + L2CAP_HDR_SIZE);
1827 put_unaligned_le16(__pack_enhanced_control(&control),
1828 tx_skb->data + L2CAP_HDR_SIZE);
1831 if (chan->fcs == L2CAP_FCS_CRC16) {
1832 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1833 put_unaligned_le16(fcs, skb_put(tx_skb,
1837 l2cap_do_send(chan, tx_skb);
1839 BT_DBG("Resent txseq %d", control.txseq);
1841 chan->last_acked_seq = chan->buffer_seq;
1845 static void l2cap_retransmit(struct l2cap_chan *chan,
1846 struct l2cap_ctrl *control)
1848 BT_DBG("chan %p, control %p", chan, control);
1850 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1851 l2cap_ertm_resend(chan);
1854 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1855 struct l2cap_ctrl *control)
1857 struct sk_buff *skb;
1859 BT_DBG("chan %p, control %p", chan, control);
1862 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1864 l2cap_seq_list_clear(&chan->retrans_list);
1866 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1869 if (chan->unacked_frames) {
1870 skb_queue_walk(&chan->tx_q, skb) {
1871 if (bt_cb(skb)->control.txseq == control->reqseq ||
1872 skb == chan->tx_send_head)
1876 skb_queue_walk_from(&chan->tx_q, skb) {
1877 if (skb == chan->tx_send_head)
1880 l2cap_seq_list_append(&chan->retrans_list,
1881 bt_cb(skb)->control.txseq);
1884 l2cap_ertm_resend(chan);
1888 static void l2cap_send_ack(struct l2cap_chan *chan)
1890 struct l2cap_ctrl control;
1891 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1892 chan->last_acked_seq);
1895 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1896 chan, chan->last_acked_seq, chan->buffer_seq);
1898 memset(&control, 0, sizeof(control));
1901 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1902 chan->rx_state == L2CAP_RX_STATE_RECV) {
1903 __clear_ack_timer(chan);
1904 control.super = L2CAP_SUPER_RNR;
1905 control.reqseq = chan->buffer_seq;
1906 l2cap_send_sframe(chan, &control);
1908 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1909 l2cap_ertm_send(chan);
1910 /* If any i-frames were sent, they included an ack */
1911 if (chan->buffer_seq == chan->last_acked_seq)
1915 /* Ack now if the tx window is 3/4ths full.
1916 * Calculate without mul or div
1918 threshold = chan->tx_win;
1919 threshold += threshold << 1;
1922 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1925 if (frames_to_ack >= threshold) {
1926 __clear_ack_timer(chan);
1927 control.super = L2CAP_SUPER_RR;
1928 control.reqseq = chan->buffer_seq;
1929 l2cap_send_sframe(chan, &control);
1934 __set_ack_timer(chan);
1938 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1939 struct msghdr *msg, int len,
1940 int count, struct sk_buff *skb)
1942 struct l2cap_conn *conn = chan->conn;
1943 struct sk_buff **frag;
1946 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1952 /* Continuation fragments (no L2CAP header) */
1953 frag = &skb_shinfo(skb)->frag_list;
1955 struct sk_buff *tmp;
1957 count = min_t(unsigned int, conn->mtu, len);
1959 tmp = chan->ops->alloc_skb(chan, count,
1960 msg->msg_flags & MSG_DONTWAIT);
1962 return PTR_ERR(tmp);
1966 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1969 (*frag)->priority = skb->priority;
1974 skb->len += (*frag)->len;
1975 skb->data_len += (*frag)->len;
1977 frag = &(*frag)->next;
1983 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1984 struct msghdr *msg, size_t len,
1987 struct l2cap_conn *conn = chan->conn;
1988 struct sk_buff *skb;
1989 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1990 struct l2cap_hdr *lh;
1992 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1994 count = min_t(unsigned int, (conn->mtu - hlen), len);
1996 skb = chan->ops->alloc_skb(chan, count + hlen,
1997 msg->msg_flags & MSG_DONTWAIT);
2001 skb->priority = priority;
2003 /* Create L2CAP header */
2004 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2005 lh->cid = cpu_to_le16(chan->dcid);
2006 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2007 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2009 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2010 if (unlikely(err < 0)) {
2012 return ERR_PTR(err);
2017 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2018 struct msghdr *msg, size_t len,
2021 struct l2cap_conn *conn = chan->conn;
2022 struct sk_buff *skb;
2024 struct l2cap_hdr *lh;
2026 BT_DBG("chan %p len %d", chan, (int)len);
2028 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2030 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2031 msg->msg_flags & MSG_DONTWAIT);
2035 skb->priority = priority;
2037 /* Create L2CAP header */
2038 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2039 lh->cid = cpu_to_le16(chan->dcid);
2040 lh->len = cpu_to_le16(len);
2042 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2043 if (unlikely(err < 0)) {
2045 return ERR_PTR(err);
2050 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2051 struct msghdr *msg, size_t len,
2054 struct l2cap_conn *conn = chan->conn;
2055 struct sk_buff *skb;
2056 int err, count, hlen;
2057 struct l2cap_hdr *lh;
2059 BT_DBG("chan %p len %d", chan, (int)len);
2062 return ERR_PTR(-ENOTCONN);
2064 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2065 hlen = L2CAP_EXT_HDR_SIZE;
2067 hlen = L2CAP_ENH_HDR_SIZE;
2070 hlen += L2CAP_SDULEN_SIZE;
2072 if (chan->fcs == L2CAP_FCS_CRC16)
2073 hlen += L2CAP_FCS_SIZE;
2075 count = min_t(unsigned int, (conn->mtu - hlen), len);
2077 skb = chan->ops->alloc_skb(chan, count + hlen,
2078 msg->msg_flags & MSG_DONTWAIT);
2082 /* Create L2CAP header */
2083 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2084 lh->cid = cpu_to_le16(chan->dcid);
2085 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2087 /* Control header is populated later */
2088 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2089 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2091 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2094 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2096 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2097 if (unlikely(err < 0)) {
2099 return ERR_PTR(err);
2102 bt_cb(skb)->control.fcs = chan->fcs;
2103 bt_cb(skb)->control.retries = 0;
2107 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2108 struct sk_buff_head *seg_queue,
2109 struct msghdr *msg, size_t len)
2111 struct sk_buff *skb;
2117 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2119 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2120 * so fragmented skbs are not used. The HCI layer's handling
2121 * of fragmented skbs is not compatible with ERTM's queueing.
2124 /* PDU size is derived from the HCI MTU */
2125 pdu_len = chan->conn->mtu;
2127 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2129 /* Adjust for largest possible L2CAP overhead. */
2131 pdu_len -= L2CAP_FCS_SIZE;
2133 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2134 pdu_len -= L2CAP_EXT_HDR_SIZE;
2136 pdu_len -= L2CAP_ENH_HDR_SIZE;
2138 /* Remote device may have requested smaller PDUs */
2139 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2141 if (len <= pdu_len) {
2142 sar = L2CAP_SAR_UNSEGMENTED;
2146 sar = L2CAP_SAR_START;
2148 pdu_len -= L2CAP_SDULEN_SIZE;
2152 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2155 __skb_queue_purge(seg_queue);
2156 return PTR_ERR(skb);
2159 bt_cb(skb)->control.sar = sar;
2160 __skb_queue_tail(seg_queue, skb);
2165 pdu_len += L2CAP_SDULEN_SIZE;
2168 if (len <= pdu_len) {
2169 sar = L2CAP_SAR_END;
2172 sar = L2CAP_SAR_CONTINUE;
2179 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2182 struct sk_buff *skb;
2184 struct sk_buff_head seg_queue;
2186 /* Connectionless channel */
2187 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2188 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2190 return PTR_ERR(skb);
2192 l2cap_do_send(chan, skb);
2196 switch (chan->mode) {
2197 case L2CAP_MODE_BASIC:
2198 /* Check outgoing MTU */
2199 if (len > chan->omtu)
2202 /* Create a basic PDU */
2203 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2205 return PTR_ERR(skb);
2207 l2cap_do_send(chan, skb);
2211 case L2CAP_MODE_ERTM:
2212 case L2CAP_MODE_STREAMING:
2213 /* Check outgoing MTU */
2214 if (len > chan->omtu) {
2219 __skb_queue_head_init(&seg_queue);
2221 /* Do segmentation before calling in to the state machine,
2222 * since it's possible to block while waiting for memory
2225 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2227 /* The channel could have been closed while segmenting,
2228 * check that it is still connected.
2230 if (chan->state != BT_CONNECTED) {
2231 __skb_queue_purge(&seg_queue);
2238 if (chan->mode == L2CAP_MODE_ERTM)
2239 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2241 l2cap_streaming_send(chan, &seg_queue);
2245 /* If the skbs were not queued for sending, they'll still be in
2246 * seg_queue and need to be purged.
2248 __skb_queue_purge(&seg_queue);
2252 BT_DBG("bad state %1.1x", chan->mode);
2259 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2261 struct l2cap_ctrl control;
2264 BT_DBG("chan %p, txseq %d", chan, txseq);
2266 memset(&control, 0, sizeof(control));
2268 control.super = L2CAP_SUPER_SREJ;
2270 for (seq = chan->expected_tx_seq; seq != txseq;
2271 seq = __next_seq(chan, seq)) {
2272 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2273 control.reqseq = seq;
2274 l2cap_send_sframe(chan, &control);
2275 l2cap_seq_list_append(&chan->srej_list, seq);
2279 chan->expected_tx_seq = __next_seq(chan, txseq);
2282 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2284 struct l2cap_ctrl control;
2286 BT_DBG("chan %p", chan);
2288 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2291 memset(&control, 0, sizeof(control));
2293 control.super = L2CAP_SUPER_SREJ;
2294 control.reqseq = chan->srej_list.tail;
2295 l2cap_send_sframe(chan, &control);
2298 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2300 struct l2cap_ctrl control;
2304 BT_DBG("chan %p, txseq %d", chan, txseq);
2306 memset(&control, 0, sizeof(control));
2308 control.super = L2CAP_SUPER_SREJ;
2310 /* Capture initial list head to allow only one pass through the list. */
2311 initial_head = chan->srej_list.head;
2314 seq = l2cap_seq_list_pop(&chan->srej_list);
2315 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2318 control.reqseq = seq;
2319 l2cap_send_sframe(chan, &control);
2320 l2cap_seq_list_append(&chan->srej_list, seq);
2321 } while (chan->srej_list.head != initial_head);
2324 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2326 struct sk_buff *acked_skb;
2329 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2331 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2334 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2335 chan->expected_ack_seq, chan->unacked_frames);
2337 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2338 ackseq = __next_seq(chan, ackseq)) {
2340 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2342 skb_unlink(acked_skb, &chan->tx_q);
2343 kfree_skb(acked_skb);
2344 chan->unacked_frames--;
2348 chan->expected_ack_seq = reqseq;
2350 if (chan->unacked_frames == 0)
2351 __clear_retrans_timer(chan);
2353 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2356 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2358 BT_DBG("chan %p", chan);
2360 chan->expected_tx_seq = chan->buffer_seq;
2361 l2cap_seq_list_clear(&chan->srej_list);
2362 skb_queue_purge(&chan->srej_q);
2363 chan->rx_state = L2CAP_RX_STATE_RECV;
2366 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2367 struct l2cap_ctrl *control,
2368 struct sk_buff_head *skbs, u8 event)
2370 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2374 case L2CAP_EV_DATA_REQUEST:
2375 if (chan->tx_send_head == NULL)
2376 chan->tx_send_head = skb_peek(skbs);
2378 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2379 l2cap_ertm_send(chan);
2381 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2382 BT_DBG("Enter LOCAL_BUSY");
2383 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2385 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2386 /* The SREJ_SENT state must be aborted if we are to
2387 * enter the LOCAL_BUSY state.
2389 l2cap_abort_rx_srej_sent(chan);
2392 l2cap_send_ack(chan);
2395 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2396 BT_DBG("Exit LOCAL_BUSY");
2397 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2399 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2400 struct l2cap_ctrl local_control;
2402 memset(&local_control, 0, sizeof(local_control));
2403 local_control.sframe = 1;
2404 local_control.super = L2CAP_SUPER_RR;
2405 local_control.poll = 1;
2406 local_control.reqseq = chan->buffer_seq;
2407 l2cap_send_sframe(chan, &local_control);
2409 chan->retry_count = 1;
2410 __set_monitor_timer(chan);
2411 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2414 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2415 l2cap_process_reqseq(chan, control->reqseq);
2417 case L2CAP_EV_EXPLICIT_POLL:
2418 l2cap_send_rr_or_rnr(chan, 1);
2419 chan->retry_count = 1;
2420 __set_monitor_timer(chan);
2421 __clear_ack_timer(chan);
2422 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2424 case L2CAP_EV_RETRANS_TO:
2425 l2cap_send_rr_or_rnr(chan, 1);
2426 chan->retry_count = 1;
2427 __set_monitor_timer(chan);
2428 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2430 case L2CAP_EV_RECV_FBIT:
2431 /* Nothing to process */
2438 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2439 struct l2cap_ctrl *control,
2440 struct sk_buff_head *skbs, u8 event)
2442 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2446 case L2CAP_EV_DATA_REQUEST:
2447 if (chan->tx_send_head == NULL)
2448 chan->tx_send_head = skb_peek(skbs);
2449 /* Queue data, but don't send. */
2450 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2452 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2453 BT_DBG("Enter LOCAL_BUSY");
2454 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2456 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2457 /* The SREJ_SENT state must be aborted if we are to
2458 * enter the LOCAL_BUSY state.
2460 l2cap_abort_rx_srej_sent(chan);
2463 l2cap_send_ack(chan);
2466 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2467 BT_DBG("Exit LOCAL_BUSY");
2468 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2470 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2471 struct l2cap_ctrl local_control;
2472 memset(&local_control, 0, sizeof(local_control));
2473 local_control.sframe = 1;
2474 local_control.super = L2CAP_SUPER_RR;
2475 local_control.poll = 1;
2476 local_control.reqseq = chan->buffer_seq;
2477 l2cap_send_sframe(chan, &local_control);
2479 chan->retry_count = 1;
2480 __set_monitor_timer(chan);
2481 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2484 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2485 l2cap_process_reqseq(chan, control->reqseq);
2489 case L2CAP_EV_RECV_FBIT:
2490 if (control && control->final) {
2491 __clear_monitor_timer(chan);
2492 if (chan->unacked_frames > 0)
2493 __set_retrans_timer(chan);
2494 chan->retry_count = 0;
2495 chan->tx_state = L2CAP_TX_STATE_XMIT;
2496 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2499 case L2CAP_EV_EXPLICIT_POLL:
2502 case L2CAP_EV_MONITOR_TO:
2503 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2504 l2cap_send_rr_or_rnr(chan, 1);
2505 __set_monitor_timer(chan);
2506 chan->retry_count++;
2508 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2516 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2517 struct sk_buff_head *skbs, u8 event)
2519 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2520 chan, control, skbs, event, chan->tx_state);
2522 switch (chan->tx_state) {
2523 case L2CAP_TX_STATE_XMIT:
2524 l2cap_tx_state_xmit(chan, control, skbs, event);
2526 case L2CAP_TX_STATE_WAIT_F:
2527 l2cap_tx_state_wait_f(chan, control, skbs, event);
2535 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2536 struct l2cap_ctrl *control)
2538 BT_DBG("chan %p, control %p", chan, control);
2539 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2542 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2543 struct l2cap_ctrl *control)
2545 BT_DBG("chan %p, control %p", chan, control);
2546 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2549 /* Copy frame to all raw sockets on that connection */
2550 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2552 struct sk_buff *nskb;
2553 struct l2cap_chan *chan;
2555 BT_DBG("conn %p", conn);
2557 mutex_lock(&conn->chan_lock);
2559 list_for_each_entry(chan, &conn->chan_l, list) {
2560 struct sock *sk = chan->sk;
2561 if (chan->chan_type != L2CAP_CHAN_RAW)
2564 /* Don't send frame to the socket it came from */
2567 nskb = skb_clone(skb, GFP_ATOMIC);
2571 if (chan->ops->recv(chan, nskb))
2575 mutex_unlock(&conn->chan_lock);
2578 /* ---- L2CAP signalling commands ---- */
2579 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2580 u8 code, u8 ident, u16 dlen, void *data)
2582 struct sk_buff *skb, **frag;
2583 struct l2cap_cmd_hdr *cmd;
2584 struct l2cap_hdr *lh;
2587 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2588 conn, code, ident, dlen);
2590 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2591 count = min_t(unsigned int, conn->mtu, len);
2593 skb = bt_skb_alloc(count, GFP_ATOMIC);
2597 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2598 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2600 if (conn->hcon->type == LE_LINK)
2601 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2603 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2605 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2608 cmd->len = cpu_to_le16(dlen);
2611 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2612 memcpy(skb_put(skb, count), data, count);
2618 /* Continuation fragments (no L2CAP header) */
2619 frag = &skb_shinfo(skb)->frag_list;
2621 count = min_t(unsigned int, conn->mtu, len);
2623 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2627 memcpy(skb_put(*frag, count), data, count);
2632 frag = &(*frag)->next;
2642 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2644 struct l2cap_conf_opt *opt = *ptr;
2647 len = L2CAP_CONF_OPT_SIZE + opt->len;
2655 *val = *((u8 *) opt->val);
2659 *val = get_unaligned_le16(opt->val);
2663 *val = get_unaligned_le32(opt->val);
2667 *val = (unsigned long) opt->val;
2671 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2675 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2677 struct l2cap_conf_opt *opt = *ptr;
2679 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2686 *((u8 *) opt->val) = val;
2690 put_unaligned_le16(val, opt->val);
2694 put_unaligned_le32(val, opt->val);
2698 memcpy(opt->val, (void *) val, len);
2702 *ptr += L2CAP_CONF_OPT_SIZE + len;
2705 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2707 struct l2cap_conf_efs efs;
2709 switch (chan->mode) {
2710 case L2CAP_MODE_ERTM:
2711 efs.id = chan->local_id;
2712 efs.stype = chan->local_stype;
2713 efs.msdu = cpu_to_le16(chan->local_msdu);
2714 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2715 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2716 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2719 case L2CAP_MODE_STREAMING:
2721 efs.stype = L2CAP_SERV_BESTEFFORT;
2722 efs.msdu = cpu_to_le16(chan->local_msdu);
2723 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2732 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2733 (unsigned long) &efs);
2736 static void l2cap_ack_timeout(struct work_struct *work)
2738 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2742 BT_DBG("chan %p", chan);
2744 l2cap_chan_lock(chan);
2746 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2747 chan->last_acked_seq);
2750 l2cap_send_rr_or_rnr(chan, 0);
2752 l2cap_chan_unlock(chan);
2753 l2cap_chan_put(chan);
2756 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2760 chan->next_tx_seq = 0;
2761 chan->expected_tx_seq = 0;
2762 chan->expected_ack_seq = 0;
2763 chan->unacked_frames = 0;
2764 chan->buffer_seq = 0;
2765 chan->frames_sent = 0;
2766 chan->last_acked_seq = 0;
2768 chan->sdu_last_frag = NULL;
2771 skb_queue_head_init(&chan->tx_q);
2773 if (chan->mode != L2CAP_MODE_ERTM)
2776 chan->rx_state = L2CAP_RX_STATE_RECV;
2777 chan->tx_state = L2CAP_TX_STATE_XMIT;
2779 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2780 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2781 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2783 skb_queue_head_init(&chan->srej_q);
2785 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2789 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2791 l2cap_seq_list_free(&chan->srej_list);
2796 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2799 case L2CAP_MODE_STREAMING:
2800 case L2CAP_MODE_ERTM:
2801 if (l2cap_mode_supported(mode, remote_feat_mask))
2805 return L2CAP_MODE_BASIC;
2809 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2811 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2814 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2816 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2819 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2821 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2822 __l2cap_ews_supported(chan)) {
2823 /* use extended control field */
2824 set_bit(FLAG_EXT_CTRL, &chan->flags);
2825 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2827 chan->tx_win = min_t(u16, chan->tx_win,
2828 L2CAP_DEFAULT_TX_WINDOW);
2829 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2833 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2835 struct l2cap_conf_req *req = data;
2836 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2837 void *ptr = req->data;
2840 BT_DBG("chan %p", chan);
2842 if (chan->num_conf_req || chan->num_conf_rsp)
2845 switch (chan->mode) {
2846 case L2CAP_MODE_STREAMING:
2847 case L2CAP_MODE_ERTM:
2848 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2851 if (__l2cap_efs_supported(chan))
2852 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2856 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2861 if (chan->imtu != L2CAP_DEFAULT_MTU)
2862 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2864 switch (chan->mode) {
2865 case L2CAP_MODE_BASIC:
2866 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2867 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2870 rfc.mode = L2CAP_MODE_BASIC;
2872 rfc.max_transmit = 0;
2873 rfc.retrans_timeout = 0;
2874 rfc.monitor_timeout = 0;
2875 rfc.max_pdu_size = 0;
2877 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2878 (unsigned long) &rfc);
2881 case L2CAP_MODE_ERTM:
2882 rfc.mode = L2CAP_MODE_ERTM;
2883 rfc.max_transmit = chan->max_tx;
2884 rfc.retrans_timeout = 0;
2885 rfc.monitor_timeout = 0;
2887 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2888 L2CAP_EXT_HDR_SIZE -
2891 rfc.max_pdu_size = cpu_to_le16(size);
2893 l2cap_txwin_setup(chan);
2895 rfc.txwin_size = min_t(u16, chan->tx_win,
2896 L2CAP_DEFAULT_TX_WINDOW);
2898 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2899 (unsigned long) &rfc);
2901 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2902 l2cap_add_opt_efs(&ptr, chan);
2904 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2907 if (chan->fcs == L2CAP_FCS_NONE ||
2908 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2909 chan->fcs = L2CAP_FCS_NONE;
2910 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2913 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2914 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2918 case L2CAP_MODE_STREAMING:
2919 l2cap_txwin_setup(chan);
2920 rfc.mode = L2CAP_MODE_STREAMING;
2922 rfc.max_transmit = 0;
2923 rfc.retrans_timeout = 0;
2924 rfc.monitor_timeout = 0;
2926 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2927 L2CAP_EXT_HDR_SIZE -
2930 rfc.max_pdu_size = cpu_to_le16(size);
2932 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2933 (unsigned long) &rfc);
2935 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2936 l2cap_add_opt_efs(&ptr, chan);
2938 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2941 if (chan->fcs == L2CAP_FCS_NONE ||
2942 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2943 chan->fcs = L2CAP_FCS_NONE;
2944 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2949 req->dcid = cpu_to_le16(chan->dcid);
2950 req->flags = __constant_cpu_to_le16(0);
2955 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2957 struct l2cap_conf_rsp *rsp = data;
2958 void *ptr = rsp->data;
2959 void *req = chan->conf_req;
2960 int len = chan->conf_len;
2961 int type, hint, olen;
2963 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2964 struct l2cap_conf_efs efs;
2966 u16 mtu = L2CAP_DEFAULT_MTU;
2967 u16 result = L2CAP_CONF_SUCCESS;
2970 BT_DBG("chan %p", chan);
2972 while (len >= L2CAP_CONF_OPT_SIZE) {
2973 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2975 hint = type & L2CAP_CONF_HINT;
2976 type &= L2CAP_CONF_MASK;
2979 case L2CAP_CONF_MTU:
2983 case L2CAP_CONF_FLUSH_TO:
2984 chan->flush_to = val;
2987 case L2CAP_CONF_QOS:
2990 case L2CAP_CONF_RFC:
2991 if (olen == sizeof(rfc))
2992 memcpy(&rfc, (void *) val, olen);
2995 case L2CAP_CONF_FCS:
2996 if (val == L2CAP_FCS_NONE)
2997 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
3000 case L2CAP_CONF_EFS:
3002 if (olen == sizeof(efs))
3003 memcpy(&efs, (void *) val, olen);
3006 case L2CAP_CONF_EWS:
3008 return -ECONNREFUSED;
3010 set_bit(FLAG_EXT_CTRL, &chan->flags);
3011 set_bit(CONF_EWS_RECV, &chan->conf_state);
3012 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3013 chan->remote_tx_win = val;
3020 result = L2CAP_CONF_UNKNOWN;
3021 *((u8 *) ptr++) = type;
3026 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3029 switch (chan->mode) {
3030 case L2CAP_MODE_STREAMING:
3031 case L2CAP_MODE_ERTM:
3032 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3033 chan->mode = l2cap_select_mode(rfc.mode,
3034 chan->conn->feat_mask);
3039 if (__l2cap_efs_supported(chan))
3040 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3042 return -ECONNREFUSED;
3045 if (chan->mode != rfc.mode)
3046 return -ECONNREFUSED;
3052 if (chan->mode != rfc.mode) {
3053 result = L2CAP_CONF_UNACCEPT;
3054 rfc.mode = chan->mode;
3056 if (chan->num_conf_rsp == 1)
3057 return -ECONNREFUSED;
3059 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3060 sizeof(rfc), (unsigned long) &rfc);
3063 if (result == L2CAP_CONF_SUCCESS) {
3064 /* Configure output options and let the other side know
3065 * which ones we don't like. */
3067 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3068 result = L2CAP_CONF_UNACCEPT;
3071 set_bit(CONF_MTU_DONE, &chan->conf_state);
3073 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3076 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3077 efs.stype != L2CAP_SERV_NOTRAFIC &&
3078 efs.stype != chan->local_stype) {
3080 result = L2CAP_CONF_UNACCEPT;
3082 if (chan->num_conf_req >= 1)
3083 return -ECONNREFUSED;
3085 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3087 (unsigned long) &efs);
3089 /* Send PENDING Conf Rsp */
3090 result = L2CAP_CONF_PENDING;
3091 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3096 case L2CAP_MODE_BASIC:
3097 chan->fcs = L2CAP_FCS_NONE;
3098 set_bit(CONF_MODE_DONE, &chan->conf_state);
3101 case L2CAP_MODE_ERTM:
3102 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3103 chan->remote_tx_win = rfc.txwin_size;
3105 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3107 chan->remote_max_tx = rfc.max_transmit;
3109 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3111 L2CAP_EXT_HDR_SIZE -
3114 rfc.max_pdu_size = cpu_to_le16(size);
3115 chan->remote_mps = size;
3117 rfc.retrans_timeout =
3118 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3119 rfc.monitor_timeout =
3120 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3122 set_bit(CONF_MODE_DONE, &chan->conf_state);
3124 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3125 sizeof(rfc), (unsigned long) &rfc);
3127 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3128 chan->remote_id = efs.id;
3129 chan->remote_stype = efs.stype;
3130 chan->remote_msdu = le16_to_cpu(efs.msdu);
3131 chan->remote_flush_to =
3132 le32_to_cpu(efs.flush_to);
3133 chan->remote_acc_lat =
3134 le32_to_cpu(efs.acc_lat);
3135 chan->remote_sdu_itime =
3136 le32_to_cpu(efs.sdu_itime);
3137 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3138 sizeof(efs), (unsigned long) &efs);
3142 case L2CAP_MODE_STREAMING:
3143 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3145 L2CAP_EXT_HDR_SIZE -
3148 rfc.max_pdu_size = cpu_to_le16(size);
3149 chan->remote_mps = size;
3151 set_bit(CONF_MODE_DONE, &chan->conf_state);
3153 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3154 sizeof(rfc), (unsigned long) &rfc);
3159 result = L2CAP_CONF_UNACCEPT;
3161 memset(&rfc, 0, sizeof(rfc));
3162 rfc.mode = chan->mode;
3165 if (result == L2CAP_CONF_SUCCESS)
3166 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3168 rsp->scid = cpu_to_le16(chan->dcid);
3169 rsp->result = cpu_to_le16(result);
3170 rsp->flags = __constant_cpu_to_le16(0);
3175 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3177 struct l2cap_conf_req *req = data;
3178 void *ptr = req->data;
3181 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3182 struct l2cap_conf_efs efs;
3184 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3186 while (len >= L2CAP_CONF_OPT_SIZE) {
3187 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3190 case L2CAP_CONF_MTU:
3191 if (val < L2CAP_DEFAULT_MIN_MTU) {
3192 *result = L2CAP_CONF_UNACCEPT;
3193 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3196 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3199 case L2CAP_CONF_FLUSH_TO:
3200 chan->flush_to = val;
3201 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3205 case L2CAP_CONF_RFC:
3206 if (olen == sizeof(rfc))
3207 memcpy(&rfc, (void *)val, olen);
3209 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3210 rfc.mode != chan->mode)
3211 return -ECONNREFUSED;
3215 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3216 sizeof(rfc), (unsigned long) &rfc);
3219 case L2CAP_CONF_EWS:
3220 chan->tx_win = min_t(u16, val,
3221 L2CAP_DEFAULT_EXT_WINDOW);
3222 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3226 case L2CAP_CONF_EFS:
3227 if (olen == sizeof(efs))
3228 memcpy(&efs, (void *)val, olen);
3230 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3231 efs.stype != L2CAP_SERV_NOTRAFIC &&
3232 efs.stype != chan->local_stype)
3233 return -ECONNREFUSED;
3235 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3236 sizeof(efs), (unsigned long) &efs);
3241 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3242 return -ECONNREFUSED;
3244 chan->mode = rfc.mode;
3246 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3248 case L2CAP_MODE_ERTM:
3249 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3250 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3251 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3253 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3254 chan->local_msdu = le16_to_cpu(efs.msdu);
3255 chan->local_sdu_itime =
3256 le32_to_cpu(efs.sdu_itime);
3257 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3258 chan->local_flush_to =
3259 le32_to_cpu(efs.flush_to);
3263 case L2CAP_MODE_STREAMING:
3264 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3268 req->dcid = cpu_to_le16(chan->dcid);
3269 req->flags = __constant_cpu_to_le16(0);
3274 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3276 struct l2cap_conf_rsp *rsp = data;
3277 void *ptr = rsp->data;
3279 BT_DBG("chan %p", chan);
3281 rsp->scid = cpu_to_le16(chan->dcid);
3282 rsp->result = cpu_to_le16(result);
3283 rsp->flags = cpu_to_le16(flags);
3288 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3290 struct l2cap_conn_rsp rsp;
3291 struct l2cap_conn *conn = chan->conn;
3294 rsp.scid = cpu_to_le16(chan->dcid);
3295 rsp.dcid = cpu_to_le16(chan->scid);
3296 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3297 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3298 l2cap_send_cmd(conn, chan->ident,
3299 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3301 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3304 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3305 l2cap_build_conf_req(chan, buf), buf);
3306 chan->num_conf_req++;
3309 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3313 struct l2cap_conf_rfc rfc;
3315 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3317 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3320 while (len >= L2CAP_CONF_OPT_SIZE) {
3321 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3324 case L2CAP_CONF_RFC:
3325 if (olen == sizeof(rfc))
3326 memcpy(&rfc, (void *)val, olen);
3331 /* Use sane default values in case a misbehaving remote device
3332 * did not send an RFC option.
3334 rfc.mode = chan->mode;
3335 rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3336 rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3337 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3339 BT_ERR("Expected RFC option was not found, using defaults");
3343 case L2CAP_MODE_ERTM:
3344 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3345 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3346 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3348 case L2CAP_MODE_STREAMING:
3349 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3353 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3355 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3357 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3360 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3361 cmd->ident == conn->info_ident) {
3362 cancel_delayed_work(&conn->info_timer);
3364 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3365 conn->info_ident = 0;
3367 l2cap_conn_start(conn);
3373 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3375 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3376 struct l2cap_conn_rsp rsp;
3377 struct l2cap_chan *chan = NULL, *pchan;
3378 struct sock *parent, *sk = NULL;
3379 int result, status = L2CAP_CS_NO_INFO;
3381 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3382 __le16 psm = req->psm;
3384 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3386 /* Check if we have socket listening on psm */
3387 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3389 result = L2CAP_CR_BAD_PSM;
3395 mutex_lock(&conn->chan_lock);
3398 /* Check if the ACL is secure enough (if not SDP) */
3399 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3400 !hci_conn_check_link_mode(conn->hcon)) {
3401 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3402 result = L2CAP_CR_SEC_BLOCK;
3406 result = L2CAP_CR_NO_MEM;
3408 /* Check for backlog size */
3409 if (sk_acceptq_is_full(parent)) {
3410 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3414 chan = pchan->ops->new_connection(pchan);
3420 /* Check if we already have channel with that dcid */
3421 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3422 sock_set_flag(sk, SOCK_ZAPPED);
3423 chan->ops->close(chan);
3427 hci_conn_hold(conn->hcon);
3429 bacpy(&bt_sk(sk)->src, conn->src);
3430 bacpy(&bt_sk(sk)->dst, conn->dst);
3434 bt_accept_enqueue(parent, sk);
3436 __l2cap_chan_add(conn, chan);
3440 __set_chan_timer(chan, sk->sk_sndtimeo);
3442 chan->ident = cmd->ident;
3444 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3445 if (l2cap_chan_check_security(chan)) {
3446 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3447 __l2cap_state_change(chan, BT_CONNECT2);
3448 result = L2CAP_CR_PEND;
3449 status = L2CAP_CS_AUTHOR_PEND;
3450 parent->sk_data_ready(parent, 0);
3452 __l2cap_state_change(chan, BT_CONFIG);
3453 result = L2CAP_CR_SUCCESS;
3454 status = L2CAP_CS_NO_INFO;
3457 __l2cap_state_change(chan, BT_CONNECT2);
3458 result = L2CAP_CR_PEND;
3459 status = L2CAP_CS_AUTHEN_PEND;
3462 __l2cap_state_change(chan, BT_CONNECT2);
3463 result = L2CAP_CR_PEND;
3464 status = L2CAP_CS_NO_INFO;
3468 release_sock(parent);
3469 mutex_unlock(&conn->chan_lock);
3472 rsp.scid = cpu_to_le16(scid);
3473 rsp.dcid = cpu_to_le16(dcid);
3474 rsp.result = cpu_to_le16(result);
3475 rsp.status = cpu_to_le16(status);
3476 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3478 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3479 struct l2cap_info_req info;
3480 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3482 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3483 conn->info_ident = l2cap_get_ident(conn);
3485 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3487 l2cap_send_cmd(conn, conn->info_ident,
3488 L2CAP_INFO_REQ, sizeof(info), &info);
3491 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3492 result == L2CAP_CR_SUCCESS) {
3494 set_bit(CONF_REQ_SENT, &chan->conf_state);
3495 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3496 l2cap_build_conf_req(chan, buf), buf);
3497 chan->num_conf_req++;
3503 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3505 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3506 u16 scid, dcid, result, status;
3507 struct l2cap_chan *chan;
3511 scid = __le16_to_cpu(rsp->scid);
3512 dcid = __le16_to_cpu(rsp->dcid);
3513 result = __le16_to_cpu(rsp->result);
3514 status = __le16_to_cpu(rsp->status);
3516 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3517 dcid, scid, result, status);
3519 mutex_lock(&conn->chan_lock);
3522 chan = __l2cap_get_chan_by_scid(conn, scid);
3528 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3537 l2cap_chan_lock(chan);
3540 case L2CAP_CR_SUCCESS:
3541 l2cap_state_change(chan, BT_CONFIG);
3544 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3546 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3549 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3550 l2cap_build_conf_req(chan, req), req);
3551 chan->num_conf_req++;
3555 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3559 l2cap_chan_del(chan, ECONNREFUSED);
3563 l2cap_chan_unlock(chan);
3566 mutex_unlock(&conn->chan_lock);
3571 static inline void set_default_fcs(struct l2cap_chan *chan)
3573 /* FCS is enabled only in ERTM or streaming mode, if one or both
3576 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3577 chan->fcs = L2CAP_FCS_NONE;
3578 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3579 chan->fcs = L2CAP_FCS_CRC16;
3582 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3584 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3587 struct l2cap_chan *chan;
3590 dcid = __le16_to_cpu(req->dcid);
3591 flags = __le16_to_cpu(req->flags);
3593 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3595 chan = l2cap_get_chan_by_scid(conn, dcid);
3599 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3600 struct l2cap_cmd_rej_cid rej;
3602 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3603 rej.scid = cpu_to_le16(chan->scid);
3604 rej.dcid = cpu_to_le16(chan->dcid);
3606 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3611 /* Reject if config buffer is too small. */
3612 len = cmd_len - sizeof(*req);
3613 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3614 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3615 l2cap_build_conf_rsp(chan, rsp,
3616 L2CAP_CONF_REJECT, flags), rsp);
3621 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3622 chan->conf_len += len;
3624 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3625 /* Incomplete config. Send empty response. */
3626 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3627 l2cap_build_conf_rsp(chan, rsp,
3628 L2CAP_CONF_SUCCESS, flags), rsp);
3632 /* Complete config. */
3633 len = l2cap_parse_conf_req(chan, rsp);
3635 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3639 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3640 chan->num_conf_rsp++;
3642 /* Reset config buffer. */
3645 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3648 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3649 set_default_fcs(chan);
3651 if (chan->mode == L2CAP_MODE_ERTM ||
3652 chan->mode == L2CAP_MODE_STREAMING)
3653 err = l2cap_ertm_init(chan);
3656 l2cap_send_disconn_req(chan->conn, chan, -err);
3658 l2cap_chan_ready(chan);
3663 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3665 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3666 l2cap_build_conf_req(chan, buf), buf);
3667 chan->num_conf_req++;
3670 /* Got Conf Rsp PENDING from remote side and asume we sent
3671 Conf Rsp PENDING in the code above */
3672 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3673 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3675 /* check compatibility */
3677 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3678 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3680 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3681 l2cap_build_conf_rsp(chan, rsp,
3682 L2CAP_CONF_SUCCESS, flags), rsp);
3686 l2cap_chan_unlock(chan);
3690 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3692 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3693 u16 scid, flags, result;
3694 struct l2cap_chan *chan;
3695 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3698 scid = __le16_to_cpu(rsp->scid);
3699 flags = __le16_to_cpu(rsp->flags);
3700 result = __le16_to_cpu(rsp->result);
3702 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3705 chan = l2cap_get_chan_by_scid(conn, scid);
3710 case L2CAP_CONF_SUCCESS:
3711 l2cap_conf_rfc_get(chan, rsp->data, len);
3712 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3715 case L2CAP_CONF_PENDING:
3716 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3718 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3721 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3724 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3728 /* check compatibility */
3730 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3731 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3733 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3734 l2cap_build_conf_rsp(chan, buf,
3735 L2CAP_CONF_SUCCESS, 0x0000), buf);
3739 case L2CAP_CONF_UNACCEPT:
3740 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3743 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3744 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3748 /* throw out any old stored conf requests */
3749 result = L2CAP_CONF_SUCCESS;
3750 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3753 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3757 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3758 L2CAP_CONF_REQ, len, req);
3759 chan->num_conf_req++;
3760 if (result != L2CAP_CONF_SUCCESS)
3766 l2cap_chan_set_err(chan, ECONNRESET);
3768 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3769 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3773 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3776 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3778 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3779 set_default_fcs(chan);
3781 if (chan->mode == L2CAP_MODE_ERTM ||
3782 chan->mode == L2CAP_MODE_STREAMING)
3783 err = l2cap_ertm_init(chan);
3786 l2cap_send_disconn_req(chan->conn, chan, -err);
3788 l2cap_chan_ready(chan);
3792 l2cap_chan_unlock(chan);
3796 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3798 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3799 struct l2cap_disconn_rsp rsp;
3801 struct l2cap_chan *chan;
3804 scid = __le16_to_cpu(req->scid);
3805 dcid = __le16_to_cpu(req->dcid);
3807 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3809 mutex_lock(&conn->chan_lock);
3811 chan = __l2cap_get_chan_by_scid(conn, dcid);
3813 mutex_unlock(&conn->chan_lock);
3817 l2cap_chan_lock(chan);
3821 rsp.dcid = cpu_to_le16(chan->scid);
3822 rsp.scid = cpu_to_le16(chan->dcid);
3823 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3826 sk->sk_shutdown = SHUTDOWN_MASK;
3829 l2cap_chan_hold(chan);
3830 l2cap_chan_del(chan, ECONNRESET);
3832 l2cap_chan_unlock(chan);
3834 chan->ops->close(chan);
3835 l2cap_chan_put(chan);
3837 mutex_unlock(&conn->chan_lock);
3842 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3844 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3846 struct l2cap_chan *chan;
3848 scid = __le16_to_cpu(rsp->scid);
3849 dcid = __le16_to_cpu(rsp->dcid);
3851 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3853 mutex_lock(&conn->chan_lock);
3855 chan = __l2cap_get_chan_by_scid(conn, scid);
3857 mutex_unlock(&conn->chan_lock);
3861 l2cap_chan_lock(chan);
3863 l2cap_chan_hold(chan);
3864 l2cap_chan_del(chan, 0);
3866 l2cap_chan_unlock(chan);
3868 chan->ops->close(chan);
3869 l2cap_chan_put(chan);
3871 mutex_unlock(&conn->chan_lock);
3876 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3878 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3881 type = __le16_to_cpu(req->type);
3883 BT_DBG("type 0x%4.4x", type);
3885 if (type == L2CAP_IT_FEAT_MASK) {
3887 u32 feat_mask = l2cap_feat_mask;
3888 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3889 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3890 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3892 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3895 feat_mask |= L2CAP_FEAT_EXT_FLOW
3896 | L2CAP_FEAT_EXT_WINDOW;
3898 put_unaligned_le32(feat_mask, rsp->data);
3899 l2cap_send_cmd(conn, cmd->ident,
3900 L2CAP_INFO_RSP, sizeof(buf), buf);
3901 } else if (type == L2CAP_IT_FIXED_CHAN) {
3903 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3906 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3908 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3910 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3911 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3912 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3913 l2cap_send_cmd(conn, cmd->ident,
3914 L2CAP_INFO_RSP, sizeof(buf), buf);
3916 struct l2cap_info_rsp rsp;
3917 rsp.type = cpu_to_le16(type);
3918 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3919 l2cap_send_cmd(conn, cmd->ident,
3920 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3926 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3928 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3931 type = __le16_to_cpu(rsp->type);
3932 result = __le16_to_cpu(rsp->result);
3934 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3936 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3937 if (cmd->ident != conn->info_ident ||
3938 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3941 cancel_delayed_work(&conn->info_timer);
3943 if (result != L2CAP_IR_SUCCESS) {
3944 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3945 conn->info_ident = 0;
3947 l2cap_conn_start(conn);
3953 case L2CAP_IT_FEAT_MASK:
3954 conn->feat_mask = get_unaligned_le32(rsp->data);
3956 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3957 struct l2cap_info_req req;
3958 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3960 conn->info_ident = l2cap_get_ident(conn);
3962 l2cap_send_cmd(conn, conn->info_ident,
3963 L2CAP_INFO_REQ, sizeof(req), &req);
3965 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3966 conn->info_ident = 0;
3968 l2cap_conn_start(conn);
3972 case L2CAP_IT_FIXED_CHAN:
3973 conn->fixed_chan_mask = rsp->data[0];
3974 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3975 conn->info_ident = 0;
3977 l2cap_conn_start(conn);
3984 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3985 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3988 struct l2cap_create_chan_req *req = data;
3989 struct l2cap_create_chan_rsp rsp;
3992 if (cmd_len != sizeof(*req))
3998 psm = le16_to_cpu(req->psm);
3999 scid = le16_to_cpu(req->scid);
4001 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
4003 /* Placeholder: Always reject */
4005 rsp.scid = cpu_to_le16(scid);
4006 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4007 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4009 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4015 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
4016 struct l2cap_cmd_hdr *cmd, void *data)
4018 BT_DBG("conn %p", conn);
4020 return l2cap_connect_rsp(conn, cmd, data);
4023 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4024 u16 icid, u16 result)
4026 struct l2cap_move_chan_rsp rsp;
4028 BT_DBG("icid %d, result %d", icid, result);
4030 rsp.icid = cpu_to_le16(icid);
4031 rsp.result = cpu_to_le16(result);
4033 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4036 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4037 struct l2cap_chan *chan, u16 icid, u16 result)
4039 struct l2cap_move_chan_cfm cfm;
4042 BT_DBG("icid %d, result %d", icid, result);
4044 ident = l2cap_get_ident(conn);
4046 chan->ident = ident;
4048 cfm.icid = cpu_to_le16(icid);
4049 cfm.result = cpu_to_le16(result);
4051 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4054 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4057 struct l2cap_move_chan_cfm_rsp rsp;
4059 BT_DBG("icid %d", icid);
4061 rsp.icid = cpu_to_le16(icid);
4062 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4065 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4066 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4068 struct l2cap_move_chan_req *req = data;
4070 u16 result = L2CAP_MR_NOT_ALLOWED;
4072 if (cmd_len != sizeof(*req))
4075 icid = le16_to_cpu(req->icid);
4077 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4082 /* Placeholder: Always refuse */
4083 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4088 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4089 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4091 struct l2cap_move_chan_rsp *rsp = data;
4094 if (cmd_len != sizeof(*rsp))
4097 icid = le16_to_cpu(rsp->icid);
4098 result = le16_to_cpu(rsp->result);
4100 BT_DBG("icid %d, result %d", icid, result);
4102 /* Placeholder: Always unconfirmed */
4103 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4108 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4109 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4111 struct l2cap_move_chan_cfm *cfm = data;
4114 if (cmd_len != sizeof(*cfm))
4117 icid = le16_to_cpu(cfm->icid);
4118 result = le16_to_cpu(cfm->result);
4120 BT_DBG("icid %d, result %d", icid, result);
4122 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4127 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4128 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4130 struct l2cap_move_chan_cfm_rsp *rsp = data;
4133 if (cmd_len != sizeof(*rsp))
4136 icid = le16_to_cpu(rsp->icid);
4138 BT_DBG("icid %d", icid);
4143 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4148 if (min > max || min < 6 || max > 3200)
4151 if (to_multiplier < 10 || to_multiplier > 3200)
4154 if (max >= to_multiplier * 8)
4157 max_latency = (to_multiplier * 8 / max) - 1;
4158 if (latency > 499 || latency > max_latency)
4164 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4165 struct l2cap_cmd_hdr *cmd, u8 *data)
4167 struct hci_conn *hcon = conn->hcon;
4168 struct l2cap_conn_param_update_req *req;
4169 struct l2cap_conn_param_update_rsp rsp;
4170 u16 min, max, latency, to_multiplier, cmd_len;
4173 if (!(hcon->link_mode & HCI_LM_MASTER))
4176 cmd_len = __le16_to_cpu(cmd->len);
4177 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4180 req = (struct l2cap_conn_param_update_req *) data;
4181 min = __le16_to_cpu(req->min);
4182 max = __le16_to_cpu(req->max);
4183 latency = __le16_to_cpu(req->latency);
4184 to_multiplier = __le16_to_cpu(req->to_multiplier);
4186 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4187 min, max, latency, to_multiplier);
4189 memset(&rsp, 0, sizeof(rsp));
4191 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4193 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4195 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4197 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4201 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4206 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4207 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4211 switch (cmd->code) {
4212 case L2CAP_COMMAND_REJ:
4213 l2cap_command_rej(conn, cmd, data);
4216 case L2CAP_CONN_REQ:
4217 err = l2cap_connect_req(conn, cmd, data);
4220 case L2CAP_CONN_RSP:
4221 err = l2cap_connect_rsp(conn, cmd, data);
4224 case L2CAP_CONF_REQ:
4225 err = l2cap_config_req(conn, cmd, cmd_len, data);
4228 case L2CAP_CONF_RSP:
4229 err = l2cap_config_rsp(conn, cmd, data);
4232 case L2CAP_DISCONN_REQ:
4233 err = l2cap_disconnect_req(conn, cmd, data);
4236 case L2CAP_DISCONN_RSP:
4237 err = l2cap_disconnect_rsp(conn, cmd, data);
4240 case L2CAP_ECHO_REQ:
4241 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4244 case L2CAP_ECHO_RSP:
4247 case L2CAP_INFO_REQ:
4248 err = l2cap_information_req(conn, cmd, data);
4251 case L2CAP_INFO_RSP:
4252 err = l2cap_information_rsp(conn, cmd, data);
4255 case L2CAP_CREATE_CHAN_REQ:
4256 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4259 case L2CAP_CREATE_CHAN_RSP:
4260 err = l2cap_create_channel_rsp(conn, cmd, data);
4263 case L2CAP_MOVE_CHAN_REQ:
4264 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4267 case L2CAP_MOVE_CHAN_RSP:
4268 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4271 case L2CAP_MOVE_CHAN_CFM:
4272 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4275 case L2CAP_MOVE_CHAN_CFM_RSP:
4276 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4280 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4288 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4289 struct l2cap_cmd_hdr *cmd, u8 *data)
4291 switch (cmd->code) {
4292 case L2CAP_COMMAND_REJ:
4295 case L2CAP_CONN_PARAM_UPDATE_REQ:
4296 return l2cap_conn_param_update_req(conn, cmd, data);
4298 case L2CAP_CONN_PARAM_UPDATE_RSP:
4302 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4307 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4308 struct sk_buff *skb)
4310 u8 *data = skb->data;
4312 struct l2cap_cmd_hdr cmd;
4315 l2cap_raw_recv(conn, skb);
4317 while (len >= L2CAP_CMD_HDR_SIZE) {
4319 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4320 data += L2CAP_CMD_HDR_SIZE;
4321 len -= L2CAP_CMD_HDR_SIZE;
4323 cmd_len = le16_to_cpu(cmd.len);
4325 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
4327 if (cmd_len > len || !cmd.ident) {
4328 BT_DBG("corrupted command");
4332 if (conn->hcon->type == LE_LINK)
4333 err = l2cap_le_sig_cmd(conn, &cmd, data);
4335 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4338 struct l2cap_cmd_rej_unk rej;
4340 BT_ERR("Wrong link type (%d)", err);
4342 /* FIXME: Map err to a valid reason */
4343 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4344 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4354 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4356 u16 our_fcs, rcv_fcs;
4359 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4360 hdr_size = L2CAP_EXT_HDR_SIZE;
4362 hdr_size = L2CAP_ENH_HDR_SIZE;
4364 if (chan->fcs == L2CAP_FCS_CRC16) {
4365 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4366 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4367 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4369 if (our_fcs != rcv_fcs)
4375 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4377 struct l2cap_ctrl control;
4379 BT_DBG("chan %p", chan);
4381 memset(&control, 0, sizeof(control));
4384 control.reqseq = chan->buffer_seq;
4385 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4387 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4388 control.super = L2CAP_SUPER_RNR;
4389 l2cap_send_sframe(chan, &control);
4392 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4393 chan->unacked_frames > 0)
4394 __set_retrans_timer(chan);
4396 /* Send pending iframes */
4397 l2cap_ertm_send(chan);
4399 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4400 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4401 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4404 control.super = L2CAP_SUPER_RR;
4405 l2cap_send_sframe(chan, &control);
4409 static void append_skb_frag(struct sk_buff *skb,
4410 struct sk_buff *new_frag, struct sk_buff **last_frag)
4412 /* skb->len reflects data in skb as well as all fragments
4413 * skb->data_len reflects only data in fragments
4415 if (!skb_has_frag_list(skb))
4416 skb_shinfo(skb)->frag_list = new_frag;
4418 new_frag->next = NULL;
4420 (*last_frag)->next = new_frag;
4421 *last_frag = new_frag;
4423 skb->len += new_frag->len;
4424 skb->data_len += new_frag->len;
4425 skb->truesize += new_frag->truesize;
4428 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4429 struct l2cap_ctrl *control)
4433 switch (control->sar) {
4434 case L2CAP_SAR_UNSEGMENTED:
4438 err = chan->ops->recv(chan, skb);
4441 case L2CAP_SAR_START:
4445 chan->sdu_len = get_unaligned_le16(skb->data);
4446 skb_pull(skb, L2CAP_SDULEN_SIZE);
4448 if (chan->sdu_len > chan->imtu) {
4453 if (skb->len >= chan->sdu_len)
4457 chan->sdu_last_frag = skb;
4463 case L2CAP_SAR_CONTINUE:
4467 append_skb_frag(chan->sdu, skb,
4468 &chan->sdu_last_frag);
4471 if (chan->sdu->len >= chan->sdu_len)
4481 append_skb_frag(chan->sdu, skb,
4482 &chan->sdu_last_frag);
4485 if (chan->sdu->len != chan->sdu_len)
4488 err = chan->ops->recv(chan, chan->sdu);
4491 /* Reassembly complete */
4493 chan->sdu_last_frag = NULL;
4501 kfree_skb(chan->sdu);
4503 chan->sdu_last_frag = NULL;
4510 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4514 if (chan->mode != L2CAP_MODE_ERTM)
4517 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4518 l2cap_tx(chan, NULL, NULL, event);
4521 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4524 /* Pass sequential frames to l2cap_reassemble_sdu()
4525 * until a gap is encountered.
4528 BT_DBG("chan %p", chan);
4530 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4531 struct sk_buff *skb;
4532 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4533 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4535 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4540 skb_unlink(skb, &chan->srej_q);
4541 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4542 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4547 if (skb_queue_empty(&chan->srej_q)) {
4548 chan->rx_state = L2CAP_RX_STATE_RECV;
4549 l2cap_send_ack(chan);
4555 static void l2cap_handle_srej(struct l2cap_chan *chan,
4556 struct l2cap_ctrl *control)
4558 struct sk_buff *skb;
4560 BT_DBG("chan %p, control %p", chan, control);
4562 if (control->reqseq == chan->next_tx_seq) {
4563 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4564 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4568 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4571 BT_DBG("Seq %d not available for retransmission",
4576 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4577 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4578 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4582 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4584 if (control->poll) {
4585 l2cap_pass_to_tx(chan, control);
4587 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4588 l2cap_retransmit(chan, control);
4589 l2cap_ertm_send(chan);
4591 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4592 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4593 chan->srej_save_reqseq = control->reqseq;
4596 l2cap_pass_to_tx_fbit(chan, control);
4598 if (control->final) {
4599 if (chan->srej_save_reqseq != control->reqseq ||
4600 !test_and_clear_bit(CONN_SREJ_ACT,
4602 l2cap_retransmit(chan, control);
4604 l2cap_retransmit(chan, control);
4605 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4606 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4607 chan->srej_save_reqseq = control->reqseq;
4613 static void l2cap_handle_rej(struct l2cap_chan *chan,
4614 struct l2cap_ctrl *control)
4616 struct sk_buff *skb;
4618 BT_DBG("chan %p, control %p", chan, control);
4620 if (control->reqseq == chan->next_tx_seq) {
4621 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4622 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4626 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4628 if (chan->max_tx && skb &&
4629 bt_cb(skb)->control.retries >= chan->max_tx) {
4630 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4631 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4635 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4637 l2cap_pass_to_tx(chan, control);
4639 if (control->final) {
4640 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4641 l2cap_retransmit_all(chan, control);
4643 l2cap_retransmit_all(chan, control);
4644 l2cap_ertm_send(chan);
4645 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4646 set_bit(CONN_REJ_ACT, &chan->conn_state);
4650 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4652 BT_DBG("chan %p, txseq %d", chan, txseq);
4654 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4655 chan->expected_tx_seq);
4657 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4658 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4660 /* See notes below regarding "double poll" and
4663 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4664 BT_DBG("Invalid/Ignore - after SREJ");
4665 return L2CAP_TXSEQ_INVALID_IGNORE;
4667 BT_DBG("Invalid - in window after SREJ sent");
4668 return L2CAP_TXSEQ_INVALID;
4672 if (chan->srej_list.head == txseq) {
4673 BT_DBG("Expected SREJ");
4674 return L2CAP_TXSEQ_EXPECTED_SREJ;
4677 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4678 BT_DBG("Duplicate SREJ - txseq already stored");
4679 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4682 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4683 BT_DBG("Unexpected SREJ - not requested");
4684 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4688 if (chan->expected_tx_seq == txseq) {
4689 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4691 BT_DBG("Invalid - txseq outside tx window");
4692 return L2CAP_TXSEQ_INVALID;
4695 return L2CAP_TXSEQ_EXPECTED;
4699 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4700 __seq_offset(chan, chan->expected_tx_seq,
4701 chan->last_acked_seq)){
4702 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4703 return L2CAP_TXSEQ_DUPLICATE;
4706 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4707 /* A source of invalid packets is a "double poll" condition,
4708 * where delays cause us to send multiple poll packets. If
4709 * the remote stack receives and processes both polls,
4710 * sequence numbers can wrap around in such a way that a
4711 * resent frame has a sequence number that looks like new data
4712 * with a sequence gap. This would trigger an erroneous SREJ
4715 * Fortunately, this is impossible with a tx window that's
4716 * less than half of the maximum sequence number, which allows
4717 * invalid frames to be safely ignored.
4719 * With tx window sizes greater than half of the tx window
4720 * maximum, the frame is invalid and cannot be ignored. This
4721 * causes a disconnect.
4724 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4725 BT_DBG("Invalid/Ignore - txseq outside tx window");
4726 return L2CAP_TXSEQ_INVALID_IGNORE;
4728 BT_DBG("Invalid - txseq outside tx window");
4729 return L2CAP_TXSEQ_INVALID;
4732 BT_DBG("Unexpected - txseq indicates missing frames");
4733 return L2CAP_TXSEQ_UNEXPECTED;
4737 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4738 struct l2cap_ctrl *control,
4739 struct sk_buff *skb, u8 event)
4742 bool skb_in_use = 0;
4744 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4748 case L2CAP_EV_RECV_IFRAME:
4749 switch (l2cap_classify_txseq(chan, control->txseq)) {
4750 case L2CAP_TXSEQ_EXPECTED:
4751 l2cap_pass_to_tx(chan, control);
4753 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4754 BT_DBG("Busy, discarding expected seq %d",
4759 chan->expected_tx_seq = __next_seq(chan,
4762 chan->buffer_seq = chan->expected_tx_seq;
4765 err = l2cap_reassemble_sdu(chan, skb, control);
4769 if (control->final) {
4770 if (!test_and_clear_bit(CONN_REJ_ACT,
4771 &chan->conn_state)) {
4773 l2cap_retransmit_all(chan, control);
4774 l2cap_ertm_send(chan);
4778 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4779 l2cap_send_ack(chan);
4781 case L2CAP_TXSEQ_UNEXPECTED:
4782 l2cap_pass_to_tx(chan, control);
4784 /* Can't issue SREJ frames in the local busy state.
4785 * Drop this frame, it will be seen as missing
4786 * when local busy is exited.
4788 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4789 BT_DBG("Busy, discarding unexpected seq %d",
4794 /* There was a gap in the sequence, so an SREJ
4795 * must be sent for each missing frame. The
4796 * current frame is stored for later use.
4798 skb_queue_tail(&chan->srej_q, skb);
4800 BT_DBG("Queued %p (queue len %d)", skb,
4801 skb_queue_len(&chan->srej_q));
4803 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4804 l2cap_seq_list_clear(&chan->srej_list);
4805 l2cap_send_srej(chan, control->txseq);
4807 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4809 case L2CAP_TXSEQ_DUPLICATE:
4810 l2cap_pass_to_tx(chan, control);
4812 case L2CAP_TXSEQ_INVALID_IGNORE:
4814 case L2CAP_TXSEQ_INVALID:
4816 l2cap_send_disconn_req(chan->conn, chan,
4821 case L2CAP_EV_RECV_RR:
4822 l2cap_pass_to_tx(chan, control);
4823 if (control->final) {
4824 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4826 if (!test_and_clear_bit(CONN_REJ_ACT,
4827 &chan->conn_state)) {
4829 l2cap_retransmit_all(chan, control);
4832 l2cap_ertm_send(chan);
4833 } else if (control->poll) {
4834 l2cap_send_i_or_rr_or_rnr(chan);
4836 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4837 &chan->conn_state) &&
4838 chan->unacked_frames)
4839 __set_retrans_timer(chan);
4841 l2cap_ertm_send(chan);
4844 case L2CAP_EV_RECV_RNR:
4845 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4846 l2cap_pass_to_tx(chan, control);
4847 if (control && control->poll) {
4848 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4849 l2cap_send_rr_or_rnr(chan, 0);
4851 __clear_retrans_timer(chan);
4852 l2cap_seq_list_clear(&chan->retrans_list);
4854 case L2CAP_EV_RECV_REJ:
4855 l2cap_handle_rej(chan, control);
4857 case L2CAP_EV_RECV_SREJ:
4858 l2cap_handle_srej(chan, control);
4864 if (skb && !skb_in_use) {
4865 BT_DBG("Freeing %p", skb);
4872 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4873 struct l2cap_ctrl *control,
4874 struct sk_buff *skb, u8 event)
4877 u16 txseq = control->txseq;
4878 bool skb_in_use = 0;
4880 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4884 case L2CAP_EV_RECV_IFRAME:
4885 switch (l2cap_classify_txseq(chan, txseq)) {
4886 case L2CAP_TXSEQ_EXPECTED:
4887 /* Keep frame for reassembly later */
4888 l2cap_pass_to_tx(chan, control);
4889 skb_queue_tail(&chan->srej_q, skb);
4891 BT_DBG("Queued %p (queue len %d)", skb,
4892 skb_queue_len(&chan->srej_q));
4894 chan->expected_tx_seq = __next_seq(chan, txseq);
4896 case L2CAP_TXSEQ_EXPECTED_SREJ:
4897 l2cap_seq_list_pop(&chan->srej_list);
4899 l2cap_pass_to_tx(chan, control);
4900 skb_queue_tail(&chan->srej_q, skb);
4902 BT_DBG("Queued %p (queue len %d)", skb,
4903 skb_queue_len(&chan->srej_q));
4905 err = l2cap_rx_queued_iframes(chan);
4910 case L2CAP_TXSEQ_UNEXPECTED:
4911 /* Got a frame that can't be reassembled yet.
4912 * Save it for later, and send SREJs to cover
4913 * the missing frames.
4915 skb_queue_tail(&chan->srej_q, skb);
4917 BT_DBG("Queued %p (queue len %d)", skb,
4918 skb_queue_len(&chan->srej_q));
4920 l2cap_pass_to_tx(chan, control);
4921 l2cap_send_srej(chan, control->txseq);
4923 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4924 /* This frame was requested with an SREJ, but
4925 * some expected retransmitted frames are
4926 * missing. Request retransmission of missing
4929 skb_queue_tail(&chan->srej_q, skb);
4931 BT_DBG("Queued %p (queue len %d)", skb,
4932 skb_queue_len(&chan->srej_q));
4934 l2cap_pass_to_tx(chan, control);
4935 l2cap_send_srej_list(chan, control->txseq);
4937 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4938 /* We've already queued this frame. Drop this copy. */
4939 l2cap_pass_to_tx(chan, control);
4941 case L2CAP_TXSEQ_DUPLICATE:
4942 /* Expecting a later sequence number, so this frame
4943 * was already received. Ignore it completely.
4946 case L2CAP_TXSEQ_INVALID_IGNORE:
4948 case L2CAP_TXSEQ_INVALID:
4950 l2cap_send_disconn_req(chan->conn, chan,
4955 case L2CAP_EV_RECV_RR:
4956 l2cap_pass_to_tx(chan, control);
4957 if (control->final) {
4958 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4960 if (!test_and_clear_bit(CONN_REJ_ACT,
4961 &chan->conn_state)) {
4963 l2cap_retransmit_all(chan, control);
4966 l2cap_ertm_send(chan);
4967 } else if (control->poll) {
4968 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4969 &chan->conn_state) &&
4970 chan->unacked_frames) {
4971 __set_retrans_timer(chan);
4974 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4975 l2cap_send_srej_tail(chan);
4977 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4978 &chan->conn_state) &&
4979 chan->unacked_frames)
4980 __set_retrans_timer(chan);
4982 l2cap_send_ack(chan);
4985 case L2CAP_EV_RECV_RNR:
4986 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4987 l2cap_pass_to_tx(chan, control);
4988 if (control->poll) {
4989 l2cap_send_srej_tail(chan);
4991 struct l2cap_ctrl rr_control;
4992 memset(&rr_control, 0, sizeof(rr_control));
4993 rr_control.sframe = 1;
4994 rr_control.super = L2CAP_SUPER_RR;
4995 rr_control.reqseq = chan->buffer_seq;
4996 l2cap_send_sframe(chan, &rr_control);
5000 case L2CAP_EV_RECV_REJ:
5001 l2cap_handle_rej(chan, control);
5003 case L2CAP_EV_RECV_SREJ:
5004 l2cap_handle_srej(chan, control);
5008 if (skb && !skb_in_use) {
5009 BT_DBG("Freeing %p", skb);
5016 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5018 /* Make sure reqseq is for a packet that has been sent but not acked */
5021 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5022 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5025 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5026 struct sk_buff *skb, u8 event)
5030 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5031 control, skb, event, chan->rx_state);
5033 if (__valid_reqseq(chan, control->reqseq)) {
5034 switch (chan->rx_state) {
5035 case L2CAP_RX_STATE_RECV:
5036 err = l2cap_rx_state_recv(chan, control, skb, event);
5038 case L2CAP_RX_STATE_SREJ_SENT:
5039 err = l2cap_rx_state_srej_sent(chan, control, skb,
5047 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5048 control->reqseq, chan->next_tx_seq,
5049 chan->expected_ack_seq);
5050 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5056 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5057 struct sk_buff *skb)
5061 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5064 if (l2cap_classify_txseq(chan, control->txseq) ==
5065 L2CAP_TXSEQ_EXPECTED) {
5066 l2cap_pass_to_tx(chan, control);
5068 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5069 __next_seq(chan, chan->buffer_seq));
5071 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5073 l2cap_reassemble_sdu(chan, skb, control);
5076 kfree_skb(chan->sdu);
5079 chan->sdu_last_frag = NULL;
5083 BT_DBG("Freeing %p", skb);
5088 chan->last_acked_seq = control->txseq;
5089 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5094 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5096 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5100 __unpack_control(chan, skb);
5105 * We can just drop the corrupted I-frame here.
5106 * Receiver will miss it and start proper recovery
5107 * procedures and ask for retransmission.
5109 if (l2cap_check_fcs(chan, skb))
5112 if (!control->sframe && control->sar == L2CAP_SAR_START)
5113 len -= L2CAP_SDULEN_SIZE;
5115 if (chan->fcs == L2CAP_FCS_CRC16)
5116 len -= L2CAP_FCS_SIZE;
5118 if (len > chan->mps) {
5119 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5123 if (!control->sframe) {
5126 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5127 control->sar, control->reqseq, control->final,
5130 /* Validate F-bit - F=0 always valid, F=1 only
5131 * valid in TX WAIT_F
5133 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5136 if (chan->mode != L2CAP_MODE_STREAMING) {
5137 event = L2CAP_EV_RECV_IFRAME;
5138 err = l2cap_rx(chan, control, skb, event);
5140 err = l2cap_stream_rx(chan, control, skb);
5144 l2cap_send_disconn_req(chan->conn, chan,
5147 const u8 rx_func_to_event[4] = {
5148 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5149 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5152 /* Only I-frames are expected in streaming mode */
5153 if (chan->mode == L2CAP_MODE_STREAMING)
5156 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5157 control->reqseq, control->final, control->poll,
5162 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5166 /* Validate F and P bits */
5167 if (control->final && (control->poll ||
5168 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5171 event = rx_func_to_event[control->super];
5172 if (l2cap_rx(chan, control, skb, event))
5173 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5183 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
5185 struct l2cap_chan *chan;
5187 chan = l2cap_get_chan_by_scid(conn, cid);
5189 BT_DBG("unknown cid 0x%4.4x", cid);
5190 /* Drop packet and return */
5195 BT_DBG("chan %p, len %d", chan, skb->len);
5197 if (chan->state != BT_CONNECTED)
5200 switch (chan->mode) {
5201 case L2CAP_MODE_BASIC:
5202 /* If socket recv buffers overflows we drop data here
5203 * which is *bad* because L2CAP has to be reliable.
5204 * But we don't have any other choice. L2CAP doesn't
5205 * provide flow control mechanism. */
5207 if (chan->imtu < skb->len)
5210 if (!chan->ops->recv(chan, skb))
5214 case L2CAP_MODE_ERTM:
5215 case L2CAP_MODE_STREAMING:
5216 l2cap_data_rcv(chan, skb);
5220 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5228 l2cap_chan_unlock(chan);
5233 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5235 struct l2cap_chan *chan;
5237 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5241 BT_DBG("chan %p, len %d", chan, skb->len);
5243 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5246 if (chan->imtu < skb->len)
5249 if (!chan->ops->recv(chan, skb))
5258 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5259 struct sk_buff *skb)
5261 struct l2cap_chan *chan;
5263 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5267 BT_DBG("chan %p, len %d", chan, skb->len);
5269 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5272 if (chan->imtu < skb->len)
5275 if (!chan->ops->recv(chan, skb))
5284 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5286 struct l2cap_hdr *lh = (void *) skb->data;
5290 skb_pull(skb, L2CAP_HDR_SIZE);
5291 cid = __le16_to_cpu(lh->cid);
5292 len = __le16_to_cpu(lh->len);
5294 if (len != skb->len) {
5299 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5302 case L2CAP_CID_LE_SIGNALING:
5303 case L2CAP_CID_SIGNALING:
5304 l2cap_sig_channel(conn, skb);
5307 case L2CAP_CID_CONN_LESS:
5308 psm = get_unaligned((__le16 *) skb->data);
5310 l2cap_conless_channel(conn, psm, skb);
5313 case L2CAP_CID_LE_DATA:
5314 l2cap_att_channel(conn, cid, skb);
5318 if (smp_sig_channel(conn, skb))
5319 l2cap_conn_del(conn->hcon, EACCES);
5323 l2cap_data_channel(conn, cid, skb);
5328 /* ---- L2CAP interface with lower layer (HCI) ---- */
5330 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5332 int exact = 0, lm1 = 0, lm2 = 0;
5333 struct l2cap_chan *c;
5335 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5337 /* Find listening sockets and check their link_mode */
5338 read_lock(&chan_list_lock);
5339 list_for_each_entry(c, &chan_list, global_l) {
5340 struct sock *sk = c->sk;
5342 if (c->state != BT_LISTEN)
5345 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5346 lm1 |= HCI_LM_ACCEPT;
5347 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5348 lm1 |= HCI_LM_MASTER;
5350 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5351 lm2 |= HCI_LM_ACCEPT;
5352 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5353 lm2 |= HCI_LM_MASTER;
5356 read_unlock(&chan_list_lock);
5358 return exact ? lm1 : lm2;
5361 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5363 struct l2cap_conn *conn;
5365 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5368 conn = l2cap_conn_add(hcon, status);
5370 l2cap_conn_ready(conn);
5372 l2cap_conn_del(hcon, bt_to_errno(status));
5377 int l2cap_disconn_ind(struct hci_conn *hcon)
5379 struct l2cap_conn *conn = hcon->l2cap_data;
5381 BT_DBG("hcon %p", hcon);
5384 return HCI_ERROR_REMOTE_USER_TERM;
5385 return conn->disc_reason;
5388 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5390 BT_DBG("hcon %p reason %d", hcon, reason);
5392 l2cap_conn_del(hcon, bt_to_errno(reason));
5396 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5398 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5401 if (encrypt == 0x00) {
5402 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5403 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5404 } else if (chan->sec_level == BT_SECURITY_HIGH)
5405 l2cap_chan_close(chan, ECONNREFUSED);
5407 if (chan->sec_level == BT_SECURITY_MEDIUM)
5408 __clear_chan_timer(chan);
5412 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5414 struct l2cap_conn *conn = hcon->l2cap_data;
5415 struct l2cap_chan *chan;
5420 BT_DBG("conn %p", conn);
5422 if (hcon->type == LE_LINK) {
5423 if (!status && encrypt)
5424 smp_distribute_keys(conn, 0);
5425 cancel_delayed_work(&conn->security_timer);
5428 mutex_lock(&conn->chan_lock);
5430 list_for_each_entry(chan, &conn->chan_l, list) {
5431 l2cap_chan_lock(chan);
5433 BT_DBG("chan->scid %d", chan->scid);
5435 if (chan->scid == L2CAP_CID_LE_DATA) {
5436 if (!status && encrypt) {
5437 chan->sec_level = hcon->sec_level;
5438 l2cap_chan_ready(chan);
5441 l2cap_chan_unlock(chan);
5445 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5446 l2cap_chan_unlock(chan);
5450 if (!status && (chan->state == BT_CONNECTED ||
5451 chan->state == BT_CONFIG)) {
5452 struct sock *sk = chan->sk;
5454 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5455 sk->sk_state_change(sk);
5457 l2cap_check_encryption(chan, encrypt);
5458 l2cap_chan_unlock(chan);
5462 if (chan->state == BT_CONNECT) {
5464 l2cap_send_conn_req(chan);
5466 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5468 } else if (chan->state == BT_CONNECT2) {
5469 struct sock *sk = chan->sk;
5470 struct l2cap_conn_rsp rsp;
5476 if (test_bit(BT_SK_DEFER_SETUP,
5477 &bt_sk(sk)->flags)) {
5478 struct sock *parent = bt_sk(sk)->parent;
5479 res = L2CAP_CR_PEND;
5480 stat = L2CAP_CS_AUTHOR_PEND;
5482 parent->sk_data_ready(parent, 0);
5484 __l2cap_state_change(chan, BT_CONFIG);
5485 res = L2CAP_CR_SUCCESS;
5486 stat = L2CAP_CS_NO_INFO;
5489 __l2cap_state_change(chan, BT_DISCONN);
5490 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5491 res = L2CAP_CR_SEC_BLOCK;
5492 stat = L2CAP_CS_NO_INFO;
5497 rsp.scid = cpu_to_le16(chan->dcid);
5498 rsp.dcid = cpu_to_le16(chan->scid);
5499 rsp.result = cpu_to_le16(res);
5500 rsp.status = cpu_to_le16(stat);
5501 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5504 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5505 res == L2CAP_CR_SUCCESS) {
5507 set_bit(CONF_REQ_SENT, &chan->conf_state);
5508 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5510 l2cap_build_conf_req(chan, buf),
5512 chan->num_conf_req++;
5516 l2cap_chan_unlock(chan);
5519 mutex_unlock(&conn->chan_lock);
5524 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5526 struct l2cap_conn *conn = hcon->l2cap_data;
5529 conn = l2cap_conn_add(hcon, 0);
5534 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5536 if (!(flags & ACL_CONT)) {
5537 struct l2cap_hdr *hdr;
5541 BT_ERR("Unexpected start frame (len %d)", skb->len);
5542 kfree_skb(conn->rx_skb);
5543 conn->rx_skb = NULL;
5545 l2cap_conn_unreliable(conn, ECOMM);
5548 /* Start fragment always begin with Basic L2CAP header */
5549 if (skb->len < L2CAP_HDR_SIZE) {
5550 BT_ERR("Frame is too short (len %d)", skb->len);
5551 l2cap_conn_unreliable(conn, ECOMM);
5555 hdr = (struct l2cap_hdr *) skb->data;
5556 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5558 if (len == skb->len) {
5559 /* Complete frame received */
5560 l2cap_recv_frame(conn, skb);
5564 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5566 if (skb->len > len) {
5567 BT_ERR("Frame is too long (len %d, expected len %d)",
5569 l2cap_conn_unreliable(conn, ECOMM);
5573 /* Allocate skb for the complete frame (with header) */
5574 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5578 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5580 conn->rx_len = len - skb->len;
5582 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5584 if (!conn->rx_len) {
5585 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5586 l2cap_conn_unreliable(conn, ECOMM);
5590 if (skb->len > conn->rx_len) {
5591 BT_ERR("Fragment is too long (len %d, expected %d)",
5592 skb->len, conn->rx_len);
5593 kfree_skb(conn->rx_skb);
5594 conn->rx_skb = NULL;
5596 l2cap_conn_unreliable(conn, ECOMM);
5600 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5602 conn->rx_len -= skb->len;
5604 if (!conn->rx_len) {
5605 /* Complete frame received */
5606 l2cap_recv_frame(conn, conn->rx_skb);
5607 conn->rx_skb = NULL;
5616 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5618 struct l2cap_chan *c;
5620 read_lock(&chan_list_lock);
5622 list_for_each_entry(c, &chan_list, global_l) {
5623 struct sock *sk = c->sk;
5625 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5626 batostr(&bt_sk(sk)->src),
5627 batostr(&bt_sk(sk)->dst),
5628 c->state, __le16_to_cpu(c->psm),
5629 c->scid, c->dcid, c->imtu, c->omtu,
5630 c->sec_level, c->mode);
5633 read_unlock(&chan_list_lock);
5638 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5640 return single_open(file, l2cap_debugfs_show, inode->i_private);
5643 static const struct file_operations l2cap_debugfs_fops = {
5644 .open = l2cap_debugfs_open,
5646 .llseek = seq_lseek,
5647 .release = single_release,
5650 static struct dentry *l2cap_debugfs;
5652 int __init l2cap_init(void)
5656 err = l2cap_init_sockets();
5661 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5662 bt_debugfs, NULL, &l2cap_debugfs_fops);
5664 BT_ERR("Failed to create L2CAP debug file");
5670 void l2cap_exit(void)
5672 debugfs_remove(l2cap_debugfs);
5673 l2cap_cleanup_sockets();
5676 module_param(disable_ertm, bool, 0644);
5677 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");