2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */

/* NOTE(review): this excerpt drops many original lines (braces, locals,
 * match/return statements); comments describe only the visible code. */

/* Walk conn->chan_l looking for the channel whose destination CID matches.
 * Caller presumably holds conn->chan_lock — TODO confirm; no lock is taken
 * in the visible fragment. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {

/* Same per-connection scan, keyed on the source CID instead. */
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 list_for_each_entry(c, &conn->chan_l, list) {

100 /* Find channel with given SCID.
101 * Returns locked channel. */
/* Locking wrapper: serializes the SCID lookup under conn->chan_lock. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
110 mutex_unlock(&conn->chan_lock);

/* Find the channel that owns a pending signalling-command identifier. */
115 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
117 struct l2cap_chan *c;
119 list_for_each_entry(c, &conn->chan_l, list) {
120 if (c->ident == ident)

/* Global (cross-connection) lookup on the chan_list: a channel bound to
 * this PSM (sport) on the given source address. Callers appear to hold
 * chan_list_lock (see l2cap_add_psm below) — TODO confirm. */
126 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
128 struct l2cap_chan *c;
130 list_for_each_entry(c, &chan_list, global_l) {
131 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a PSM to the channel under chan_list_lock. If @psm is nonzero the
 * visible code rejects a duplicate (same PSM + source address); otherwise
 * it scans the dynamic range 0x1001..0x10ff in steps of 2 (PSMs must be
 * odd) for a free value. NOTE(review): several lines are missing from this
 * excerpt, including the error-path bodies. */
137 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
141 write_lock(&chan_list_lock);
143 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
156 for (p = 0x1001; p < 0x1100; p += 2)
157 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
158 chan->psm = cpu_to_le16(p);
159 chan->sport = cpu_to_le16(p);
166 write_unlock(&chan_list_lock);

/* Record a fixed SCID on the channel; the assignment itself is not visible
 * in this excerpt, only the chan_list_lock bracketing. */
170 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
172 write_lock(&chan_list_lock);
176 write_unlock(&chan_list_lock);

/* Allocate the first unused dynamic source CID on this connection by
 * probing L2CAP_CID_DYN_START..L2CAP_CID_DYN_END with the SCID lookup. */
181 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
183 u16 cid = L2CAP_CID_DYN_START;
185 for (; cid < L2CAP_CID_DYN_END; cid++) {
186 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Unlocked state transition: logs old -> new state and notifies the
 * channel owner via the ops->state_change callback. */
193 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
195 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
196 state_to_string(state));
199 chan->ops->state_change(chan->data, state);

/* Locked wrapper around __l2cap_state_change; the lock/unlock lines
 * (presumably on the socket) are missing from this excerpt — TODO confirm. */
202 static void l2cap_state_change(struct l2cap_chan *chan, int state)
204 struct sock *sk = chan->sk;
207 __l2cap_state_change(chan, state);

/* Unlocked error propagation to the channel's socket (the sk_err
 * assignment itself is not visible here). */
211 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
213 struct sock *sk = chan->sk;

/* Locked wrapper around __l2cap_chan_set_err. */
218 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
220 struct sock *sk = chan->sk;
223 __l2cap_chan_set_err(chan, err);
227 /* ---- L2CAP sequence number lists ---- */
229 /* For ERTM, ordered lists of sequence numbers must be tracked for
230 * SREJ requests that are received and for frames that are to be
231 * retransmitted. These seq_list functions implement a singly-linked
232 * list in an array, where membership in the list can also be checked
233 * in constant time. Items can also be added to the tail of the list
234 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array for a sequence-number list. The
 * power-of-two allocation lets "seq & mask" map a 14-bit sequence number
 * into the smaller array. Returns 0 on success (return lines are missing
 * from this excerpt, as is the NULL check after kmalloc — TODO confirm
 * the allocation failure path in the full source). */
238 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
240 size_t alloc_size, i;
242 /* Allocated size is a power of 2 to map sequence numbers
243 * (which may be up to 14 bits) in to a smaller array that is
244 * sized for the negotiated ERTM transmit windows.
246 alloc_size = roundup_pow_of_two(size);
248 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
252 seq_list->mask = alloc_size - 1;
253 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
254 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
255 for (i = 0; i < alloc_size; i++)
256 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

/* Release the backing array (kfree(NULL) is a safe no-op). */
261 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
263 kfree(seq_list->list);

/* O(1) membership test: a slot equal to L2CAP_SEQ_LIST_CLEAR is not on
 * the list; anything else is a "next" link or the TAIL sentinel. */
266 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
269 /* Constant-time check for list membership */
270 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;

/* Remove @seq from the singly-linked list-in-array. Head removal is O(1);
 * removing an interior element walks the links from head. Returns
 * L2CAP_SEQ_LIST_CLEAR when the list is empty or @seq is absent. */
273 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
275 u16 mask = seq_list->mask;
277 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
278 /* In case someone tries to pop the head of an empty list */
279 return L2CAP_SEQ_LIST_CLEAR;
280 } else if (seq_list->head == seq) {
281 /* Head can be removed in constant time */
282 seq_list->head = seq_list->list[seq & mask];
283 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
285 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
286 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
287 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
290 /* Walk the list to find the sequence number */
291 u16 prev = seq_list->head;
292 while (seq_list->list[prev & mask] != seq) {
293 prev = seq_list->list[prev & mask];
294 if (prev == L2CAP_SEQ_LIST_TAIL)
295 return L2CAP_SEQ_LIST_CLEAR;
298 /* Unlink the number from the list and clear it */
299 seq_list->list[prev & mask] = seq_list->list[seq & mask];
300 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->tail == seq)
302 seq_list->tail = prev;

/* Pop the current head; empty-list handling is done inside _remove. */
307 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
309 /* Remove the head in constant time */
310 return l2cap_seq_list_remove(seq_list, seq_list->head);

/* Reset the whole structure to empty; skips the O(n) sweep when the
 * list is already empty (head == CLEAR). */
313 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
317 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
320 for (i = 0; i <= seq_list->mask; i++)
321 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
323 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
324 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

/* O(1) append: link the old tail's slot to @seq and mark @seq's own slot
 * with the TAIL sentinel. Silently ignores a seq that is already listed
 * (slot not CLEAR) — duplicates would corrupt the links. */
327 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
329 u16 mask = seq_list->mask;
331 /* All appends happen in constant time */
333 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
336 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
337 seq_list->head = seq;
339 seq_list->list[seq_list->tail & mask] = seq;
341 seq_list->tail = seq;
342 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with a
 * state-dependent reason (ECONNREFUSED for connected/config or a
 * non-SDP connect attempt; other paths are missing from this excerpt),
 * then notifies the owner and drops the timer's channel reference.
 * Both conn->chan_lock and the channel lock are held around the close. */
345 static void l2cap_chan_timeout(struct work_struct *work)
347 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
349 struct l2cap_conn *conn = chan->conn;
352 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
354 mutex_lock(&conn->chan_lock);
355 l2cap_chan_lock(chan);
357 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
358 reason = ECONNREFUSED;
359 else if (chan->state == BT_CONNECT &&
360 chan->sec_level != BT_SECURITY_SDP)
361 reason = ECONNREFUSED;
365 l2cap_chan_close(chan, reason);
367 l2cap_chan_unlock(chan);
/* ops->close is called after dropping the channel lock but still under
 * conn->chan_lock. */
369 chan->ops->close(chan->data);
370 mutex_unlock(&conn->chan_lock);
/* Balances the reference the timer path holds on the channel. */
372 l2cap_chan_put(chan);
/* Allocate a zeroed channel, register it on the global chan_list, arm the
 * chan_timer work item, and start it in BT_OPEN with refcount 1. The NULL
 * check after kzalloc and the final return are missing from this excerpt. */
375 struct l2cap_chan *l2cap_chan_create(void)
377 struct l2cap_chan *chan;
379 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
383 mutex_init(&chan->lock);
385 write_lock(&chan_list_lock);
386 list_add(&chan->global_l, &chan_list);
387 write_unlock(&chan_list_lock);
389 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
391 chan->state = BT_OPEN;
393 atomic_set(&chan->refcnt, 1);
395 BT_DBG("chan %p", chan);

/* Unregister from the global list and drop the creation reference. */
400 void l2cap_chan_destroy(struct l2cap_chan *chan)
402 write_lock(&chan_list_lock);
403 list_del(&chan->global_l);
404 write_unlock(&chan_list_lock);
406 l2cap_chan_put(chan);

/* Seed a channel with the stack's default ERTM/security parameters. */
409 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
411 chan->fcs = L2CAP_FCS_CRC16;
412 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
413 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
414 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
415 chan->sec_level = BT_SECURITY_LOW;
417 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection: assign CIDs/MTU by channel type
 * (LE data, dynamically allocated ACL CID, connectionless, or raw
 * signalling), seed default QoS parameters, take a channel reference,
 * and link it onto conn->chan_l. Caller must hold conn->chan_lock (the
 * locked wrapper below does exactly that). */
420 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
422 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
423 __le16_to_cpu(chan->psm), chan->dcid);
425 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
429 switch (chan->chan_type) {
430 case L2CAP_CHAN_CONN_ORIENTED:
431 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data CID in both directions. */
433 chan->omtu = L2CAP_LE_DEFAULT_MTU;
434 chan->scid = L2CAP_CID_LE_DATA;
435 chan->dcid = L2CAP_CID_LE_DATA;
437 /* Alloc CID for connection-oriented socket */
438 chan->scid = l2cap_alloc_cid(conn);
439 chan->omtu = L2CAP_DEFAULT_MTU;
443 case L2CAP_CHAN_CONN_LESS:
444 /* Connectionless socket */
445 chan->scid = L2CAP_CID_CONN_LESS;
446 chan->dcid = L2CAP_CID_CONN_LESS;
447 chan->omtu = L2CAP_DEFAULT_MTU;
451 /* Raw socket can send/recv signalling messages only */
452 chan->scid = L2CAP_CID_SIGNALING;
453 chan->dcid = L2CAP_CID_SIGNALING;
454 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort service defaults, applied to every channel type. */
457 chan->local_id = L2CAP_BESTEFFORT_ID;
458 chan->local_stype = L2CAP_SERV_BESTEFFORT;
459 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
460 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
461 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
462 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* The connection's channel list owns a reference. */
464 l2cap_chan_hold(chan);
466 list_add(&chan->list, &conn->chan_l);

/* Locked wrapper: add the channel under conn->chan_lock. */
469 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
471 mutex_lock(&conn->chan_lock);
472 __l2cap_chan_add(conn, chan);
473 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop the channel timer, unlink
 * from conn->chan_l (dropping the list's reference and the hci_conn
 * reference), move to BT_CLOSED, zap the socket, wake a listening parent
 * if any, and — unless configuration finished on both sides — purge
 * pending queues plus all ERTM timers/lists. NOTE(review): many lines
 * (locking, conditionals guarding these steps) are missing from this
 * excerpt, so the exact guard conditions cannot be confirmed here. */
476 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
478 struct sock *sk = chan->sk;
479 struct l2cap_conn *conn = chan->conn;
480 struct sock *parent = bt_sk(sk)->parent;
482 __clear_chan_timer(chan);
484 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
487 /* Delete from channel list */
488 list_del(&chan->list);
490 l2cap_chan_put(chan);
493 hci_conn_put(conn->hcon);
498 __l2cap_state_change(chan, BT_CLOSED);
499 sock_set_flag(sk, SOCK_ZAPPED);
502 __l2cap_chan_set_err(chan, err);
505 bt_accept_unlink(sk);
506 parent->sk_data_ready(parent, 0);
508 sk->sk_state_change(sk);
512 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
513 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
516 skb_queue_purge(&chan->tx_q);
518 if (chan->mode == L2CAP_MODE_ERTM) {
519 struct srej_list *l, *tmp;
/* ERTM teardown: kill retransmission machinery and free SREJ state. */
521 __clear_retrans_timer(chan);
522 __clear_monitor_timer(chan);
523 __clear_ack_timer(chan);
525 skb_queue_purge(&chan->srej_q);
527 l2cap_seq_list_free(&chan->srej_list);
528 l2cap_seq_list_free(&chan->retrans_list);
529 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Drain a listening socket's accept queue, force-closing every not-yet
 * accepted child channel with ECONNRESET. */
536 static void l2cap_chan_cleanup_listen(struct sock *parent)
540 BT_DBG("parent %p", parent);
542 /* Close not yet accepted channels */
543 while ((sk = bt_accept_dequeue(parent, NULL))) {
544 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
546 l2cap_chan_lock(chan);
547 __clear_chan_timer(chan);
548 l2cap_chan_close(chan, ECONNRESET);
549 l2cap_chan_unlock(chan);
551 chan->ops->close(chan->data);

/* State-dependent channel close. Visible paths: listening sockets clean
 * their backlog and go straight to BT_CLOSED; an established ACL
 * connection-oriented channel sends a Disconnect Request (keeping a timer
 * armed for the response); a half-open incoming channel (BT_CONNECT2 in
 * the full source — the case labels are missing here) answers the pending
 * Connect Request with SEC_BLOCK or BAD_PSM before deletion; remaining
 * states just delete or zap. Caller holds the channel lock — TODO confirm. */
555 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
557 struct l2cap_conn *conn = chan->conn;
558 struct sock *sk = chan->sk;
560 BT_DBG("chan %p state %s sk %p", chan,
561 state_to_string(chan->state), sk);
563 switch (chan->state) {
566 l2cap_chan_cleanup_listen(sk);
568 __l2cap_state_change(chan, BT_CLOSED);
569 sock_set_flag(sk, SOCK_ZAPPED);
575 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
576 conn->hcon->type == ACL_LINK) {
577 __set_chan_timer(chan, sk->sk_sndtimeo);
578 l2cap_send_disconn_req(conn, chan, reason);
580 l2cap_chan_del(chan, reason);
584 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
585 conn->hcon->type == ACL_LINK) {
586 struct l2cap_conn_rsp rsp;
589 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
590 result = L2CAP_CR_SEC_BLOCK;
592 result = L2CAP_CR_BAD_PSM;
593 l2cap_state_change(chan, BT_DISCONN);
/* Note: response fields are swapped relative to our view — our dcid is
 * the peer's scid and vice versa. */
595 rsp.scid = cpu_to_le16(chan->dcid);
596 rsp.dcid = cpu_to_le16(chan->scid);
597 rsp.result = cpu_to_le16(result);
598 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
599 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
603 l2cap_chan_del(chan, reason);
608 l2cap_chan_del(chan, reason);
613 sock_set_flag(sk, SOCK_ZAPPED);
/* Map a channel's type, PSM, and security level onto an HCI authentication
 * requirement. Raw channels map to dedicated bonding; PSM 0x0001 (SDP) is
 * demoted to BT_SECURITY_SDP and never bonds; everything else uses general
 * bonding, with MITM variants at BT_SECURITY_HIGH. */
619 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
621 if (chan->chan_type == L2CAP_CHAN_RAW) {
622 switch (chan->sec_level) {
623 case BT_SECURITY_HIGH:
624 return HCI_AT_DEDICATED_BONDING_MITM;
625 case BT_SECURITY_MEDIUM:
626 return HCI_AT_DEDICATED_BONDING;
628 return HCI_AT_NO_BONDING;
630 } else if (chan->psm == cpu_to_le16(0x0001)) {
/* Side effect: quietly lowers LOW to SDP-level security for SDP PSM. */
631 if (chan->sec_level == BT_SECURITY_LOW)
632 chan->sec_level = BT_SECURITY_SDP;
634 if (chan->sec_level == BT_SECURITY_HIGH)
635 return HCI_AT_NO_BONDING_MITM;
637 return HCI_AT_NO_BONDING;
639 switch (chan->sec_level) {
640 case BT_SECURITY_HIGH:
641 return HCI_AT_GENERAL_BONDING_MITM;
642 case BT_SECURITY_MEDIUM:
643 return HCI_AT_GENERAL_BONDING;
645 return HCI_AT_NO_BONDING;

650 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level with the
 * auth type derived above. */
651 int l2cap_chan_check_security(struct l2cap_chan *chan)
653 struct l2cap_conn *conn = chan->conn;
656 auth_type = l2cap_get_auth_type(chan);
658 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier under conn->lock, wrapping
 * within the kernel's 1-128 range (the reset-to-1 line is missing from
 * this excerpt). */
661 static u8 l2cap_get_ident(struct l2cap_conn *conn)
665 /* Get next available identificator.
666 * 1 - 128 are used by kernel.
667 * 129 - 199 are reserved.
668 * 200 - 254 are used by utilities like l2ping, etc.
671 spin_lock(&conn->lock);
673 if (++conn->tx_ident > 128)
678 spin_unlock(&conn->lock);

/* Build and transmit one signalling command on the connection's HCI
 * channel at maximum priority; prefers non-flushable ACL start when the
 * controller supports it. The NULL check on the built skb is not visible
 * in this excerpt. */
683 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
685 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
688 BT_DBG("code 0x%2.2x", code);
693 if (lmp_no_flush_capable(conn->hcon->hdev))
694 flags = ACL_START_NO_FLUSH;
698 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
699 skb->priority = HCI_PRIO_MAX;
701 hci_send_acl(conn->hchan, skb, flags);

/* Transmit one data frame for a channel, honoring the FLUSHABLE and
 * FORCE_ACTIVE channel flags when choosing ACL flags. */
704 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
706 struct hci_conn *hcon = chan->conn->hcon;
709 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
712 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
713 lmp_no_flush_capable(hcon->hdev))
714 flags = ACL_START_NO_FLUSH;
718 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
719 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into l2cap_ctrl: reqseq/final
 * are common; S-frames (FRAME_TYPE set) carry poll/supervise, I-frames
 * carry sar/txseq. The sframe-flag assignments are missing from this
 * excerpt. */
722 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
724 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
725 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
727 if (enh & L2CAP_CTRL_FRAME_TYPE) {
730 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
731 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
738 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
739 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

/* Same decode for the 32-bit extended control field. */
746 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
748 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
749 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
751 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
754 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
755 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
762 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
763 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

/* Decode the control field at the front of an incoming skb into
 * bt_cb(skb)->control, choosing 32- vs 16-bit layout by FLAG_EXT_CTRL. */
770 static inline void __unpack_control(struct l2cap_chan *chan,
773 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
774 __unpack_extended_control(get_unaligned_le32(skb->data),
775 &bt_cb(skb)->control);
777 __unpack_enhanced_control(get_unaligned_le16(skb->data),
778 &bt_cb(skb)->control);

/* Inverse of __unpack_extended_control: encode l2cap_ctrl into the 32-bit
 * on-air extended control format. */
782 static u32 __pack_extended_control(struct l2cap_ctrl *control)
786 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
787 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
789 if (control->sframe) {
790 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
791 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
792 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
794 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
795 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;

/* 16-bit enhanced-control encoder, mirroring the extended version. */
801 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
805 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
806 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
808 if (control->sframe) {
809 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
810 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
811 packed |= L2CAP_CTRL_FRAME_TYPE;
813 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
814 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;

/* Write the packed control field into the skb just past the basic L2CAP
 * header, sized per FLAG_EXT_CTRL; unaligned stores handle any skb
 * alignment. */
820 static inline void __pack_control(struct l2cap_chan *chan,
821 struct l2cap_ctrl *control,
824 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
825 put_unaligned_le32(__pack_extended_control(control),
826 skb->data + L2CAP_HDR_SIZE);
828 put_unaligned_le16(__pack_enhanced_control(control),
829 skb->data + L2CAP_HDR_SIZE);
/* Build and send one ERTM S-frame: header size depends on extended
 * control and optional CRC16 FCS; pending F-bit/P-bit requests
 * (CONN_SEND_FBIT/PBIT) are folded into the control word on the way out.
 * Only valid while the channel is BT_CONNECTED. The skb-allocation
 * failure check is not visible in this excerpt. */
833 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
836 struct l2cap_hdr *lh;
837 struct l2cap_conn *conn = chan->conn;
840 if (chan->state != BT_CONNECTED)
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
844 hlen = L2CAP_EXT_HDR_SIZE;
846 hlen = L2CAP_ENH_HDR_SIZE;
848 if (chan->fcs == L2CAP_FCS_CRC16)
849 hlen += L2CAP_FCS_SIZE;
851 BT_DBG("chan %p, control 0x%8.8x", chan, control);
853 count = min_t(unsigned int, conn->mtu, hlen);
855 control |= __set_sframe(chan);
857 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
858 control |= __set_ctrl_final(chan);
860 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
861 control |= __set_ctrl_poll(chan);
863 skb = bt_skb_alloc(count, GFP_ATOMIC);
867 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
868 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
869 lh->cid = cpu_to_le16(chan->dcid);
871 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
873 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers header + control, i.e. everything before the FCS itself. */
874 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
875 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
878 skb->priority = HCI_PRIO_MAX;
879 l2cap_do_send(chan, skb);

/* Send RNR while locally busy (remembering we did via CONN_RNR_SENT),
 * otherwise RR; always acknowledges up to buffer_seq. */
882 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
884 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
885 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
886 set_bit(CONN_RNR_SENT, &chan->conn_state);
888 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
890 control |= __set_reqseq(chan, chan->buffer_seq);
892 l2cap_send_sframe(chan, control);

/* True when no Connect Request is outstanding for this channel. */
895 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
897 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);

/* Issue an L2CAP Connect Request: fresh ident, mark CONNECT_PEND so we
 * don't double-send. (The req.psm assignment is missing from this
 * excerpt.) */
900 static void l2cap_send_conn_req(struct l2cap_chan *chan)
902 struct l2cap_conn *conn = chan->conn;
903 struct l2cap_conn_req req;
905 req.scid = cpu_to_le16(chan->scid);
908 chan->ident = l2cap_get_ident(conn);
910 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
912 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Mark a channel fully connected: clear config state and timer, move to
 * BT_CONNECTED, wake the socket, and (when there is an accepting parent)
 * signal it data-ready. */
915 static void l2cap_chan_ready(struct l2cap_chan *chan)
917 struct sock *sk = chan->sk;
922 parent = bt_sk(sk)->parent;
924 BT_DBG("sk %p, parent %p", sk, parent);
926 chan->conf_state = 0;
927 __clear_chan_timer(chan);
929 __l2cap_state_change(chan, BT_CONNECTED);
930 sk->sk_state_change(sk);
933 parent->sk_data_ready(parent, 0);

/* Kick off connection establishment. LE links are ready immediately.
 * For BR/EDR: once the feature-mask exchange has completed and security
 * passes, send the Connect Request; otherwise start the Information
 * Request (feature mask) exchange with its timeout first. */
938 static void l2cap_do_start(struct l2cap_chan *chan)
940 struct l2cap_conn *conn = chan->conn;
942 if (conn->hcon->type == LE_LINK) {
943 l2cap_chan_ready(chan);
947 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
948 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
951 if (l2cap_chan_check_security(chan) &&
952 __l2cap_no_conn_pending(chan))
953 l2cap_send_conn_req(chan);
955 struct l2cap_info_req req;
956 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
958 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
959 conn->info_ident = l2cap_get_ident(conn);
961 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
963 l2cap_send_cmd(conn, conn->info_ident,
964 L2CAP_INFO_REQ, sizeof(req), &req);

/* Check whether a channel mode is supported by both our (forced-on ERTM/
 * streaming) feature mask and the peer's. Non-ERTM modes fall through to
 * a default not visible in this excerpt. */
968 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
970 u32 local_feat_mask = l2cap_feat_mask;
972 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
975 case L2CAP_MODE_ERTM:
976 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
977 case L2CAP_MODE_STREAMING:
978 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;

/* Send a Disconnect Request for the channel, first quiescing ERTM timers,
 * then move the channel to BT_DISCONN with the given error. (Locking
 * lines around the state change are missing from this excerpt.) */
984 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
986 struct sock *sk = chan->sk;
987 struct l2cap_disconn_req req;
992 if (chan->mode == L2CAP_MODE_ERTM) {
993 __clear_retrans_timer(chan);
994 __clear_monitor_timer(chan);
995 __clear_ack_timer(chan);
998 req.dcid = cpu_to_le16(chan->dcid);
999 req.scid = cpu_to_le16(chan->scid);
1000 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1001 L2CAP_DISCONN_REQ, sizeof(req), &req);
1004 __l2cap_state_change(chan, BT_DISCONN);
1005 __l2cap_chan_set_err(chan, err);
1009 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on this connection forward,
 * typically after the feature exchange or a security event. BT_CONNECT
 * channels (re)send their Connect Request once security passes — or are
 * closed if the negotiated mode is unsupported on a state-2 device.
 * BT_CONNECT2 channels answer their pending incoming Connect Request
 * (success, authorization-pending for deferred setup, or
 * authentication-pending) and, on success, fire the first Configure
 * Request. Runs under conn->chan_lock with each channel locked in turn. */
1010 static void l2cap_conn_start(struct l2cap_conn *conn)
1012 struct l2cap_chan *chan, *tmp;
1014 BT_DBG("conn %p", conn);
1016 mutex_lock(&conn->chan_lock);
1018 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1019 struct sock *sk = chan->sk;
1021 l2cap_chan_lock(chan);
1023 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1024 l2cap_chan_unlock(chan);
1028 if (chan->state == BT_CONNECT) {
1029 if (!l2cap_chan_check_security(chan) ||
1030 !__l2cap_no_conn_pending(chan)) {
1031 l2cap_chan_unlock(chan);
1035 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1036 && test_bit(CONF_STATE2_DEVICE,
1037 &chan->conf_state)) {
1038 l2cap_chan_close(chan, ECONNRESET);
1039 l2cap_chan_unlock(chan);
1043 l2cap_send_conn_req(chan);
1045 } else if (chan->state == BT_CONNECT2) {
1046 struct l2cap_conn_rsp rsp;
1048 rsp.scid = cpu_to_le16(chan->dcid);
1049 rsp.dcid = cpu_to_le16(chan->scid);
1051 if (l2cap_chan_check_security(chan)) {
1053 if (test_bit(BT_SK_DEFER_SETUP,
1054 &bt_sk(sk)->flags)) {
/* Deferred setup: hold at PEND/AUTHOR_PEND and let userspace accept. */
1055 struct sock *parent = bt_sk(sk)->parent;
1056 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1057 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1059 parent->sk_data_ready(parent, 0);
1062 __l2cap_state_change(chan, BT_CONFIG);
1063 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1064 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1068 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1069 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1072 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1075 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1076 rsp.result != L2CAP_CR_SUCCESS) {
1077 l2cap_chan_unlock(chan);
1081 set_bit(CONF_REQ_SENT, &chan->conf_state);
1082 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1083 l2cap_build_conf_req(chan, buf), buf);
1084 chan->num_conf_req++;
1087 l2cap_chan_unlock(chan);
1090 mutex_unlock(&conn->chan_lock);
1093 /* Find socket with cid and source/destination bdaddr.
1094 * Returns closest match, locked.
/* Global lookup by SCID + addresses with wildcard fallback: an exact
 * src/dst match returns immediately; otherwise the best BDADDR_ANY
 * match seen (tracked in c1, assignment lines not visible here) is
 * returned after the scan. */
1096 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1100 struct l2cap_chan *c, *c1 = NULL;
1102 read_lock(&chan_list_lock);
1104 list_for_each_entry(c, &chan_list, global_l) {
1105 struct sock *sk = c->sk;
1107 if (state && c->state != state)
1110 if (c->scid == cid) {
1111 int src_match, dst_match;
1112 int src_any, dst_any;
1115 src_match = !bacmp(&bt_sk(sk)->src, src);
1116 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1117 if (src_match && dst_match) {
1118 read_unlock(&chan_list_lock);
1123 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1124 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1125 if ((src_match && dst_any) || (src_any && dst_match) ||
1126 (src_any && dst_any))
1131 read_unlock(&chan_list_lock);

/* Accept an incoming LE connection: find a listener on the LE data CID,
 * reject when the accept backlog is full, spawn a child channel via
 * ops->new_connection, copy the link addresses onto it, enqueue it on the
 * parent's accept queue, and mark it connected. NOTE(review): error
 * paths and the parent lock acquisition are missing from this excerpt
 * (only release_sock is visible). */
1136 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1138 struct sock *parent, *sk;
1139 struct l2cap_chan *chan, *pchan;
1143 /* Check if we have socket listening on cid */
1144 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1145 conn->src, conn->dst);
1153 /* Check for backlog size */
1154 if (sk_acceptq_is_full(parent)) {
1155 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1159 chan = pchan->ops->new_connection(pchan->data);
1165 hci_conn_hold(conn->hcon);
1167 bacpy(&bt_sk(sk)->src, conn->src);
1168 bacpy(&bt_sk(sk)->dst, conn->dst);
1170 bt_accept_enqueue(parent, sk);
1172 l2cap_chan_add(conn, chan);
1174 __set_chan_timer(chan, sk->sk_sndtimeo);
1176 __l2cap_state_change(chan, BT_CONNECTED);
1177 parent->sk_data_ready(parent, 0);
1180 release_sock(parent);
/* Link-up notification: handle LE accept (incoming) or SMP elevation
 * (outgoing), then walk all channels — LE channels become ready once SMP
 * security passes, non-connection-oriented channels go straight to
 * BT_CONNECTED, and BT_CONNECT channels start the connect sequence. */
1183 static void l2cap_conn_ready(struct l2cap_conn *conn)
1185 struct l2cap_chan *chan;
1187 BT_DBG("conn %p", conn);
1189 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1190 l2cap_le_conn_ready(conn);
1192 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1193 smp_conn_security(conn, conn->hcon->pending_sec_level);
1195 mutex_lock(&conn->chan_lock);
1197 list_for_each_entry(chan, &conn->chan_l, list) {
1199 l2cap_chan_lock(chan);
1201 if (conn->hcon->type == LE_LINK) {
1202 if (smp_conn_security(conn, chan->sec_level))
1203 l2cap_chan_ready(chan);
1205 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1206 struct sock *sk = chan->sk;
1207 __clear_chan_timer(chan);
1209 __l2cap_state_change(chan, BT_CONNECTED);
1210 sk->sk_state_change(sk);
1213 } else if (chan->state == BT_CONNECT)
1214 l2cap_do_start(chan);
1216 l2cap_chan_unlock(chan);
1219 mutex_unlock(&conn->chan_lock);

1222 /* Notify sockets that we cannot guaranty reliability anymore */
/* Push @err to every channel that asked for forced reliability. */
1223 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1225 struct l2cap_chan *chan;
1227 BT_DBG("conn %p", conn);
1229 mutex_lock(&conn->chan_lock);
1231 list_for_each_entry(chan, &conn->chan_l, list) {
1232 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1233 __l2cap_chan_set_err(chan, err);
1236 mutex_unlock(&conn->chan_lock);

/* Information-request timeout: give up on the feature exchange, mark it
 * done anyway, and let pending channels proceed via l2cap_conn_start. */
1239 static void l2cap_info_timeout(struct work_struct *work)
1241 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1244 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1245 conn->info_ident = 0;
1247 l2cap_conn_start(conn);
/* Tear down an L2CAP connection on HCI disconnect: free any partial
 * reassembly skb, delete every channel (holding a temporary reference so
 * ops->close can run safely after chan_del), release the HCI channel,
 * cancel the info timer if armed, and destroy pending SMP state. Finally
 * detaches conn from the hci_conn (the kfree of conn itself is not
 * visible in this excerpt). */
1250 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1252 struct l2cap_conn *conn = hcon->l2cap_data;
1253 struct l2cap_chan *chan, *l;
1258 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1260 kfree_skb(conn->rx_skb);
1262 mutex_lock(&conn->chan_lock);
1265 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold keeps the channel alive across chan_del's reference drop. */
1266 l2cap_chan_hold(chan);
1267 l2cap_chan_lock(chan);
1269 l2cap_chan_del(chan, err);
1271 l2cap_chan_unlock(chan);
1273 chan->ops->close(chan->data);
1274 l2cap_chan_put(chan);
1277 mutex_unlock(&conn->chan_lock);
1279 hci_chan_del(conn->hchan);
1281 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1282 cancel_delayed_work_sync(&conn->info_timer);
1284 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1285 cancel_delayed_work_sync(&conn->security_timer);
1286 smp_chan_destroy(conn);
1289 hcon->l2cap_data = NULL;

/* SMP pairing timeout: destroy the pending SMP context and drop the
 * whole connection with ETIMEDOUT. */
1293 static void security_timeout(struct work_struct *work)
1295 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1296 security_timer.work);
1298 BT_DBG("conn %p", conn);
1300 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1301 smp_chan_destroy(conn);
1302 l2cap_conn_del(conn->hcon, ETIMEDOUT);

/* Create (or return, in the full source — early-return lines missing
 * here) the l2cap_conn for an hci_conn: allocate an HCI channel and the
 * conn, pick MTU per link type (LE vs ACL), record the address pointers,
 * init locks/lists, and arm the security timer (LE) or info timer
 * (BR/EDR). GFP_ATOMIC because callers may hold spinlocks — TODO confirm. */
1306 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1308 struct l2cap_conn *conn = hcon->l2cap_data;
1309 struct hci_chan *hchan;
1314 hchan = hci_chan_create(hcon);
1318 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* Allocation failed: undo the hci_chan before bailing out. */
1320 hci_chan_del(hchan);
1324 hcon->l2cap_data = conn;
1326 conn->hchan = hchan;
1328 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1330 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1331 conn->mtu = hcon->hdev->le_mtu;
1333 conn->mtu = hcon->hdev->acl_mtu;
1335 conn->src = &hcon->hdev->bdaddr;
1336 conn->dst = &hcon->dst;
1338 conn->feat_mask = 0;
1340 spin_lock_init(&conn->lock);
1341 mutex_init(&conn->chan_lock);
1343 INIT_LIST_HEAD(&conn->chan_l);
1345 if (hcon->type == LE_LINK)
1346 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1348 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1350 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1355 /* ---- Socket interface ---- */
1357 /* Find socket with psm and source / destination bdaddr.
1358 * Returns closest match.
/* PSM twin of l2cap_global_chan_by_scid: exact src/dst match wins
 * immediately; BDADDR_ANY wildcard matches are remembered in c1
 * (assignment lines not visible in this excerpt) and returned after
 * the full scan. */
1360 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1364 struct l2cap_chan *c, *c1 = NULL;
1366 read_lock(&chan_list_lock);
1368 list_for_each_entry(c, &chan_list, global_l) {
1369 struct sock *sk = c->sk;
1371 if (state && c->state != state)
1374 if (c->psm == psm) {
1375 int src_match, dst_match;
1376 int src_any, dst_any;
1379 src_match = !bacmp(&bt_sk(sk)->src, src);
1380 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1381 if (src_match && dst_match) {
1382 read_unlock(&chan_list_lock);
1387 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1388 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1389 if ((src_match && dst_any) || (src_any && dst_match) ||
1390 (src_any && dst_any))
1395 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection. Validates the PSM (odd, upper
 * byte LSB clear — unless a fixed CID or raw channel), requires a PSM or
 * CID for connection-oriented channels, restricts modes (visible cases:
 * basic/ERTM/streaming), then resolves a route, creates the HCI link (LE
 * when targeting the LE data CID, else ACL) with the derived auth type,
 * attaches the channel, and either completes immediately on an
 * already-connected link or kicks off l2cap_do_start. NOTE(review): many
 * error-path and state-check lines are missing from this excerpt; the
 * visible LE branch appears to enforce one channel per LE link — TODO
 * confirm against the full source. */
1400 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1401 bdaddr_t *dst, u8 dst_type)
1403 struct sock *sk = chan->sk;
1404 bdaddr_t *src = &bt_sk(sk)->src;
1405 struct l2cap_conn *conn;
1406 struct hci_conn *hcon;
1407 struct hci_dev *hdev;
1411 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1412 dst_type, __le16_to_cpu(chan->psm));
1414 hdev = hci_get_route(dst, src);
1416 return -EHOSTUNREACH;
1420 l2cap_chan_lock(chan);
1422 /* PSM must be odd and lsb of upper byte must be 0 */
1423 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1424 chan->chan_type != L2CAP_CHAN_RAW) {
1429 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1434 switch (chan->mode) {
1435 case L2CAP_MODE_BASIC:
1437 case L2CAP_MODE_ERTM:
1438 case L2CAP_MODE_STREAMING:
1449 switch (sk->sk_state) {
1453 /* Already connecting */
1459 /* Already connected */
1475 /* Set destination address and psm */
1476 bacpy(&bt_sk(sk)->dst, dst);
1483 auth_type = l2cap_get_auth_type(chan);
1485 if (chan->dcid == L2CAP_CID_LE_DATA)
1486 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1487 chan->sec_level, auth_type);
1489 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1490 chan->sec_level, auth_type);
1493 err = PTR_ERR(hcon);
1497 conn = l2cap_conn_add(hcon, 0);
1504 if (hcon->type == LE_LINK) {
1507 if (!list_empty(&conn->chan_l)) {
1516 /* Update source addr of the socket */
1517 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, so drop the channel lock around it
 * to respect lock ordering. */
1519 l2cap_chan_unlock(chan);
1520 l2cap_chan_add(conn, chan);
1521 l2cap_chan_lock(chan);
1523 l2cap_state_change(chan, BT_CONNECT);
1524 __set_chan_timer(chan, sk->sk_sndtimeo);
1526 if (hcon->state == BT_CONNECTED) {
1527 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1528 __clear_chan_timer(chan);
1529 if (l2cap_chan_check_security(chan))
1530 l2cap_state_change(chan, BT_CONNECTED);
1532 l2cap_do_start(chan);
1538 l2cap_chan_unlock(chan);
1539 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM frame on the
 * socket's channel has been acknowledged, the connection goes away, a
 * signal arrives, or a socket error is raised.  Returns 0 or a negative
 * errno.  NOTE(review): timeout setup and loop body lines are elided.
 */
1544 int __l2cap_wait_ack(struct sock *sk)
1546 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1547 DECLARE_WAITQUEUE(wait, current);
1551 add_wait_queue(sk_sleep(sk), &wait);
1552 set_current_state(TASK_INTERRUPTIBLE);
/* Keep waiting while frames remain unacked and the conn is alive. */
1553 while (chan->unacked_frames > 0 && chan->conn) {
1557 if (signal_pending(current)) {
1558 err = sock_intr_errno(timeo);
1563 timeo = schedule_timeout(timeo);
1565 set_current_state(TASK_INTERRUPTIBLE);
1567 err = sock_error(sk);
1571 set_current_state(TASK_RUNNING);
1572 remove_wait_queue(sk_sleep(sk), &wait);
/* Delayed-work handler for the ERTM monitor timer.  If the retry budget
 * (remote_max_tx) is exhausted the channel is torn down; otherwise the
 * monitor timer is re-armed and another RR/RNR poll is sent.  Drops the
 * channel reference the timer held via l2cap_chan_put().
 */
1576 static void l2cap_monitor_timeout(struct work_struct *work)
1578 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1579 monitor_timer.work);
1581 BT_DBG("chan %p", chan);
1583 l2cap_chan_lock(chan);
/* Retry budget exhausted: abort the connection. */
1585 if (chan->retry_count >= chan->remote_max_tx) {
1586 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1587 l2cap_chan_unlock(chan);
1588 l2cap_chan_put(chan);
1592 chan->retry_count++;
1593 __set_monitor_timer(chan);
1595 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1596 l2cap_chan_unlock(chan);
1597 l2cap_chan_put(chan);
/* Delayed-work handler for the ERTM retransmission timer.  Switches the
 * channel into the wait-for-F-bit state, starts the monitor timer with
 * retry_count = 1, and sends an RR/RNR poll.  Drops the timer's channel
 * reference when done.
 */
1600 static void l2cap_retrans_timeout(struct work_struct *work)
1602 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1603 retrans_timer.work);
1605 BT_DBG("chan %p", chan);
1607 l2cap_chan_lock(chan);
1609 chan->retry_count = 1;
1610 __set_monitor_timer(chan);
/* Expect an F-bit in the peer's response to our poll. */
1612 set_bit(CONN_WAIT_F, &chan->conn_state);
1614 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1616 l2cap_chan_unlock(chan);
1617 l2cap_chan_put(chan);
/* Release transmitted frames from the head of tx_q that the peer has
 * acknowledged (up to expected_ack_seq), decrementing unacked_frames.
 * When nothing remains unacked the retransmission timer is stopped.
 * NOTE(review): the loop-exit branch and kfree_skb lines are elided.
 */
1620 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1622 struct sk_buff *skb;
1624 while ((skb = skb_peek(&chan->tx_q)) &&
1625 chan->unacked_frames) {
/* Stop at the first frame that has not been acknowledged yet. */
1626 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1629 skb = skb_dequeue(&chan->tx_q);
1632 chan->unacked_frames--;
1635 if (!chan->unacked_frames)
1636 __clear_retrans_timer(chan);
/* Transmit every queued PDU in streaming mode: stamp the control field
 * with the next tx sequence number and the frame's SAR bits, append the
 * CRC16 FCS when enabled, and hand the frame to l2cap_do_send().  No
 * retransmission state is kept (streaming mode has no acks).
 */
1639 static void l2cap_streaming_send(struct l2cap_chan *chan)
1641 struct sk_buff *skb;
1645 while ((skb = skb_dequeue(&chan->tx_q))) {
1646 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1647 control |= __set_txseq(chan, chan->next_tx_seq);
1648 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1649 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything except the FCS field itself, which was
 * reserved at the tail of the skb when the PDU was built. */
1651 if (chan->fcs == L2CAP_FCS_CRC16) {
1652 fcs = crc16(0, (u8 *)skb->data,
1653 skb->len - L2CAP_FCS_SIZE);
1654 put_unaligned_le16(fcs,
1655 skb->data + skb->len - L2CAP_FCS_SIZE);
1658 l2cap_do_send(chan, skb);
1660 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame whose txseq equals @tx_seq.  Walks tx_q
 * to find the frame, aborts the channel if its retry budget is spent,
 * then clones it, rewrites the control field (reqseq, txseq, optional
 * F-bit) and FCS, and resends the clone.
 * NOTE(review): early-return lines for "not found" are elided.
 */
1664 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1666 struct sk_buff *skb, *tx_skb;
1670 skb = skb_peek(&chan->tx_q);
1674 while (bt_cb(skb)->control.txseq != tx_seq) {
1675 if (skb_queue_is_last(&chan->tx_q, skb))
1678 skb = skb_queue_next(&chan->tx_q, skb);
/* remote_max_tx == 0 means "no limit"; otherwise give up here. */
1681 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1682 chan->remote_max_tx) {
1683 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays queued for possible later resends;
 * clones share the data buffer with the original. */
1687 tx_skb = skb_clone(skb, GFP_ATOMIC);
1688 bt_cb(skb)->control.retries++;
1690 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1691 control &= __get_sar_mask(chan);
1693 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1694 control |= __set_ctrl_final(chan);
1696 control |= __set_reqseq(chan, chan->buffer_seq);
1697 control |= __set_txseq(chan, tx_seq);
1699 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1701 if (chan->fcs == L2CAP_FCS_CRC16) {
1702 fcs = crc16(0, (u8 *)tx_skb->data,
1703 tx_skb->len - L2CAP_FCS_SIZE);
1704 put_unaligned_le16(fcs,
1705 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1708 l2cap_do_send(chan, tx_skb);
/* Transmit pending I-frames from tx_send_head while the tx window has
 * room and the peer is not busy.  Each frame is cloned, its control
 * field and optional FCS rewritten, sent, and left on tx_q awaiting
 * acknowledgement.  Returns a count via an elided return statement
 * (callers treat > 0 as "frames were sent").
 * NOTE(review): early returns and the final return are elided here.
 */
1711 static int l2cap_ertm_send(struct l2cap_chan *chan)
1713 struct sk_buff *skb, *tx_skb;
1718 if (chan->state != BT_CONNECTED)
1721 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1724 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
/* remote_max_tx == 0 means unlimited retries. */
1726 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1727 chan->remote_max_tx) {
1728 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1732 tx_skb = skb_clone(skb, GFP_ATOMIC);
1734 bt_cb(skb)->control.retries++;
1736 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1737 control &= __get_sar_mask(chan);
1739 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1740 control |= __set_ctrl_final(chan);
1742 control |= __set_reqseq(chan, chan->buffer_seq);
1743 control |= __set_txseq(chan, chan->next_tx_seq);
1744 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1746 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS is computed/stored via skb->data; skb_clone() shares the
 * data buffer, so the clone (tx_skb) sees the same bytes. */
1748 if (chan->fcs == L2CAP_FCS_CRC16) {
1749 fcs = crc16(0, (u8 *)skb->data,
1750 tx_skb->len - L2CAP_FCS_SIZE);
1751 put_unaligned_le16(fcs, skb->data +
1752 tx_skb->len - L2CAP_FCS_SIZE);
1755 l2cap_do_send(chan, tx_skb);
1757 __set_retrans_timer(chan);
1759 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1761 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it now counts as unacked. */
1763 if (bt_cb(skb)->control.retries == 1) {
1764 chan->unacked_frames++;
1767 __clear_ack_timer(chan);
1770 chan->frames_sent++;
1772 if (skb_queue_is_last(&chan->tx_q, skb))
1773 chan->tx_send_head = NULL;
1775 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the start of tx_q and the tx sequence
 * number to the last acknowledged one, then retransmit everything via
 * l2cap_ertm_send().  Returns its result (count of frames sent).
 */
1781 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1785 if (!skb_queue_empty(&chan->tx_q))
1786 chan->tx_send_head = chan->tx_q.next;
1788 chan->next_tx_seq = chan->expected_ack_seq;
1789 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send an RNR S-frame if we are locally
 * busy, otherwise try to piggyback the ack on pending I-frames via
 * l2cap_ertm_send(); if nothing was sent, fall back to an RR S-frame.
 * NOTE(review): the control-variable declaration and an early return
 * after the RNR path are elided from this excerpt.
 */
1793 static void __l2cap_send_ack(struct l2cap_chan *chan)
1797 control |= __set_reqseq(chan, chan->buffer_seq);
1799 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1800 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1801 set_bit(CONN_RNR_SENT, &chan->conn_state);
1802 l2cap_send_sframe(chan, control);
/* If I-frames went out they already carried the reqseq ack. */
1806 if (l2cap_ertm_send(chan) > 0)
1809 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1810 l2cap_send_sframe(chan, control);
/* Cancel the delayed-ack timer and send the acknowledgement now. */
1813 static void l2cap_send_ack(struct l2cap_chan *chan)
1815 __clear_ack_timer(chan);
1816 __l2cap_send_ack(chan);
/* Send an SREJ S-frame with the F-bit set, requesting the sequence
 * number recorded in the last (tail) entry of the channel's SREJ list.
 */
1819 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1821 struct srej_list *tail;
1824 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1825 control |= __set_ctrl_final(chan);
1827 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1828 control |= __set_reqseq(chan, tail->tx_seq);
1830 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb: @count bytes go
 * into the skb's linear area, the remainder into a chain of frag_list
 * continuation skbs each at most conn->mtu bytes.  Returns 0 on success
 * or a negative errno.
 * NOTE(review): loop header, len/count bookkeeping and some error
 * returns are elided from this excerpt.
 */
1833 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1834 struct msghdr *msg, int len,
1835 int count, struct sk_buff *skb)
1837 struct l2cap_conn *conn = chan->conn;
1838 struct sk_buff **frag;
1841 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1847 /* Continuation fragments (no L2CAP header) */
1848 frag = &skb_shinfo(skb)->frag_list;
1850 struct sk_buff *tmp;
1852 count = min_t(unsigned int, conn->mtu, len);
1854 tmp = chan->ops->alloc_skb(chan, count,
1855 msg->msg_flags & MSG_DONTWAIT);
1857 return PTR_ERR(tmp);
1861 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1864 (*frag)->priority = skb->priority;
/* Keep the head skb's totals consistent with the fragment chain. */
1869 skb->len += (*frag)->len;
1870 skb->data_len += (*frag)->len;
1872 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU from @msg: basic L2CAP header
 * plus the 2-byte PSM, followed by the payload copied from the iovec.
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 * NOTE(review): the priority parameter declaration, alloc-failure check
 * and final return are elided from this excerpt.
 */
1878 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1879 struct msghdr *msg, size_t len,
1882 struct l2cap_conn *conn = chan->conn;
1883 struct sk_buff *skb;
1884 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1885 struct l2cap_hdr *lh;
1887 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
/* Linear part is capped by the HCI MTU; the rest goes to frag_list. */
1889 count = min_t(unsigned int, (conn->mtu - hlen), len);
1891 skb = chan->ops->alloc_skb(chan, count + hlen,
1892 msg->msg_flags & MSG_DONTWAIT);
1896 skb->priority = priority;
1898 /* Create L2CAP header */
1899 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1900 lh->cid = cpu_to_le16(chan->dcid);
1901 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1902 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1904 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1905 if (unlikely(err < 0)) {
1907 return ERR_PTR(err);
/* Build a basic-mode B-frame PDU from @msg: plain L2CAP header followed
 * by the payload.  Returns the skb or an ERR_PTR on failure.
 * NOTE(review): the priority parameter, err/count declarations,
 * alloc-failure check and final return are elided from this excerpt.
 */
1912 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1913 struct msghdr *msg, size_t len,
1916 struct l2cap_conn *conn = chan->conn;
1917 struct sk_buff *skb;
1919 struct l2cap_hdr *lh;
1921 BT_DBG("chan %p len %d", chan, (int)len);
1923 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1925 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1926 msg->msg_flags & MSG_DONTWAIT);
1930 skb->priority = priority;
1932 /* Create L2CAP header */
1933 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1934 lh->cid = cpu_to_le16(chan->dcid);
1935 lh->len = cpu_to_le16(len);
1937 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1938 if (unlikely(err < 0)) {
1940 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU from @msg.  Header length depends
 * on extended vs enhanced control fields, an optional SDU-length field
 * (present for SAR start frames), and an optional FCS trailer.  The
 * control field and FCS are zero placeholders here; the tx path fills
 * them in at send time.  Returns the skb or an ERR_PTR.
 * NOTE(review): the sdulen parameter, NOT-connected check condition,
 * alloc-failure check and final return are elided from this excerpt.
 */
1945 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1946 struct msghdr *msg, size_t len,
1949 struct l2cap_conn *conn = chan->conn;
1950 struct sk_buff *skb;
1951 int err, count, hlen;
1952 struct l2cap_hdr *lh;
1954 BT_DBG("chan %p len %d", chan, (int)len);
1957 return ERR_PTR(-ENOTCONN);
1959 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1960 hlen = L2CAP_EXT_HDR_SIZE;
1962 hlen = L2CAP_ENH_HDR_SIZE;
1965 hlen += L2CAP_SDULEN_SIZE;
1967 if (chan->fcs == L2CAP_FCS_CRC16)
1968 hlen += L2CAP_FCS_SIZE;
1970 count = min_t(unsigned int, (conn->mtu - hlen), len);
1972 skb = chan->ops->alloc_skb(chan, count + hlen,
1973 msg->msg_flags & MSG_DONTWAIT);
1977 /* Create L2CAP header */
1978 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1979 lh->cid = cpu_to_le16(chan->dcid);
1980 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Zero control field placeholder; real value stamped at tx time. */
1982 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
1985 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1987 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1988 if (unlikely(err < 0)) {
1990 return ERR_PTR(err);
/* Reserve FCS space; computed just before transmission. */
1993 if (chan->fcs == L2CAP_FCS_CRC16)
1994 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1996 bt_cb(skb)->control.retries = 0;
/* Segment an SDU from @msg into one or more I-frame PDUs appended to
 * @seg_queue, tagging each with the proper SAR value (UNSEGMENTED, or
 * START/CONTINUE/END).  PDU size is bounded by the HCI MTU, the BR/EDR
 * payload cap, worst-case L2CAP overhead, and the remote's MPS.
 * NOTE(review): sar/pdu_len/sdu_len declarations, the segmentation loop
 * header and the final return are elided from this excerpt.
 */
2000 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2001 struct sk_buff_head *seg_queue,
2002 struct msghdr *msg, size_t len)
2004 struct sk_buff *skb;
2010 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2012 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2013 * so fragmented skbs are not used. The HCI layer's handling
2014 * of fragmented skbs is not compatible with ERTM's queueing.
2017 /* PDU size is derived from the HCI MTU */
2018 pdu_len = chan->conn->mtu;
2020 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2022 /* Adjust for largest possible L2CAP overhead. */
2023 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2025 /* Remote device may have requested smaller PDUs */
2026 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2028 if (len <= pdu_len) {
2029 sar = L2CAP_SAR_UNSEGMENTED;
/* Multi-PDU SDU: first frame carries the SDU length field. */
2033 sar = L2CAP_SAR_START;
2035 pdu_len -= L2CAP_SDULEN_SIZE;
2039 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2042 __skb_queue_purge(seg_queue);
2043 return PTR_ERR(skb);
2046 bt_cb(skb)->control.sar = sar;
2047 __skb_queue_tail(seg_queue, skb);
/* Subsequent frames have no SDU length field, so regain its bytes. */
2052 pdu_len += L2CAP_SDULEN_SIZE;
2055 if (len <= pdu_len) {
2056 sar = L2CAP_SAR_END;
2059 sar = L2CAP_SAR_CONTINUE;
/* Send user data on @chan.  Connectionless channels get a single
 * G-frame; basic mode gets one B-frame (bounded by omtu); ERTM and
 * streaming modes segment the SDU first, splice the segments onto tx_q
 * and kick the appropriate transmit routine.  Returns bytes sent or a
 * negative errno.  NOTE(review): several returns, the priority
 * parameter and error-path lines are elided from this excerpt.
 */
2066 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2069 struct sk_buff *skb;
2071 struct sk_buff_head seg_queue;
2073 /* Connectionless channel */
2074 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2075 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2077 return PTR_ERR(skb);
2079 l2cap_do_send(chan, skb);
2083 switch (chan->mode) {
2084 case L2CAP_MODE_BASIC:
2085 /* Check outgoing MTU */
2086 if (len > chan->omtu)
2089 /* Create a basic PDU */
2090 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2092 return PTR_ERR(skb);
2094 l2cap_do_send(chan, skb);
2098 case L2CAP_MODE_ERTM:
2099 case L2CAP_MODE_STREAMING:
2100 /* Check outgoing MTU */
2101 if (len > chan->omtu) {
2106 __skb_queue_head_init(&seg_queue);
2108 /* Do segmentation before calling in to the state machine,
2109 * since it's possible to block while waiting for memory
2112 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2114 /* The channel could have been closed while segmenting,
2115 * check that it is still connected.
2117 if (chan->state != BT_CONNECTED) {
2118 __skb_queue_purge(&seg_queue);
/* Start transmitting from the first new segment if idle. */
2125 if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
2126 chan->tx_send_head = seg_queue.next;
2127 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2129 if (chan->mode == L2CAP_MODE_ERTM)
2130 err = l2cap_ertm_send(chan);
2132 l2cap_streaming_send(chan);
2137 /* If the skbs were not queued for sending, they'll still be in
2138 * seg_queue and need to be purged.
2140 __skb_queue_purge(&seg_queue);
2144 BT_DBG("bad state %1.1x", chan->mode);
2151 /* Copy frame to all raw sockets on that connection */
/* Clone the incoming frame to every RAW-type channel on @conn via the
 * channel ops' recv callback.  Holds conn->chan_lock for the walk.
 * NOTE(review): skip/continue branches are elided from this excerpt. */
2152 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2154 struct sk_buff *nskb;
2155 struct l2cap_chan *chan;
2157 BT_DBG("conn %p", conn);
2159 mutex_lock(&conn->chan_lock);
2161 list_for_each_entry(chan, &conn->chan_l, list) {
2162 struct sock *sk = chan->sk;
2163 if (chan->chan_type != L2CAP_CHAN_RAW)
2166 /* Don't send frame to the socket it came from */
2169 nskb = skb_clone(skb, GFP_ATOMIC);
2173 if (chan->ops->recv(chan->data, nskb))
2177 mutex_unlock(&conn->chan_lock);
2180 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header addressed to the
 * (LE or BR/EDR) signalling CID, command header (@code/@ident/@dlen),
 * then @data — overflowing into frag_list fragments if it exceeds the
 * HCI MTU.  NOTE(review): allocation-failure branches, loop header,
 * data-pointer advancement and the final return are elided here. */
2181 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2182 u8 code, u8 ident, u16 dlen, void *data)
2184 struct sk_buff *skb, **frag;
2185 struct l2cap_cmd_hdr *cmd;
2186 struct l2cap_hdr *lh;
2189 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2190 conn, code, ident, dlen);
2192 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2193 count = min_t(unsigned int, conn->mtu, len);
2195 skb = bt_skb_alloc(count, GFP_ATOMIC);
2199 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2200 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2202 if (conn->hcon->type == LE_LINK)
2203 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2205 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2207 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2210 cmd->len = cpu_to_le16(dlen);
2213 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2214 memcpy(skb_put(skb, count), data, count);
2220 /* Continuation fragments (no L2CAP header) */
2221 frag = &skb_shinfo(skb)->frag_list;
2223 count = min_t(unsigned int, conn->mtu, len);
2225 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2229 memcpy(skb_put(*frag, count), data, count);
2234 frag = &(*frag)->next;
/* Decode one configuration TLV option at *@ptr, returning its @type,
 * @olen and value in @val (inline for 1/2/4-byte options, a pointer for
 * anything larger).  Returns the total option length so the caller can
 * advance through the option list.
 * NOTE(review): the switch header, *ptr advancement and final return
 * are elided from this excerpt. */
2244 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2246 struct l2cap_conf_opt *opt = *ptr;
2249 len = L2CAP_CONF_OPT_SIZE + opt->len;
2257 *val = *((u8 *) opt->val);
2261 *val = get_unaligned_le16(opt->val);
2265 *val = get_unaligned_le32(opt->val);
/* Larger options are returned by reference, not copied. */
2269 *val = (unsigned long) opt->val;
2273 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Serialize one configuration TLV option (@type/@len/@val) at *@ptr and
 * advance the pointer past it.  @val is written inline for 1/2/4-byte
 * options and treated as a source pointer for larger ones.
 * NOTE(review): the opt->type/opt->len assignments and switch header
 * are elided from this excerpt. */
2277 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2279 struct l2cap_conf_opt *opt = *ptr;
2281 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2288 *((u8 *) opt->val) = val;
2292 put_unaligned_le16(val, opt->val);
2296 put_unaligned_le32(val, opt->val);
2300 memcpy(opt->val, (void *) val, len);
2304 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option built from the channel's
 * local QoS parameters.  ERTM uses the channel's own service type with
 * default access latency / flush timeout; streaming mode uses
 * best-effort.  NOTE(review): the streaming-mode id/acc_lat/flush_to
 * assignments and the default case are elided from this excerpt. */
2307 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2309 struct l2cap_conf_efs efs;
2311 switch (chan->mode) {
2312 case L2CAP_MODE_ERTM:
2313 efs.id = chan->local_id;
2314 efs.stype = chan->local_stype;
2315 efs.msdu = cpu_to_le16(chan->local_msdu);
2316 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2317 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2318 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2321 case L2CAP_MODE_STREAMING:
2323 efs.stype = L2CAP_SERV_BESTEFFORT;
2324 efs.msdu = cpu_to_le16(chan->local_msdu);
2325 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2334 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2335 (unsigned long) &efs);
/* Delayed-work handler for the ack timer: send any pending
 * acknowledgement, then drop the reference the timer held.
 */
2338 static void l2cap_ack_timeout(struct work_struct *work)
2340 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2343 BT_DBG("chan %p", chan);
2345 l2cap_chan_lock(chan);
2347 __l2cap_send_ack(chan);
2349 l2cap_chan_unlock(chan);
2351 l2cap_chan_put(chan);
/* Reset per-channel sequence state and queues for (re)starting ERTM or
 * streaming mode.  For ERTM proper, additionally initialize the RX/TX
 * state machines, the retrans/monitor/ack work timers, the SREJ queue
 * and both sequence lists.  Returns 0 or a negative errno from
 * l2cap_seq_list_init().
 * NOTE(review): err declaration, sdu reset lines and the ERTM early
 * return are elided from this excerpt. */
2354 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2358 chan->next_tx_seq = 0;
2359 chan->expected_tx_seq = 0;
2360 chan->expected_ack_seq = 0;
2361 chan->unacked_frames = 0;
2362 chan->buffer_seq = 0;
2363 chan->num_acked = 0;
2364 chan->frames_sent = 0;
2365 chan->last_acked_seq = 0;
2367 chan->sdu_last_frag = NULL;
2370 skb_queue_head_init(&chan->tx_q);
/* Everything below applies to ERTM only. */
2372 if (chan->mode != L2CAP_MODE_ERTM)
2375 chan->rx_state = L2CAP_RX_STATE_RECV;
2376 chan->tx_state = L2CAP_TX_STATE_XMIT;
2378 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2379 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2380 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2382 skb_queue_head_init(&chan->srej_q);
2384 INIT_LIST_HEAD(&chan->srej_l);
2385 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2389 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Return @mode if the remote's feature mask supports it (ERTM or
 * streaming); otherwise fall back to basic mode.
 * NOTE(review): the switch header and the supported-mode return are
 * elided from this excerpt. */
2392 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2395 case L2CAP_MODE_STREAMING:
2396 case L2CAP_MODE_ERTM:
2397 if (l2cap_mode_supported(mode, remote_feat_mask))
2401 return L2CAP_MODE_BASIC;
/* Extended window size usable: high-speed enabled and peer advertises
 * the extended-window feature bit. */
2405 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2407 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow spec usable: high-speed enabled and peer advertises
 * the extended-flow feature bit. */
2410 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2412 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Pick the transmit-window configuration: if the requested window
 * exceeds the default and extended windows are supported, switch to
 * extended control fields; otherwise clamp to the default window.
 */
2415 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2417 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2418 __l2cap_ews_supported(chan)) {
2419 /* use extended control field */
2420 set_bit(FLAG_EXT_CTRL, &chan->flags);
2421 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2423 chan->tx_win = min_t(u16, chan->tx_win,
2424 L2CAP_DEFAULT_TX_WINDOW);
2425 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build a Configuration Request into @data for @chan: MTU (if not the
 * default), an RFC option describing the chosen mode (basic / ERTM /
 * streaming), and — for ERTM/streaming — optional EFS, FCS and EWS
 * options depending on negotiated features.  Returns the request length
 * (final return elided).
 * NOTE(review): several branch bodies, 'done' labels and declarations
 * are elided from this excerpt. */
2429 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2431 struct l2cap_conf_req *req = data;
2432 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2433 void *ptr = req->data;
2436 BT_DBG("chan %p", chan);
/* Only (re)select the mode on the very first request. */
2438 if (chan->num_conf_req || chan->num_conf_rsp)
2441 switch (chan->mode) {
2442 case L2CAP_MODE_STREAMING:
2443 case L2CAP_MODE_ERTM:
2444 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2447 if (__l2cap_efs_supported(chan))
2448 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2452 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2457 if (chan->imtu != L2CAP_DEFAULT_MTU)
2458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2460 switch (chan->mode) {
2461 case L2CAP_MODE_BASIC:
/* Nothing more to add if the peer supports neither ERTM nor
 * streaming; otherwise send an explicit basic-mode RFC. */
2462 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2463 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2466 rfc.mode = L2CAP_MODE_BASIC;
2468 rfc.max_transmit = 0;
2469 rfc.retrans_timeout = 0;
2470 rfc.monitor_timeout = 0;
2471 rfc.max_pdu_size = 0;
2473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2474 (unsigned long) &rfc);
2477 case L2CAP_MODE_ERTM:
2478 rfc.mode = L2CAP_MODE_ERTM;
2479 rfc.max_transmit = chan->max_tx;
2480 rfc.retrans_timeout = 0;
2481 rfc.monitor_timeout = 0;
2483 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2484 L2CAP_EXT_HDR_SIZE -
2487 rfc.max_pdu_size = cpu_to_le16(size);
2489 l2cap_txwin_setup(chan);
2491 rfc.txwin_size = min_t(u16, chan->tx_win,
2492 L2CAP_DEFAULT_TX_WINDOW);
2494 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2495 (unsigned long) &rfc);
2497 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2498 l2cap_add_opt_efs(&ptr, chan);
2500 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2503 if (chan->fcs == L2CAP_FCS_NONE ||
2504 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2505 chan->fcs = L2CAP_FCS_NONE;
2506 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2509 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2510 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2514 case L2CAP_MODE_STREAMING:
2515 rfc.mode = L2CAP_MODE_STREAMING;
2517 rfc.max_transmit = 0;
2518 rfc.retrans_timeout = 0;
2519 rfc.monitor_timeout = 0;
2521 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2522 L2CAP_EXT_HDR_SIZE -
2525 rfc.max_pdu_size = cpu_to_le16(size);
2527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2528 (unsigned long) &rfc);
2530 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2531 l2cap_add_opt_efs(&ptr, chan);
2533 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2536 if (chan->fcs == L2CAP_FCS_NONE ||
2537 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2538 chan->fcs = L2CAP_FCS_NONE;
2539 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2544 req->dcid = cpu_to_le16(chan->dcid);
2545 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configuration Request (chan->conf_req/
 * conf_len) and build our Configuration Response into @data.
 *
 * Walks the option TLVs (MTU, flush timeout, QoS, RFC, FCS, EFS, EWS),
 * resolves the operating mode against our own, validates MTU and EFS
 * parameters, fills in our accepted values (txwin, max_tx, timeouts,
 * MPS, EFS echo) and sets the result code (SUCCESS / UNACCEPT /
 * UNKNOWN / PENDING).  Returns the response length (return elided) or
 * -ECONNREFUSED on irreconcilable options.
 * NOTE(review): numerous branch bodies, 'done'/'done'-style labels and
 * declarations are elided from this excerpt. */
2550 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2552 struct l2cap_conf_rsp *rsp = data;
2553 void *ptr = rsp->data;
2554 void *req = chan->conf_req;
2555 int len = chan->conf_len;
2556 int type, hint, olen;
2558 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2559 struct l2cap_conf_efs efs;
2561 u16 mtu = L2CAP_DEFAULT_MTU;
2562 u16 result = L2CAP_CONF_SUCCESS;
2565 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent. */
2567 while (len >= L2CAP_CONF_OPT_SIZE) {
2568 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2570 hint = type & L2CAP_CONF_HINT;
2571 type &= L2CAP_CONF_MASK;
2574 case L2CAP_CONF_MTU:
2578 case L2CAP_CONF_FLUSH_TO:
2579 chan->flush_to = val;
2582 case L2CAP_CONF_QOS:
2585 case L2CAP_CONF_RFC:
2586 if (olen == sizeof(rfc))
2587 memcpy(&rfc, (void *) val, olen);
2590 case L2CAP_CONF_FCS:
2591 if (val == L2CAP_FCS_NONE)
2592 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2595 case L2CAP_CONF_EFS:
2597 if (olen == sizeof(efs))
2598 memcpy(&efs, (void *) val, olen);
2601 case L2CAP_CONF_EWS:
2603 return -ECONNREFUSED;
2605 set_bit(FLAG_EXT_CTRL, &chan->flags);
2606 set_bit(CONF_EWS_RECV, &chan->conf_state);
2607 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2608 chan->remote_tx_win = val;
/* Unknown non-hint option: report it back as such. */
2615 result = L2CAP_CONF_UNKNOWN;
2616 *((u8 *) ptr++) = type;
2621 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* Resolve the operating mode on the first exchange only. */
2624 switch (chan->mode) {
2625 case L2CAP_MODE_STREAMING:
2626 case L2CAP_MODE_ERTM:
2627 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2628 chan->mode = l2cap_select_mode(rfc.mode,
2629 chan->conn->feat_mask);
2634 if (__l2cap_efs_supported(chan))
2635 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2637 return -ECONNREFUSED;
2640 if (chan->mode != rfc.mode)
2641 return -ECONNREFUSED;
2647 if (chan->mode != rfc.mode) {
2648 result = L2CAP_CONF_UNACCEPT;
2649 rfc.mode = chan->mode;
/* Give the peer one chance to accept our mode, then refuse. */
2651 if (chan->num_conf_rsp == 1)
2652 return -ECONNREFUSED;
2654 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2655 sizeof(rfc), (unsigned long) &rfc);
2658 if (result == L2CAP_CONF_SUCCESS) {
2659 /* Configure output options and let the other side know
2660 * which ones we don't like. */
2662 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2663 result = L2CAP_CONF_UNACCEPT;
2666 set_bit(CONF_MTU_DONE, &chan->conf_state);
2668 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must be compatible with ours (or no-traffic). */
2671 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2672 efs.stype != L2CAP_SERV_NOTRAFIC &&
2673 efs.stype != chan->local_stype) {
2675 result = L2CAP_CONF_UNACCEPT;
2677 if (chan->num_conf_req >= 1)
2678 return -ECONNREFUSED;
2680 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2682 (unsigned long) &efs);
2684 /* Send PENDING Conf Rsp */
2685 result = L2CAP_CONF_PENDING;
2686 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2691 case L2CAP_MODE_BASIC:
2692 chan->fcs = L2CAP_FCS_NONE;
2693 set_bit(CONF_MODE_DONE, &chan->conf_state);
2696 case L2CAP_MODE_ERTM:
2697 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2698 chan->remote_tx_win = rfc.txwin_size;
2700 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2702 chan->remote_max_tx = rfc.max_transmit;
2704 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2706 L2CAP_EXT_HDR_SIZE -
2709 rfc.max_pdu_size = cpu_to_le16(size);
2710 chan->remote_mps = size;
2712 rfc.retrans_timeout =
2713 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2714 rfc.monitor_timeout =
2715 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2717 set_bit(CONF_MODE_DONE, &chan->conf_state);
2719 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2720 sizeof(rfc), (unsigned long) &rfc);
2722 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2723 chan->remote_id = efs.id;
2724 chan->remote_stype = efs.stype;
2725 chan->remote_msdu = le16_to_cpu(efs.msdu);
2726 chan->remote_flush_to =
2727 le32_to_cpu(efs.flush_to);
2728 chan->remote_acc_lat =
2729 le32_to_cpu(efs.acc_lat);
2730 chan->remote_sdu_itime =
2731 le32_to_cpu(efs.sdu_itime);
2732 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2733 sizeof(efs), (unsigned long) &efs);
2737 case L2CAP_MODE_STREAMING:
2738 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2740 L2CAP_EXT_HDR_SIZE -
2743 rfc.max_pdu_size = cpu_to_le16(size);
2744 chan->remote_mps = size;
2746 set_bit(CONF_MODE_DONE, &chan->conf_state);
2748 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2749 sizeof(rfc), (unsigned long) &rfc);
2754 result = L2CAP_CONF_UNACCEPT;
2756 memset(&rfc, 0, sizeof(rfc));
2757 rfc.mode = chan->mode;
2760 if (result == L2CAP_CONF_SUCCESS)
2761 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2763 rsp->scid = cpu_to_le16(chan->dcid);
2764 rsp->result = cpu_to_le16(result);
2765 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configuration Response @rsp and rebuild our next
 * Configuration Request into @data, adjusting @result as needed.
 * Rejected MTU values are clamped to the minimum; accepted RFC/EFS/EWS
 * values are echoed back and, on SUCCESS/PENDING, committed to the
 * channel (timeouts, MPS, EFS locals).  Returns the request length
 * (return elided) or -ECONNREFUSED.
 * NOTE(review): declarations, default cases and several lines are
 * elided from this excerpt. */
2770 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2772 struct l2cap_conf_req *req = data;
2773 void *ptr = req->data;
2776 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2777 struct l2cap_conf_efs efs;
2779 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2781 while (len >= L2CAP_CONF_OPT_SIZE) {
2782 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2785 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below spec minimum: reject, clamp. */
2786 if (val < L2CAP_DEFAULT_MIN_MTU) {
2787 *result = L2CAP_CONF_UNACCEPT;
2788 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2794 case L2CAP_CONF_FLUSH_TO:
2795 chan->flush_to = val;
2796 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2800 case L2CAP_CONF_RFC:
2801 if (olen == sizeof(rfc))
2802 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not change mode mid-negotiation. */
2804 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2805 rfc.mode != chan->mode)
2806 return -ECONNREFUSED;
2810 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2811 sizeof(rfc), (unsigned long) &rfc);
2814 case L2CAP_CONF_EWS:
2815 chan->tx_win = min_t(u16, val,
2816 L2CAP_DEFAULT_EXT_WINDOW);
2817 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2821 case L2CAP_CONF_EFS:
2822 if (olen == sizeof(efs))
2823 memcpy(&efs, (void *)val, olen);
2825 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2826 efs.stype != L2CAP_SERV_NOTRAFIC &&
2827 efs.stype != chan->local_stype)
2828 return -ECONNREFUSED;
2830 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2831 sizeof(efs), (unsigned long) &efs);
2836 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2837 return -ECONNREFUSED;
2839 chan->mode = rfc.mode;
/* Commit accepted parameters once the exchange is (nearly) done. */
2841 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2843 case L2CAP_MODE_ERTM:
2844 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2845 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2846 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2848 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2849 chan->local_msdu = le16_to_cpu(efs.msdu);
2850 chan->local_sdu_itime =
2851 le32_to_cpu(efs.sdu_itime);
2852 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2853 chan->local_flush_to =
2854 le32_to_cpu(efs.flush_to);
2858 case L2CAP_MODE_STREAMING:
2859 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2863 req->dcid = cpu_to_le16(chan->dcid);
2864 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configuration Response header (scid, result,
 * flags) into @data; the data length returned by the (elided) final
 * return tells the caller how much to send.
 */
2869 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2871 struct l2cap_conf_rsp *rsp = data;
2872 void *ptr = rsp->data;
2874 BT_DBG("chan %p", chan);
2876 rsp->scid = cpu_to_le16(chan->dcid);
2877 rsp->result = cpu_to_le16(result);
2878 rsp->flags = cpu_to_le16(flags);
/* Send the Connect Response that was deferred (e.g. pending security or
 * authorization) for @chan, then — if we have not already done so —
 * send our first Configuration Request.
 * NOTE(review): the buf declaration and an early return after the
 * test_and_set_bit are elided from this excerpt. */
2883 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2885 struct l2cap_conn_rsp rsp;
2886 struct l2cap_conn *conn = chan->conn;
2889 rsp.scid = cpu_to_le16(chan->dcid);
2890 rsp.dcid = cpu_to_le16(chan->scid);
2891 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2892 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2893 l2cap_send_cmd(conn, chan->ident,
2894 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only send the config request once per channel. */
2896 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2899 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2900 l2cap_build_conf_req(chan, buf), buf);
2901 chan->num_conf_req++;
/* Extract the RFC option from a Configuration Response @rsp and commit
 * its timeouts/MPS to @chan (ERTM) or MPS only (streaming).  If the
 * peer omitted the RFC option, fall back to sane defaults and log an
 * error.  No-op for basic mode.
 * NOTE(review): declarations, a goto to the commit path and the switch
 * header near the end are elided from this excerpt. */
2904 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2908 struct l2cap_conf_rfc rfc;
2910 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2912 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2915 while (len >= L2CAP_CONF_OPT_SIZE) {
2916 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2918 if (type != L2CAP_CONF_RFC)
2921 if (olen != sizeof(rfc))
2924 memcpy(&rfc, (void *)val, olen);
2928 /* Use sane default values in case a misbehaving remote device
2929 * did not send an RFC option.
2931 rfc.mode = chan->mode;
2932 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2933 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2934 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2936 BT_ERR("Expected RFC option was not found, using defaults");
2940 case L2CAP_MODE_ERTM:
2941 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2942 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2943 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2945 case L2CAP_MODE_STREAMING:
2946 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  A "not understood" reject of our
 * outstanding feature-mask Information Request is treated as that
 * exchange completing: cancel the info timer, mark the request done and
 * kick connection startup.
 */
2950 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2952 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2954 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
/* Only react if the reject matches our pending info request. */
2957 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2958 cmd->ident == conn->info_ident) {
2959 cancel_delayed_work(&conn->info_timer);
2961 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2962 conn->info_ident = 0;
2964 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Connection Request.
 * Looks up a listening channel for the requested PSM, performs security
 * and resource checks, creates the new child channel, and replies with a
 * Connection Response (success / pending / error).  May also trigger the
 * feature-mask Information Request exchange and the first Configure
 * Request.  NOTE(review): extraction dropped lines (gotos, braces,
 * returns); comments describe only the visible code.
 */
2970 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2972 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2973 struct l2cap_conn_rsp rsp;
2974 struct l2cap_chan *chan = NULL, *pchan;
2975 struct sock *parent, *sk = NULL;
2976 int result, status = L2CAP_CS_NO_INFO;
2978 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2979 __le16 psm = req->psm;
2981 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2983 /* Check if we have socket listening on psm */
2984 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
/* No listener for this PSM: reject with "bad PSM" */
2986 result = L2CAP_CR_BAD_PSM;
2992 mutex_lock(&conn->chan_lock);
2995 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check */
2996 if (psm != cpu_to_le16(0x0001) &&
2997 !hci_conn_check_link_mode(conn->hcon)) {
2998 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2999 result = L2CAP_CR_SEC_BLOCK;
3003 result = L2CAP_CR_NO_MEM;
3005 /* Check for backlog size */
3006 if (sk_acceptq_is_full(parent)) {
3007 BT_DBG("backlog full %d", parent->sk_ack_backlog);
/* Create the child channel from the listening channel's ops */
3011 chan = pchan->ops->new_connection(pchan->data);
3017 /* Check if we already have channel with that dcid */
3018 if (__l2cap_get_chan_by_dcid(conn, scid)) {
/* Duplicate remote CID: tear down the freshly created child */
3019 sock_set_flag(sk, SOCK_ZAPPED);
3020 chan->ops->close(chan->data);
3024 hci_conn_hold(conn->hcon);
3026 bacpy(&bt_sk(sk)->src, conn->src);
3027 bacpy(&bt_sk(sk)->dst, conn->dst);
3031 bt_accept_enqueue(parent, sk);
3033 __l2cap_chan_add(conn, chan);
3037 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident so a deferred response can reuse it */
3039 chan->ident = cmd->ident;
3041 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3042 if (l2cap_chan_check_security(chan)) {
/* Defer-setup sockets stay in CONNECT2 until userspace accepts */
3043 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3044 __l2cap_state_change(chan, BT_CONNECT2);
3045 result = L2CAP_CR_PEND;
3046 status = L2CAP_CS_AUTHOR_PEND;
3047 parent->sk_data_ready(parent, 0);
3049 __l2cap_state_change(chan, BT_CONFIG);
3050 result = L2CAP_CR_SUCCESS;
3051 status = L2CAP_CS_NO_INFO;
/* Security check pending: answer "pending / authentication pending" */
3054 __l2cap_state_change(chan, BT_CONNECT2);
3055 result = L2CAP_CR_PEND;
3056 status = L2CAP_CS_AUTHEN_PEND;
/* Feature-mask exchange not finished yet: answer "pending" */
3059 __l2cap_state_change(chan, BT_CONNECT2);
3060 result = L2CAP_CR_PEND;
3061 status = L2CAP_CS_NO_INFO;
3065 release_sock(parent);
3066 mutex_unlock(&conn->chan_lock);
/* Build and send the Connection Response for all paths above */
3069 rsp.scid = cpu_to_le16(scid);
3070 rsp.dcid = cpu_to_le16(dcid);
3071 rsp.result = cpu_to_le16(result);
3072 rsp.status = cpu_to_le16(status);
3073 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection while features unknown: start the info exchange */
3075 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3076 struct l2cap_info_req info;
3077 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3079 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3080 conn->info_ident = l2cap_get_ident(conn);
3082 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3084 l2cap_send_cmd(conn, conn->info_ident,
3085 L2CAP_INFO_REQ, sizeof(info), &info);
/* On immediate success, kick off configuration right away */
3088 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3089 result == L2CAP_CR_SUCCESS) {
3091 set_bit(CONF_REQ_SENT, &chan->conf_state);
3092 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3093 l2cap_build_conf_req(chan, buf), buf);
3094 chan->num_conf_req++;
/* Handle an incoming L2CAP Connection Response.
 * Finds the local channel either by source CID (success/pending carry a
 * valid dcid) or by the request ident, then advances the channel state:
 * SUCCESS -> BT_CONFIG and send first Configure Request, PEND -> wait,
 * otherwise tear the channel down with ECONNREFUSED.
 * NOTE(review): extraction dropped lines (switch header, breaks, gotos);
 * comments describe only the visible code.
 */
3100 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3102 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3103 u16 scid, dcid, result, status;
3104 struct l2cap_chan *chan;
3108 scid = __le16_to_cpu(rsp->scid);
3109 dcid = __le16_to_cpu(rsp->dcid);
3110 result = __le16_to_cpu(rsp->result);
3111 status = __le16_to_cpu(rsp->status);
3113 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3114 dcid, scid, result, status);
3116 mutex_lock(&conn->chan_lock);
/* Prefer lookup by our source CID ... */
3119 chan = __l2cap_get_chan_by_scid(conn, scid);
/* ... fall back to the ident we used in the Connection Request */
3125 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3134 l2cap_chan_lock(chan);
3137 case L2CAP_CR_SUCCESS:
3138 l2cap_state_change(chan, BT_CONFIG);
3141 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Avoid sending a duplicate Configure Request */
3143 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3146 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3147 l2cap_build_conf_req(chan, req), req);
3148 chan->num_conf_req++;
/* Pending result: keep waiting for the final response */
3152 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result refuses the connection */
3156 l2cap_chan_del(chan, ECONNREFUSED);
3160 l2cap_chan_unlock(chan);
3163 mutex_unlock(&conn->chan_lock);
/* Choose the channel's FCS setting once configuration completes:
 * outside ERTM/streaming mode FCS is forced off; within those modes
 * CRC16 is used unless the peer asked for no FCS (CONF_NO_FCS_RECV).
 */
3168 static inline void set_default_fcs(struct l2cap_chan *chan)
3170 /* FCS is enabled only in ERTM or streaming mode, if one or both
3173 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3174 chan->fcs = L2CAP_FCS_NONE;
3175 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3176 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming L2CAP Configure Request.
 * Validates channel state, accumulates (possibly fragmented) config
 * options into chan->conf_req, and once the final fragment arrives
 * parses them and sends a Configure Response.  When both directions are
 * configured the channel moves to BT_CONNECTED (with ERTM init if the
 * mode requires it).  NOTE(review): extraction dropped lines (gotos,
 * braces, returns); comments describe only the visible code.
 */
3179 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3181 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3184 struct l2cap_chan *chan;
3187 dcid = __le16_to_cpu(req->dcid);
3188 flags = __le16_to_cpu(req->flags);
3190 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3192 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject CID */
3196 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3197 struct l2cap_cmd_rej_cid rej;
3199 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3200 rej.scid = cpu_to_le16(chan->scid);
3201 rej.dcid = cpu_to_le16(chan->dcid);
3203 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3208 /* Reject if config buffer is too small. */
3209 len = cmd_len - sizeof(*req);
3210 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3211 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3212 l2cap_build_conf_rsp(chan, rsp,
3213 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the config options */
3218 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3219 chan->conf_len += len;
/* Continuation flag set: more fragments follow */
3221 if (flags & 0x0001) {
3222 /* Incomplete config. Send empty response. */
3223 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3224 l2cap_build_conf_rsp(chan, rsp,
3225 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3229 /* Complete config. */
3230 len = l2cap_parse_conf_req(chan, rsp);
/* Parse failure: drop the link */
3232 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3236 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3237 chan->num_conf_rsp++;
3239 /* Reset config buffer. */
3242 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3245 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
/* Both directions configured: the channel is fully up */
3246 set_default_fcs(chan);
3248 l2cap_state_change(chan, BT_CONNECTED);
3250 if (chan->mode == L2CAP_MODE_ERTM ||
3251 chan->mode == L2CAP_MODE_STREAMING)
3252 err = l2cap_ertm_init(chan);
3255 l2cap_send_disconn_req(chan->conn, chan, -err);
3257 l2cap_chan_ready(chan);
/* If we haven't sent our own Configure Request yet, do so now */
3262 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3264 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3265 l2cap_build_conf_req(chan, buf), buf);
3266 chan->num_conf_req++;
3269 /* Got Conf Rsp PENDING from remote side and asume we sent
3270 Conf Rsp PENDING in the code above */
3271 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3272 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3274 /* check compatibility */
3276 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3277 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3279 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3280 l2cap_build_conf_rsp(chan, rsp,
3281 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3285 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Configure Response.
 * On SUCCESS, extract the agreed RFC option; on PENDING, mark the
 * remote side pending and possibly finish our local pending config;
 * on UNACCEPT, re-negotiate (bounded by L2CAP_CONF_MAX_CONF_RSP);
 * otherwise disconnect.  When input is done and output was already
 * done, the channel becomes BT_CONNECTED.  NOTE(review): extraction
 * dropped lines (switch header, breaks, gotos); comments describe only
 * the visible code.
 */
3289 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3291 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3292 u16 scid, flags, result;
3293 struct l2cap_chan *chan;
3294 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3297 scid = __le16_to_cpu(rsp->scid);
3298 flags = __le16_to_cpu(rsp->flags);
3299 result = __le16_to_cpu(rsp->result);
3301 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3304 chan = l2cap_get_chan_by_scid(conn, scid);
3309 case L2CAP_CONF_SUCCESS:
/* Record the RFC parameters the peer accepted */
3310 l2cap_conf_rfc_get(chan, rsp->data, len);
3311 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3314 case L2CAP_CONF_PENDING:
3315 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3317 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3320 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
/* Parse failure: drop the link */
3323 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3327 /* check compatibility */
3329 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3330 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3332 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3333 l2cap_build_conf_rsp(chan, buf,
3334 L2CAP_CONF_SUCCESS, 0x0000), buf);
3338 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation only a bounded number of times */
3339 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Options too large to re-process: give up on the channel */
3342 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3343 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3347 /* throw out any old stored conf requests */
3348 result = L2CAP_CONF_SUCCESS;
3349 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3352 l2cap_send_disconn_req(conn, chan, ECONNRESET);
/* Send the adjusted Configure Request */
3356 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3357 L2CAP_CONF_REQ, len, req);
3358 chan->num_conf_req++;
3359 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: schedule disconnect */
3365 l2cap_chan_set_err(chan, ECONNRESET);
3367 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3368 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3375 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3377 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
/* Both directions configured: the channel is fully up */
3378 set_default_fcs(chan);
3380 l2cap_state_change(chan, BT_CONNECTED);
3381 if (chan->mode == L2CAP_MODE_ERTM ||
3382 chan->mode == L2CAP_MODE_STREAMING)
3383 err = l2cap_ertm_init(chan);
3386 l2cap_send_disconn_req(chan->conn, chan, -err);
3388 l2cap_chan_ready(chan);
3392 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down, and remove the channel.
 * The channel is held across l2cap_chan_del()/close() so the final put
 * releases it safely.  NOTE(review): extraction dropped some lines;
 * comments describe only the visible code.
 */
3396 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3398 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3399 struct l2cap_disconn_rsp rsp;
3401 struct l2cap_chan *chan;
3404 scid = __le16_to_cpu(req->scid);
3405 dcid = __le16_to_cpu(req->dcid);
3407 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3409 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid: look the channel up by it */
3411 chan = __l2cap_get_chan_by_scid(conn, dcid);
3413 mutex_unlock(&conn->chan_lock);
3417 l2cap_chan_lock(chan);
/* Echo the CIDs back (swapped to the peer's point of view) */
3421 rsp.dcid = cpu_to_le16(chan->scid);
3422 rsp.scid = cpu_to_le16(chan->dcid);
3423 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3426 sk->sk_shutdown = SHUTDOWN_MASK;
/* Hold a ref so the channel survives until close()/put below */
3429 l2cap_chan_hold(chan);
3430 l2cap_chan_del(chan, ECONNRESET);
3432 l2cap_chan_unlock(chan);
3434 chan->ops->close(chan->data);
3435 l2cap_chan_put(chan);
3437 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnection Response: the peer confirmed
 * our disconnect, so remove the channel (error 0 = clean shutdown).
 * Same hold/close/put pattern as l2cap_disconnect_req().
 */
3442 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3444 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3446 struct l2cap_chan *chan;
3448 scid = __le16_to_cpu(rsp->scid);
3449 dcid = __le16_to_cpu(rsp->dcid);
3451 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3453 mutex_lock(&conn->chan_lock);
3455 chan = __l2cap_get_chan_by_scid(conn, scid);
3457 mutex_unlock(&conn->chan_lock);
3461 l2cap_chan_lock(chan);
/* Hold a ref so the channel survives until close()/put below */
3463 l2cap_chan_hold(chan);
3464 l2cap_chan_del(chan, 0);
3466 l2cap_chan_unlock(chan);
3468 chan->ops->close(chan->data);
3469 l2cap_chan_put(chan);
3471 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request.
 * Answers FEAT_MASK with our supported-feature bitmap, FIXED_CHAN with
 * the fixed-channel map, and anything else with NOTSUPP.
 * NOTE(review): extraction dropped some lines (buffer declarations,
 * conditionals); comments describe only the visible code.
 */
3476 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3478 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3481 type = __le16_to_cpu(req->type);
3483 BT_DBG("type 0x%4.4x", type);
3485 if (type == L2CAP_IT_FEAT_MASK) {
3487 u32 feat_mask = l2cap_feat_mask;
3488 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3489 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3490 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming and the extended flow/window features */
3492 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3495 feat_mask |= L2CAP_FEAT_EXT_FLOW
3496 | L2CAP_FEAT_EXT_WINDOW;
3498 put_unaligned_le32(feat_mask, rsp->data);
3499 l2cap_send_cmd(conn, cmd->ident,
3500 L2CAP_INFO_RSP, sizeof(buf), buf);
3501 } else if (type == L2CAP_IT_FIXED_CHAN) {
3503 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Toggle the A2MP bit in the fixed-channel map as appropriate */
3506 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3508 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3510 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3511 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3512 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3513 l2cap_send_cmd(conn, cmd->ident,
3514 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply "not supported" */
3516 struct l2cap_info_rsp rsp;
3517 rsp.type = cpu_to_le16(type);
3518 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3519 l2cap_send_cmd(conn, cmd->ident,
3520 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming L2CAP Information Response.
 * Validates the response against our outstanding request ident, then
 * stores the peer's feature mask (chaining into a FIXED_CHAN request if
 * supported) or the fixed-channel mask, and finally starts pending
 * connections.  NOTE(review): extraction dropped some lines (switch
 * header, breaks, returns); comments describe only the visible code.
 */
3526 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3528 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3531 type = __le16_to_cpu(rsp->type);
3532 result = __le16_to_cpu(rsp->result);
3534 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3536 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3537 if (cmd->ident != conn->info_ident ||
3538 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3541 cancel_delayed_work(&conn->info_timer);
/* Peer refused the request: finish the exchange anyway */
3543 if (result != L2CAP_IR_SUCCESS) {
3544 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3545 conn->info_ident = 0;
3547 l2cap_conn_start(conn);
3553 case L2CAP_IT_FEAT_MASK:
3554 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, ask for its channel map next */
3556 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3557 struct l2cap_info_req req;
3558 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3560 conn->info_ident = l2cap_get_ident(conn);
3562 l2cap_send_cmd(conn, conn->info_ident,
3563 L2CAP_INFO_REQ, sizeof(req), &req);
/* No fixed-channel support: the info exchange is complete */
3565 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3566 conn->info_ident = 0;
3568 l2cap_conn_start(conn);
3572 case L2CAP_IT_FIXED_CHAN:
3573 conn->fixed_chan_mask = rsp->data[0];
3574 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3575 conn->info_ident = 0;
3577 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Currently a placeholder that
 * validates the command length and always answers "no resources".
 */
3584 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3585 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3588 struct l2cap_create_chan_req *req = data;
3589 struct l2cap_create_chan_rsp rsp;
/* Malformed command: wrong payload size */
3592 if (cmd_len != sizeof(*req))
3598 psm = le16_to_cpu(req->psm);
3599 scid = le16_to_cpu(req->scid);
3601 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3603 /* Placeholder: Always reject */
3605 rsp.scid = cpu_to_le16(scid);
3606 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3607 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3609 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the wire format and semantics of
 * a Connection Response, so delegate to l2cap_connect_rsp().
 */
3615 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3616 struct l2cap_cmd_hdr *cmd, void *data)
3618 BT_DBG("conn %p", conn);
3620 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for the given initiator CID and result,
 * reusing the ident of the request being answered.
 */
3623 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3624 u16 icid, u16 result)
3626 struct l2cap_move_chan_rsp rsp;
3628 BT_DBG("icid %d, result %d", icid, result);
3630 rsp.icid = cpu_to_le16(icid);
3631 rsp.result = cpu_to_le16(result);
3633 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirmation with a freshly allocated ident;
 * the ident is recorded on the channel so the confirm-response can be
 * matched later.
 * NOTE(review): chan is dereferenced without a NULL check here, yet the
 * visible caller l2cap_move_channel_rsp() passes NULL — verify against
 * the full source.
 */
3636 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3637 struct l2cap_chan *chan, u16 icid, u16 result)
3639 struct l2cap_move_chan_cfm cfm;
3642 BT_DBG("icid %d, result %d", icid, result);
3644 ident = l2cap_get_ident(conn);
3646 chan->ident = ident;
3648 cfm.icid = cpu_to_le16(icid);
3649 cfm.result = cpu_to_le16(result);
3651 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirmation Response for the given initiator
 * CID, echoing the ident of the confirmation being acknowledged.
 */
3654 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3657 struct l2cap_move_chan_cfm_rsp rsp;
3659 BT_DBG("icid %d", icid);
3661 rsp.icid = cpu_to_le16(icid);
3662 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Placeholder implementation: validates
 * the length and always responds "not allowed".
 */
3665 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3666 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3668 struct l2cap_move_chan_req *req = data;
3670 u16 result = L2CAP_MR_NOT_ALLOWED;
/* Malformed command: wrong payload size */
3672 if (cmd_len != sizeof(*req))
3675 icid = le16_to_cpu(req->icid);
3677 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3682 /* Placeholder: Always refuse */
3683 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder: always confirms the
 * move as UNCONFIRMED (passing a NULL channel — see NOTE on
 * l2cap_send_move_chan_cfm in the full source).
 */
3688 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3689 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3691 struct l2cap_move_chan_rsp *rsp = data;
/* Malformed command: wrong payload size */
3694 if (cmd_len != sizeof(*rsp))
3697 icid = le16_to_cpu(rsp->icid);
3698 result = le16_to_cpu(rsp->result);
3700 BT_DBG("icid %d, result %d", icid, result);
3702 /* Placeholder: Always unconfirmed */
3703 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation: validate the length and
 * acknowledge with a Confirmation Response.
 */
3708 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3709 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3711 struct l2cap_move_chan_cfm *cfm = data;
/* Malformed command: wrong payload size */
3714 if (cmd_len != sizeof(*cfm))
3717 icid = le16_to_cpu(cfm->icid);
3718 result = le16_to_cpu(cfm->result);
3720 BT_DBG("icid %d, result %d", icid, result);
3722 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response.  Only validates the
 * length and logs — the move state machine is not implemented here.
 */
3727 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3728 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3730 struct l2cap_move_chan_cfm_rsp *rsp = data;
/* Malformed command: wrong payload size */
3733 if (cmd_len != sizeof(*rsp))
3736 icid = le16_to_cpu(rsp->icid);
3738 BT_DBG("icid %d", icid);
/* Validate LE connection parameters (units per the LE Connection
 * Parameter Update Request): min/max interval ordering and bounds,
 * supervision-timeout bounds, timeout vs. interval consistency, and
 * slave latency limits.  NOTE(review): the failure `return` lines were
 * dropped by extraction; only the checks themselves are visible.
 */
3743 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
/* Interval sanity: ordered and within 7.5ms..4s (units of 1.25ms) */
3748 if (min > max || min < 6 || max > 3200)
/* Supervision timeout within 100ms..32s (units of 10ms) */
3751 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout must exceed the maximum connection interval */
3754 if (max >= to_multiplier * 8)
/* Latency capped both absolutely (499) and relative to the timeout */
3757 max_latency = (to_multiplier * 8 / max) - 1;
3758 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request.
 * Only valid when we are master; validates the command length, checks
 * the proposed parameters with l2cap_check_conn_param(), replies with
 * accepted/rejected, and on acceptance asks the controller to update
 * the connection.  NOTE(review): extraction dropped some lines
 * (returns, the accept conditional); comments describe only the
 * visible code.
 */
3764 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3765 struct l2cap_cmd_hdr *cmd, u8 *data)
3767 struct hci_conn *hcon = conn->hcon;
3768 struct l2cap_conn_param_update_req *req;
3769 struct l2cap_conn_param_update_rsp rsp;
3770 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may be asked to update connection parameters */
3773 if (!(hcon->link_mode & HCI_LM_MASTER))
3776 cmd_len = __le16_to_cpu(cmd->len);
3777 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3780 req = (struct l2cap_conn_param_update_req *) data;
3781 min = __le16_to_cpu(req->min);
3782 max = __le16_to_cpu(req->max);
3783 latency = __le16_to_cpu(req->latency);
3784 to_multiplier = __le16_to_cpu(req->to_multiplier);
3786 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3787 min, max, latency, to_multiplier);
3789 memset(&rsp, 0, sizeof(rsp));
3791 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3793 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3795 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3797 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the LE controller */
3801 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler based on the
 * command code; unknown codes are logged as errors.  Echo Requests are
 * answered inline by reflecting the payload.
 * NOTE(review): extraction dropped the `break`/`return` lines between
 * cases; each `case` below is a separate handler in the full source.
 */
3806 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3807 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3811 switch (cmd->code) {
3812 case L2CAP_COMMAND_REJ:
3813 l2cap_command_rej(conn, cmd, data);
3816 case L2CAP_CONN_REQ:
3817 err = l2cap_connect_req(conn, cmd, data);
3820 case L2CAP_CONN_RSP:
3821 err = l2cap_connect_rsp(conn, cmd, data);
3824 case L2CAP_CONF_REQ:
3825 err = l2cap_config_req(conn, cmd, cmd_len, data);
3828 case L2CAP_CONF_RSP:
3829 err = l2cap_config_rsp(conn, cmd, data);
3832 case L2CAP_DISCONN_REQ:
3833 err = l2cap_disconnect_req(conn, cmd, data);
3836 case L2CAP_DISCONN_RSP:
3837 err = l2cap_disconnect_rsp(conn, cmd, data);
3840 case L2CAP_ECHO_REQ:
/* Echo: send the received payload straight back */
3841 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3844 case L2CAP_ECHO_RSP:
3847 case L2CAP_INFO_REQ:
3848 err = l2cap_information_req(conn, cmd, data);
3851 case L2CAP_INFO_RSP:
3852 err = l2cap_information_rsp(conn, cmd, data);
3855 case L2CAP_CREATE_CHAN_REQ:
3856 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3859 case L2CAP_CREATE_CHAN_RSP:
3860 err = l2cap_create_channel_rsp(conn, cmd, data);
3863 case L2CAP_MOVE_CHAN_REQ:
3864 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3867 case L2CAP_MOVE_CHAN_RSP:
3868 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3871 case L2CAP_MOVE_CHAN_CFM:
3872 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3875 case L2CAP_MOVE_CHAN_CFM_RSP:
3876 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3880 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only the Connection Parameter
 * Update Request needs real handling; rejects and update responses are
 * ignored, and unknown codes are logged.
 */
3888 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3889 struct l2cap_cmd_hdr *cmd, u8 *data)
3891 switch (cmd->code) {
3892 case L2CAP_COMMAND_REJ:
3895 case L2CAP_CONN_PARAM_UPDATE_REQ:
3896 return l2cap_conn_param_update_req(conn, cmd, data);
3898 case L2CAP_CONN_PARAM_UPDATE_RSP:
3902 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a signaling-channel skb: iterate over the packed commands it
 * contains, validate each header (length within the buffer, non-zero
 * ident), and dispatch to the LE or BR/EDR command dispatcher.  On a
 * dispatcher error, send a Command Reject back.
 * NOTE(review): extraction dropped some lines (len init, loop advance,
 * kfree_skb); comments describe only the visible code.
 */
3907 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3908 struct sk_buff *skb)
3910 u8 *data = skb->data;
3912 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic */
3915 l2cap_raw_recv(conn, skb);
3917 while (len >= L2CAP_CMD_HDR_SIZE) {
3919 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3920 data += L2CAP_CMD_HDR_SIZE;
3921 len -= L2CAP_CMD_HDR_SIZE;
3923 cmd_len = le16_to_cpu(cmd.len);
3925 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command longer than the remaining data, or ident 0, is invalid */
3927 if (cmd_len > len || !cmd.ident) {
3928 BT_DBG("corrupted command");
3932 if (conn->hcon->type == LE_LINK)
3933 err = l2cap_le_sig_cmd(conn, &cmd, data);
3935 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3938 struct l2cap_cmd_rej_unk rej;
3940 BT_ERR("Wrong link type (%d)", err);
3942 /* FIXME: Map err to a valid reason */
3943 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3944 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received ERTM/streaming frame.
 * Trims the FCS off the skb, then recomputes CRC16 over the L2CAP
 * header (which sits hdr_size bytes before skb->data) plus the payload
 * and compares against the received value.  No-op when FCS is disabled.
 * NOTE(review): the return statements were dropped by extraction.
 */
3954 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3956 u16 our_fcs, rcv_fcs;
/* Extended control fields use a larger frame header */
3959 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3960 hdr_size = L2CAP_EXT_HDR_SIZE;
3962 hdr_size = L2CAP_ENH_HDR_SIZE;
3964 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Drop the FCS from the payload; it now sits just past skb->len */
3965 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3966 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3967 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3969 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer: send RNR if we are locally
 * busy, retransmit if the peer was busy, otherwise push pending
 * I-frames; if nothing was sent at all, fall back to an RR so the peer
 * still receives a final response.
 */
3975 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3979 chan->frames_sent = 0;
3981 control |= __set_reqseq(chan, chan->buffer_seq);
3983 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Locally busy: tell the peer "receiver not ready" */
3984 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3985 l2cap_send_sframe(chan, control);
3986 set_bit(CONN_RNR_SENT, &chan->conn_state);
3989 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3990 l2cap_retransmit_frames(chan);
3992 l2cap_ertm_send(chan);
/* Nothing went out and we're not busy: answer with a plain RR */
3994 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3995 chan->frames_sent == 0) {
3996 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3997 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by the frame's tx_seq offset from buffer_seq.
 * Duplicate tx_seq values are detected (the full source rejects them).
 * NOTE(review): extraction dropped the loop header and return lines.
 */
4001 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
4003 struct sk_buff *next_skb;
4004 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence and SAR info in the skb control block */
4006 bt_cb(skb)->control.txseq = tx_seq;
4007 bt_cb(skb)->control.sar = sar;
4009 next_skb = skb_peek(&chan->srej_q);
4011 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* Duplicate frame already queued */
4014 if (bt_cb(next_skb)->control.txseq == tx_seq)
4017 next_tx_seq_offset = __seq_offset(chan,
4018 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
/* Found the first queued frame with a larger offset: insert before it */
4020 if (next_tx_seq_offset > tx_seq_offset) {
4021 __skb_queue_before(&chan->srej_q, next_skb, skb);
4025 if (skb_queue_is_last(&chan->srej_q, next_skb))
4028 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Largest offset so far: append at the tail */
4031 __skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's frag_list, tracking the current tail in
 * *last_frag to keep appends O(1), and update the aggregate skb
 * accounting (len / data_len / truesize).
 */
4036 static void append_skb_frag(struct sk_buff *skb,
4037 struct sk_buff *new_frag, struct sk_buff **last_frag)
4039 /* skb->len reflects data in skb as well as all fragments
4040 * skb->data_len reflects only data in fragments
4042 if (!skb_has_frag_list(skb))
4043 skb_shinfo(skb)->frag_list = new_frag;
4045 new_frag->next = NULL;
4047 (*last_frag)->next = new_frag;
4048 *last_frag = new_frag;
4050 skb->len += new_frag->len;
4051 skb->data_len += new_frag->len;
4052 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from an ERTM/streaming I-frame
 * according to its SAR bits: unsegmented frames are delivered at once;
 * START frames record the announced SDU length and start accumulation;
 * CONTINUE/END frames are appended via frag_list and the complete SDU
 * is delivered when its length matches.  NOTE(review): extraction
 * dropped many lines (error paths, braces, SAR_END case label); comments
 * describe only the visible code.
 */
4055 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
4059 switch (__get_ctrl_sar(chan, control)) {
4060 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: hand it straight up */
4064 err = chan->ops->recv(chan->data, skb);
4067 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix */
4071 chan->sdu_len = get_unaligned_le16(skb->data);
4072 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Announced SDU larger than our MTU: invalid */
4074 if (chan->sdu_len > chan->imtu) {
/* A START fragment must not already contain the whole SDU */
4079 if (skb->len >= chan->sdu_len)
4083 chan->sdu_last_frag = skb;
4089 case L2CAP_SAR_CONTINUE:
4093 append_skb_frag(chan->sdu, skb,
4094 &chan->sdu_last_frag);
/* A CONTINUE fragment must not complete or overflow the SDU */
4097 if (chan->sdu->len >= chan->sdu_len)
/* (END fragment) append and check the SDU is exactly complete */
4107 append_skb_frag(chan->sdu, skb,
4108 &chan->sdu_last_frag);
4111 if (chan->sdu->len != chan->sdu_len)
4114 err = chan->ops->recv(chan->data, chan->sdu);
4117 /* Reassembly complete */
4119 chan->sdu_last_frag = NULL;
/* Error path: discard the partially assembled SDU */
4127 kfree_skb(chan->sdu);
4129 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy state: mark the connection busy, clear any
 * pending SREJ list, and arm the ack timer so an RNR goes out.
 */
4136 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
4138 BT_DBG("chan %p, Enter local busy", chan);
4140 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4141 l2cap_seq_list_clear(&chan->srej_list);
4143 __set_ack_timer(chan);
/* Leave ERTM local-busy state.  If we previously sent an RNR, send an
 * RR with the poll bit set and wait for the peer's final response
 * (CONN_WAIT_F) under the monitor timer; then clear the busy flags.
 */
4146 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4150 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
/* RNR was sent earlier: poll the peer to resynchronize */
4153 control = __set_reqseq(chan, chan->buffer_seq);
4154 control |= __set_ctrl_poll(chan);
4155 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4156 l2cap_send_sframe(chan, control);
4157 chan->retry_count = 1;
4159 __clear_retrans_timer(chan);
4160 __set_monitor_timer(chan);
4162 set_bit(CONN_WAIT_F, &chan->conn_state);
4165 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4166 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4168 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle local-busy flow control;
 * only meaningful in ERTM mode.
 */
4171 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4173 if (chan->mode == L2CAP_MODE_ERTM) {
4175 l2cap_ertm_enter_local_busy(chan);
4177 l2cap_ertm_exit_local_busy(chan);
/* After a selectively-rejected frame arrives, drain the SREJ queue of
 * the now-in-order frames starting at tx_seq, reassembling each into
 * SDUs, until a gap is found or local-busy is entered.  A reassembly
 * error tears the connection down.
 */
4181 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
4183 struct sk_buff *skb;
4186 while ((skb = skb_peek(&chan->srej_q)) &&
4187 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Stop at the first gap in the sequence */
4190 if (bt_cb(skb)->control.txseq != tx_seq)
4193 skb = skb_dequeue(&chan->srej_q);
4194 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4195 err = l2cap_reassemble_sdu(chan, skb, control);
4198 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Advance both the SREJ buffer sequence and the expected tx_seq */
4202 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4203 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for the entries in chan->srej_l, removing the
 * entry that matches tx_seq (it has now been satisfied) and re-queuing
 * the rest at the tail.  NOTE(review): extraction dropped some lines
 * (the matched-entry free path); comments describe only visible code.
 */
4207 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4209 struct srej_list *l, *tmp;
4212 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* This outstanding SREJ has been answered: drop it */
4213 if (l->tx_seq == tx_seq) {
4218 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4219 control |= __set_reqseq(chan, l->tx_seq);
4220 l2cap_send_sframe(chan, control);
/* Keep the entry outstanding, at the end of the list */
4222 list_add_tail(&l->list, &chan->srej_l);
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and the received tx_seq, recording each in both the
 * seq list and the srej_l bookkeeping list.  GFP_ATOMIC allocation may
 * fail (the full source returns -ENOMEM in that case).
 */
4226 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4228 struct srej_list *new;
4231 while (tx_seq != chan->expected_tx_seq) {
4232 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4233 control |= __set_reqseq(chan, chan->expected_tx_seq);
4234 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4235 l2cap_send_sframe(chan, control);
4237 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4241 new->tx_seq = chan->expected_tx_seq;
4243 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4245 list_add_tail(&new->list, &chan->srej_l);
/* Skip past the frame we actually received */
4248 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path.
 * Processes the piggybacked ack (req_seq), validates tx_seq against the
 * window, handles local-busy, and then either: delivers an in-sequence
 * frame (reassembling SDUs and acking every num_to_ack frames), or
 * manages the selective-reject machinery for out-of-sequence frames
 * (queueing, duplicate detection, sending SREJs, entering SREJ_SENT).
 * NOTE(review): extraction dropped many lines (gotos, labels, braces,
 * returns); comments describe only the visible code.
 */
4253 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4255 u16 tx_seq = __get_txseq(chan, rx_control);
4256 u16 req_seq = __get_reqseq(chan, rx_control);
4257 u8 sar = __get_ctrl_sar(chan, rx_control);
4258 int tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold: roughly a sixth of the transmit window */
4259 int num_to_ack = (chan->tx_win/6) + 1;
4262 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4263 tx_seq, rx_control);
/* F-bit answers our poll: stop the monitor timer */
4265 if (__is_ctrl_final(chan, rx_control) &&
4266 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4267 __clear_monitor_timer(chan);
4268 if (chan->unacked_frames > 0)
4269 __set_retrans_timer(chan);
4270 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* Piggybacked acknowledgement */
4273 chan->expected_ack_seq = req_seq;
4274 l2cap_drop_acked_frames(chan);
4276 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4278 /* invalid tx_seq */
4279 if (tx_seq_offset >= chan->tx_win) {
4280 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Locally busy: ack but do not accept the payload */
4284 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4285 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4286 l2cap_send_ack(chan);
4290 if (tx_seq == chan->expected_tx_seq)
/* Out-of-sequence frame while SREJs are outstanding */
4293 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4294 struct srej_list *first;
4296 first = list_first_entry(&chan->srej_l,
4297 struct srej_list, list);
/* This is the oldest missing frame: queue it and drain the gap */
4298 if (tx_seq == first->tx_seq) {
4299 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4300 l2cap_check_srej_gap(chan, tx_seq);
4302 list_del(&first->list);
/* All SREJs satisfied: leave SREJ_SENT state */
4305 if (list_empty(&chan->srej_l)) {
4306 chan->buffer_seq = chan->buffer_seq_srej;
4307 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4308 l2cap_send_ack(chan);
4309 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4312 struct srej_list *l;
4314 /* duplicated tx_seq */
4315 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* If we already SREJ'd this seq, just resend the SREJ frames */
4318 list_for_each_entry(l, &chan->srej_l, list) {
4319 if (l->tx_seq == tx_seq) {
4320 l2cap_resend_srejframe(chan, tx_seq);
4325 err = l2cap_send_srejframe(chan, tx_seq);
4327 l2cap_send_disconn_req(chan->conn, chan, -err);
/* First out-of-sequence frame: enter SREJ state */
4332 expected_tx_seq_offset = __seq_offset(chan,
4333 chan->expected_tx_seq, chan->buffer_seq);
4335 /* duplicated tx_seq */
4336 if (tx_seq_offset < expected_tx_seq_offset)
4339 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4341 BT_DBG("chan %p, Enter SREJ", chan);
4343 INIT_LIST_HEAD(&chan->srej_l);
4344 chan->buffer_seq_srej = chan->buffer_seq;
4346 __skb_queue_head_init(&chan->srej_q);
4347 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4349 /* Set P-bit only if there are some I-frames to ack. */
4350 if (__clear_ack_timer(chan))
4351 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4353 err = l2cap_send_srejframe(chan, tx_seq);
4355 l2cap_send_disconn_req(chan->conn, chan, -err);
/* (expected_tx_seq path) frame is exactly the one we expected */
4362 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* In SREJ state, in-order frames still go through the srej queue */
4364 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4365 bt_cb(skb)->control.txseq = tx_seq;
4366 bt_cb(skb)->control.sar = sar;
4367 __skb_queue_tail(&chan->srej_q, skb);
4371 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4372 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4375 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4379 if (__is_ctrl_final(chan, rx_control)) {
4380 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4381 l2cap_retransmit_frames(chan);
/* Ack every num_to_ack frames; otherwise let the ack timer batch */
4385 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4386 if (chan->num_acked == num_to_ack - 1)
4387 l2cap_send_ack(chan);
4389 __set_ack_timer(chan);
/* Handle a received RR (Receiver Ready) S-frame: process the ack, then
 * respond to a poll, react to a final bit (possible retransmission), or
 * simply clear remote-busy and resume sending.
 */
4398 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4400 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4401 __get_reqseq(chan, rx_control), rx_control);
4403 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4404 l2cap_drop_acked_frames(chan);
/* P-bit: peer is polling us; we must answer with the F-bit set */
4406 if (__is_ctrl_poll(chan, rx_control)) {
4407 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4408 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4409 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4410 (chan->unacked_frames > 0))
4411 __set_retrans_timer(chan);
4413 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4414 l2cap_send_srejtail(chan);
4416 l2cap_send_i_or_rr_or_rnr(chan);
/* F-bit: answer to our poll; retransmit unless a REJ already did */
4419 } else if (__is_ctrl_final(chan, rx_control)) {
4420 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4422 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4423 l2cap_retransmit_frames(chan);
/* Plain RR: peer can receive again; resume transmission */
4426 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4427 (chan->unacked_frames > 0))
4428 __set_retrans_timer(chan);
4430 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4431 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4432 l2cap_send_ack(chan);
4434 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: ack up to req_seq and retransmit from
 * there.  When the F-bit is absent while we await a poll response,
 * remember the REJ (CONN_REJ_ACT) so the later final frame does not
 * trigger a second retransmission.
 */
4438 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4440 u16 tx_seq = __get_reqseq(chan, rx_control);
4442 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4444 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4446 chan->expected_ack_seq = tx_seq;
4447 l2cap_drop_acked_frames(chan);
4449 if (__is_ctrl_final(chan, rx_control)) {
/* Retransmit only if a prior REJ hasn't already done so */
4450 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4451 l2cap_retransmit_frames(chan);
4453 l2cap_retransmit_frames(chan);
4455 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4456 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* ERTM: handle a received Selective Reject (SREJ) supervisory frame.
 *
 * An SREJ requests retransmission of exactly one I-frame (tx_seq),
 * unlike REJ which restarts from req_seq onwards.
 */
4459 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4461 u16 tx_seq = __get_reqseq(chan, rx_control);
4463 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4465 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4467 if (__is_ctrl_poll(chan, rx_control)) {
/* SREJ with P-bit also acknowledges frames before tx_seq. */
4468 chan->expected_ack_seq = tx_seq;
4469 l2cap_drop_acked_frames(chan);
/* Answer the poll: next frame out carries the F-bit. */
4471 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4472 l2cap_retransmit_one_frame(chan, tx_seq);
4474 l2cap_ertm_send(chan);
4476 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
/* Save the rejected sequence so the later F-bit can be matched. */
4477 chan->srej_save_reqseq = tx_seq;
4478 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4480 } else if (__is_ctrl_final(chan, rx_control)) {
/* F-bit closes the SREJ exchange when it matches the saved seq. */
4481 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4482 chan->srej_save_reqseq == tx_seq)
4483 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4485 l2cap_retransmit_one_frame(chan, tx_seq);
4487 l2cap_retransmit_one_frame(chan, tx_seq);
4488 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4489 chan->srej_save_reqseq = tx_seq;
4490 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* ERTM: handle a received Receiver Not Ready (RNR) supervisory frame.
 *
 * The peer cannot accept more I-frames, so mark it busy, stop
 * retransmitting, and answer any poll appropriately.
 */
4495 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4497 u16 tx_seq = __get_reqseq(chan, rx_control);
4499 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4501 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* RNR still acknowledges frames before req_seq. */
4502 chan->expected_ack_seq = tx_seq;
4503 l2cap_drop_acked_frames(chan);
4505 if (__is_ctrl_poll(chan, rx_control))
4506 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4508 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer is busy: suspend retransmission until it recovers. */
4509 __clear_retrans_timer(chan);
4510 if (__is_ctrl_poll(chan, rx_control))
4511 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4515 if (__is_ctrl_poll(chan, rx_control)) {
/* In SREJ recovery, a poll is answered with the tail SREJ. */
4516 l2cap_send_srejtail(chan);
4518 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4519 l2cap_send_sframe(chan, rx_control);
/* ERTM: dispatch a received supervisory (S) frame by its super type.
 *
 * A frame with the F-bit set while we are waiting for one
 * (CONN_WAIT_F) completes the outstanding poll: the monitor timer is
 * stopped and the retransmission timer restarted if frames remain
 * unacknowledged.
 */
4523 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4525 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4527 if (__is_ctrl_final(chan, rx_control) &&
4528 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4529 __clear_monitor_timer(chan);
4530 if (chan->unacked_frames > 0)
4531 __set_retrans_timer(chan);
4532 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* Route to the per-type handler: RR, REJ, SREJ or RNR. */
4535 switch (__get_ctrl_super(chan, rx_control)) {
4536 case L2CAP_SUPER_RR:
4537 l2cap_data_channel_rrframe(chan, rx_control);
4540 case L2CAP_SUPER_REJ:
4541 l2cap_data_channel_rejframe(chan, rx_control);
4544 case L2CAP_SUPER_SREJ:
4545 l2cap_data_channel_srejframe(chan, rx_control);
4548 case L2CAP_SUPER_RNR:
4549 l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM: validate and dispatch one received PDU on an ERTM channel.
 *
 * Strips the control field, verifies FCS and payload length against
 * the negotiated MPS, sanity-checks req_seq against the TX window,
 * then hands the frame to the I-frame or S-frame handler.  Invalid
 * frames that cannot be recovered from trigger a disconnect request.
 */
4557 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4561 int len, next_tx_seq_offset, req_seq_offset;
4563 __unpack_control(chan, skb);
4565 control = __get_control(chan, skb->data);
4566 skb_pull(skb, __ctrl_size(chan));
4570 * We can just drop the corrupted I-frame here.
4571 * Receiver will miss it and start proper recovery
4572 * procedures and ask retransmission.
4574 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length header (first SAR segment)
 * and the trailing FCS, when present. */
4577 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4578 len -= L2CAP_SDULEN_SIZE;
4580 if (chan->fcs == L2CAP_FCS_CRC16)
4581 len -= L2CAP_FCS_SIZE;
/* A payload above the negotiated MPS is a protocol violation. */
4583 if (len > chan->mps) {
4584 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4588 req_seq = __get_reqseq(chan, control);
4590 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4592 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4593 chan->expected_ack_seq);
4595 /* check for invalid req-seq */
4596 if (req_seq_offset > next_tx_seq_offset) {
4597 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4601 if (!__is_sframe(chan, control)) {
4603 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4607 l2cap_data_channel_iframe(chan, control, skb);
4611 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4615 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver an inbound L2CAP data packet to the channel identified by
 * its source CID, handling it per the channel's operating mode:
 *
 *   BASIC     - pass straight to the socket layer (after MTU check);
 *               overflow drops data since basic mode has no flow
 *               control.
 *   ERTM      - full reliable state machine (l2cap_ertm_data_rcv).
 *   STREAMING - unacknowledged; out-of-sequence frames discard the
 *               partially reassembled SDU.
 */
4625 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4627 struct l2cap_chan *chan;
4632 chan = l2cap_get_chan_by_scid(conn, cid);
4634 BT_DBG("unknown cid 0x%4.4x", cid);
4635 /* Drop packet and return */
4640 BT_DBG("chan %p, len %d", chan, skb->len);
4642 if (chan->state != BT_CONNECTED)
4645 switch (chan->mode) {
4646 case L2CAP_MODE_BASIC:
4647 /* If socket recv buffers overflows we drop data here
4648 * which is *bad* because L2CAP has to be reliable.
4649 * But we don't have any other choice. L2CAP doesn't
4650 * provide flow control mechanism. */
4652 if (chan->imtu < skb->len)
4655 if (!chan->ops->recv(chan->data, skb))
4659 case L2CAP_MODE_ERTM:
4660 l2cap_ertm_data_rcv(chan, skb);
4664 case L2CAP_MODE_STREAMING:
4665 control = __get_control(chan, skb->data);
4666 skb_pull(skb, __ctrl_size(chan));
4669 if (l2cap_check_fcs(chan, skb))
/* Deduct SAR header / FCS trailer before the MPS length check. */
4672 if (__is_sar_start(chan, control))
4673 len -= L2CAP_SDULEN_SIZE;
4675 if (chan->fcs == L2CAP_FCS_CRC16)
4676 len -= L2CAP_FCS_SIZE;
/* S-frames are not valid in streaming mode. */
4678 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4681 tx_seq = __get_txseq(chan, control);
4683 if (chan->expected_tx_seq != tx_seq) {
4684 /* Frame(s) missing - must discard partial SDU */
4685 kfree_skb(chan->sdu);
4687 chan->sdu_last_frag = NULL;
4690 /* TODO: Notify userland of missing data */
4693 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4695 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4696 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4701 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4709 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to a channel bound to the
 * given PSM; only BOUND/CONNECTED channels within MTU accept it. */
4714 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4716 struct l2cap_chan *chan;
4718 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4722 BT_DBG("chan %p, len %d", chan, skb->len);
4724 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4727 if (chan->imtu < skb->len)
4730 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed channel) packet to the channel registered
 * for this source CID; mirrors l2cap_conless_channel's checks. */
4739 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4740 struct sk_buff *skb)
4742 struct l2cap_chan *chan;
4744 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
4748 BT_DBG("chan %p, len %d", chan, skb->len);
4750 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4753 if (chan->imtu < skb->len)
4756 if (!chan->ops->recv(chan->data, skb))
/* Parse the Basic L2CAP header of a complete frame and route it by
 * destination CID: signalling, connectionless, ATT/LE data, SMP, or
 * an ordinary data channel.  Frames whose header length disagrees
 * with the skb length are rejected.
 */
4765 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4767 struct l2cap_hdr *lh = (void *) skb->data;
4771 skb_pull(skb, L2CAP_HDR_SIZE);
4772 cid = __le16_to_cpu(lh->cid);
4773 len = __le16_to_cpu(lh->len);
4775 if (len != skb->len) {
4780 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4783 case L2CAP_CID_LE_SIGNALING:
4784 case L2CAP_CID_SIGNALING:
4785 l2cap_sig_channel(conn, skb);
4788 case L2CAP_CID_CONN_LESS:
/* PSM may be unaligned in the payload - read it safely. */
4789 psm = get_unaligned((__le16 *) skb->data);
4791 l2cap_conless_channel(conn, psm, skb);
4794 case L2CAP_CID_LE_DATA:
4795 l2cap_att_channel(conn, cid, skb);
/* SMP rejection tears down the whole connection. */
4799 if (smp_sig_channel(conn, skb))
4800 l2cap_conn_del(conn->hcon, EACCES);
4804 l2cap_data_channel(conn, cid, skb);
4809 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 *
 * Scans listening channels; a listener bound to this adapter's own
 * address ("exact" match) takes precedence over a BDADDR_ANY
 * wildcard.  Returns the accumulated link-mode flags (HCI_LM_ACCEPT,
 * optionally HCI_LM_MASTER when role switch is allowed).
 */
4811 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4813 int exact = 0, lm1 = 0, lm2 = 0;
4814 struct l2cap_chan *c;
4816 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4818 /* Find listening sockets and check their link_mode */
4819 read_lock(&chan_list_lock);
4820 list_for_each_entry(c, &chan_list, global_l) {
4821 struct sock *sk = c->sk;
4823 if (c->state != BT_LISTEN)
/* lm1: listeners bound to this adapter's address. */
4826 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4827 lm1 |= HCI_LM_ACCEPT;
4828 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4829 lm1 |= HCI_LM_MASTER;
/* lm2: wildcard (BDADDR_ANY) listeners, used as fallback. */
4831 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4832 lm2 |= HCI_LM_ACCEPT;
4833 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4834 lm2 |= HCI_LM_MASTER;
4837 read_unlock(&chan_list_lock);
4839 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success create
 * the L2CAP connection object and mark it ready; on failure tear
 * down anything associated with the hci_conn. */
4842 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4844 struct l2cap_conn *conn;
4846 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4849 conn = l2cap_conn_add(hcon, status);
4851 l2cap_conn_ready(conn);
4853 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason code to use for a disconnection
 * indication; defaults to "remote user terminated" when no L2CAP
 * connection state exists. */
4858 int l2cap_disconn_ind(struct hci_conn *hcon)
4860 struct l2cap_conn *conn = hcon->l2cap_data;
4862 BT_DBG("hcon %p", hcon);
4865 return HCI_ERROR_REMOTE_USER_TERM;
4866 return conn->disc_reason;
/* HCI callback: the ACL link went down - tear down the L2CAP
 * connection, translating the HCI reason to an errno. */
4869 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4871 BT_DBG("hcon %p reason %d", hcon, reason);
4873 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 *
 * Encryption dropped: MEDIUM security arms a grace timer, HIGH
 * security closes the channel immediately.  Encryption restored:
 * cancel the MEDIUM-security grace timer.
 */
4877 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4879 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4882 if (encrypt == 0x00) {
4883 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4884 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4885 } else if (chan->sec_level == BT_SECURITY_HIGH)
4886 l2cap_chan_close(chan, ECONNREFUSED);
4888 if (chan->sec_level == BT_SECURITY_MEDIUM)
4889 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption procedure finished.
 *
 * For LE links a successful encryption kicks off SMP key
 * distribution.  For BR/EDR, every channel on the connection is
 * walked: channels whose pending security request succeeded continue
 * connection setup (send connect req, or answer a held connect rsp);
 * failures are timed out or rejected with L2CAP_CR_SEC_BLOCK.
 */
4893 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4895 struct l2cap_conn *conn = hcon->l2cap_data;
4896 struct l2cap_chan *chan;
4901 BT_DBG("conn %p", conn);
4903 if (hcon->type == LE_LINK) {
4904 if (!status && encrypt)
4905 smp_distribute_keys(conn, 0);
4906 cancel_delayed_work(&conn->security_timer);
4909 mutex_lock(&conn->chan_lock);
4911 list_for_each_entry(chan, &conn->chan_l, list) {
4912 l2cap_chan_lock(chan);
4914 BT_DBG("chan->scid %d", chan->scid);
/* LE ATT channel becomes ready once the link is encrypted. */
4916 if (chan->scid == L2CAP_CID_LE_DATA) {
4917 if (!status && encrypt) {
4918 chan->sec_level = hcon->sec_level;
4919 l2cap_chan_ready(chan);
4922 l2cap_chan_unlock(chan);
/* Channels still waiting on a connect response are skipped. */
4926 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4927 l2cap_chan_unlock(chan);
/* Already-established channels just resume the suspended socket. */
4931 if (!status && (chan->state == BT_CONNECTED ||
4932 chan->state == BT_CONFIG)) {
4933 struct sock *sk = chan->sk;
4935 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
4936 sk->sk_state_change(sk);
4938 l2cap_check_encryption(chan, encrypt);
4939 l2cap_chan_unlock(chan);
/* Outgoing connect: security done, now send the connect request;
 * on failure arm the disconnect timer instead. */
4943 if (chan->state == BT_CONNECT) {
4945 l2cap_send_conn_req(chan);
4947 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect held for security: build the deferred response. */
4949 } else if (chan->state == BT_CONNECT2) {
4950 struct sock *sk = chan->sk;
4951 struct l2cap_conn_rsp rsp;
4957 if (test_bit(BT_SK_DEFER_SETUP,
4958 &bt_sk(sk)->flags)) {
/* Userspace must still authorize: answer "pending". */
4959 struct sock *parent = bt_sk(sk)->parent;
4960 res = L2CAP_CR_PEND;
4961 stat = L2CAP_CS_AUTHOR_PEND;
4963 parent->sk_data_ready(parent, 0);
4965 __l2cap_state_change(chan, BT_CONFIG);
4966 res = L2CAP_CR_SUCCESS;
4967 stat = L2CAP_CS_NO_INFO;
/* Security failed: reject and schedule disconnect. */
4970 __l2cap_state_change(chan, BT_DISCONN);
4971 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4972 res = L2CAP_CR_SEC_BLOCK;
4973 stat = L2CAP_CS_NO_INFO;
4978 rsp.scid = cpu_to_le16(chan->dcid);
4979 rsp.dcid = cpu_to_le16(chan->scid);
4980 rsp.result = cpu_to_le16(res);
4981 rsp.status = cpu_to_le16(stat);
4982 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4986 l2cap_chan_unlock(chan);
4989 mutex_unlock(&conn->chan_lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 *
 * A start fragment (no ACL_CONT flag) carries the Basic L2CAP header
 * giving the total frame length; continuation fragments are appended
 * to conn->rx_skb until rx_len reaches zero, at which point the
 * complete frame is handed to l2cap_recv_frame().  Any framing
 * violation marks the connection unreliable (ECOMM).
 */
4994 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4996 struct l2cap_conn *conn = hcon->l2cap_data;
4999 conn = l2cap_conn_add(hcon, 0);
5004 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5006 if (!(flags & ACL_CONT)) {
5007 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress means the
 * previous frame was never completed - drop the partial data. */
5011 BT_ERR("Unexpected start frame (len %d)", skb->len);
5012 kfree_skb(conn->rx_skb);
5013 conn->rx_skb = NULL;
5015 l2cap_conn_unreliable(conn, ECOMM);
5018 /* Start fragment always begin with Basic L2CAP header */
5019 if (skb->len < L2CAP_HDR_SIZE) {
5020 BT_ERR("Frame is too short (len %d)", skb->len);
5021 l2cap_conn_unreliable(conn, ECOMM);
5025 hdr = (struct l2cap_hdr *) skb->data;
5026 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5028 if (len == skb->len) {
5029 /* Complete frame received */
5030 l2cap_recv_frame(conn, skb);
5034 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5036 if (skb->len > len) {
5037 BT_ERR("Frame is too long (len %d, expected len %d)",
5039 l2cap_conn_unreliable(conn, ECOMM);
5043 /* Allocate skb for the complete frame (with header) */
5044 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5048 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5050 conn->rx_len = len - skb->len;
5052 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation fragment with nothing expected is a protocol error. */
5054 if (!conn->rx_len) {
5055 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5056 l2cap_conn_unreliable(conn, ECOMM);
5060 if (skb->len > conn->rx_len) {
5061 BT_ERR("Fragment is too long (len %d, expected %d)",
5062 skb->len, conn->rx_len);
5063 kfree_skb(conn->rx_skb);
5064 conn->rx_skb = NULL;
5066 l2cap_conn_unreliable(conn, ECOMM);
5070 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5072 conn->rx_len -= skb->len;
5074 if (!conn->rx_len) {
5075 /* Complete frame received */
5076 l2cap_recv_frame(conn, conn->rx_skb);
5077 conn->rx_skb = NULL;
/* seq_file show handler: dump one line per global L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode) for the
 * "l2cap" debugfs file. */
5086 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5088 struct l2cap_chan *c;
5090 read_lock(&chan_list_lock);
5092 list_for_each_entry(c, &chan_list, global_l) {
5093 struct sock *sk = c->sk;
5095 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5096 batostr(&bt_sk(sk)->src),
5097 batostr(&bt_sk(sk)->dst),
5098 c->state, __le16_to_cpu(c->psm),
5099 c->scid, c->dcid, c->imtu, c->omtu,
5100 c->sec_level, c->mode);
5103 read_unlock(&chan_list_lock);
/* debugfs open: bind the single-shot seq_file show routine. */
5108 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5110 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the read-only "l2cap" debugfs entry, backed by
 * the seq_file single_open/single_release helpers. */
5113 static const struct file_operations l2cap_debugfs_fops = {
5114 .open = l2cap_debugfs_open,
5116 .llseek = seq_lseek,
5117 .release = single_release,
5120 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket family and create the
 * optional debugfs entry (debugfs failure is logged, not fatal). */
5122 int __init l2cap_init(void)
5126 err = l2cap_init_sockets();
5131 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5132 bt_debugfs, NULL, &l2cap_debugfs_fops);
5134 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs entry and unregister sockets,
 * reversing l2cap_init(). */
5140 void l2cap_exit(void)
5142 debugfs_remove(l2cap_debugfs);
5143 l2cap_cleanup_sockets();
5146 module_param(disable_ertm, bool, 0644);
5147 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");