2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* Module-wide state: the feature mask advertised in information responses,
 * the fixed-channel bitmap, and the global list of all L2CAP channels
 * (chan_list) protected by the chan_list_lock rwlock. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers that are used before they
 * are defined later in this file.
 * NOTE(review): the l2cap_send_cmd() prototype below appears cut short
 * (its trailing "void *data);" is missing) -- presumably lost in
 * extraction; confirm against the full source. */
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 /* ---- L2CAP channels ---- */
/* Linear scan of conn->chan_l for a channel whose destination CID matches.
 * Caller must hold conn->chan_lock (no locking is done here). */
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
81 list_for_each_entry(c, &conn->chan_l, list) {
/* Linear scan of conn->chan_l for a channel whose source CID matches.
 * Caller must hold conn->chan_lock (no locking is done here). */
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
92 list_for_each_entry(c, &conn->chan_l, list) {
99 /* Find channel with given SCID.
100 * Returns the matching channel (conn->chan_lock is held only during the lookup) */
/* Locked wrapper around __l2cap_get_chan_by_scid(): takes conn->chan_lock
 * only for the duration of the list walk and returns the channel found
 * (or NULL). */
101 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 struct l2cap_chan *c;
105 mutex_lock(&conn->chan_lock);
106 c = __l2cap_get_chan_by_scid(conn, cid);
107 mutex_unlock(&conn->chan_lock);
/* Find the channel on this connection whose pending signalling identifier
 * matches 'ident'. Caller must hold conn->chan_lock. */
112 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
114 struct l2cap_chan *c;
116 list_for_each_entry(c, &conn->chan_l, list) {
117 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(): serializes the lookup
 * with conn->chan_lock. */
123 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
125 struct l2cap_chan *c;
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_ident(conn, ident);
129 mutex_unlock(&conn->chan_lock);
/* Search the global channel list for a channel bound to the given source
 * PSM (sport) and source bdaddr. Caller must hold chan_list_lock. */
134 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &chan_list, global_l) {
139 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM under chan_list_lock. A non-zero requested PSM
 * fails if some channel already owns that PSM/source-address pair;
 * otherwise a free dynamic PSM is auto-allocated from the odd values in
 * the 0x1001..0x10ff range (step 2 keeps the PSM odd, as required). */
145 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
149 write_lock(&chan_list_lock);
151 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 for (p = 0x1001; p < 0x1100; p += 2)
165 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
166 chan->psm = cpu_to_le16(p);
167 chan->sport = cpu_to_le16(p);
174 write_unlock(&chan_list_lock);
/* Assign a fixed source CID to the channel, serialized by chan_list_lock. */
178 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
180 write_lock(&chan_list_lock);
184 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic source CID on this connection by
 * scanning [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Caller must hold
 * conn->chan_lock since __l2cap_get_chan_by_scid() is unlocked. */
189 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
191 u16 cid = L2CAP_CID_DYN_START;
193 for (; cid < L2CAP_CID_DYN_END; cid++) {
194 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Unlocked state transition: log old -> new state and notify the channel
 * owner through the state_change callback. */
201 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
203 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
204 state_to_string(state));
207 chan->ops->state_change(chan->data, state);
/* State transition wrapper that works on the channel's socket context
 * before delegating to __l2cap_state_change(). */
210 static void l2cap_state_change(struct l2cap_chan *chan, int state)
212 struct sock *sk = chan->sk;
215 __l2cap_state_change(chan, state);
/* Unlocked helper: record the error on the channel's backing socket. */
219 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
221 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err() for the socket error path. */
226 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
228 struct sock *sk = chan->sk;
231 __l2cap_chan_set_err(chan, err);
235 /* ---- L2CAP sequence number lists ---- */
237 /* For ERTM, ordered lists of sequence numbers must be tracked for
238 * SREJ requests that are received and for frames that are to be
239 * retransmitted. These seq_list functions implement a singly-linked
240 * list in an array, where membership in the list can also be checked
241 * in constant time. Items can also be added to the tail of the list
242 and removed from the head in constant time, without further memory allocations.
/* Initialize a sequence-number list sized for the negotiated ERTM window.
 * The backing array is rounded up to a power of two so (seq & mask) maps
 * 14-bit sequence numbers into the array; every slot starts CLEAR.
 * NOTE(review): no NULL check on the kmalloc() result is visible between
 * the allocation and the first dereference -- presumably the check exists
 * in the lines this dump dropped; confirm against the full source. */
246 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
248 size_t alloc_size, i;
250 /* Allocated size is a power of 2 to map sequence numbers
251 * (which may be up to 14 bits) in to a smaller array that is
252 * sized for the negotiated ERTM transmit windows.
254 alloc_size = roundup_pow_of_two(size);
256 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
260 seq_list->mask = alloc_size - 1;
261 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
262 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
263 for (i = 0; i < alloc_size; i++)
264 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the array backing a sequence-number list. */
269 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
271 kfree(seq_list->list);
/* O(1) membership test: a slot holds CLEAR only when its sequence number
 * is not on the list. */
274 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
277 /* Constant-time check for list membership */
278 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove 'seq' from the singly-linked list stored in the array.
 * Removing the head is O(1); removing an interior element walks the chain
 * to find the predecessor. Returns L2CAP_SEQ_LIST_CLEAR if the list is
 * empty or 'seq' is not found; the tail pointer is fixed up when the tail
 * element itself is removed. */
281 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
283 u16 mask = seq_list->mask;
285 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
286 /* In case someone tries to pop the head of an empty list */
287 return L2CAP_SEQ_LIST_CLEAR;
288 } else if (seq_list->head == seq) {
289 /* Head can be removed in constant time */
290 seq_list->head = seq_list->list[seq & mask];
291 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* List became empty: the old head pointed at the TAIL sentinel. */
293 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
294 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
295 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
298 /* Walk the list to find the sequence number */
299 u16 prev = seq_list->head;
300 while (seq_list->list[prev & mask] != seq) {
301 prev = seq_list->list[prev & mask];
302 if (prev == L2CAP_SEQ_LIST_TAIL)
303 return L2CAP_SEQ_LIST_CLEAR;
306 /* Unlink the number from the list and clear it */
307 seq_list->list[prev & mask] = seq_list->list[seq & mask];
308 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
309 if (seq_list->tail == seq)
310 seq_list->tail = prev;
/* Pop and return the head of the list in O(1) (CLEAR when empty). */
315 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
317 /* Remove the head in constant time */
318 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: clear every array slot (skipped entirely when the list
 * is already empty) and reset head/tail to CLEAR. */
321 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
323 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
325 for (i = 0; i <= seq_list->mask; i++)
326 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
328 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
329 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append 'seq' at the tail in O(1). Duplicates are ignored (a slot that is
 * not CLEAR is already on the list); the first element also becomes the
 * head, and the new tail slot is marked with the TAIL sentinel. */
333 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
335 u16 mask = seq_list->mask;
337 /* All appends happen in constant time */
339 if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
340 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
341 seq_list->head = seq;
343 seq_list->list[seq_list->tail & mask] = seq;
345 seq_list->tail = seq;
346 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: picks a close reason from
 * the channel state (ECONNREFUSED for connected/config or an
 * authenticated connect attempt; other states presumably fall back to a
 * timeout error in lines this dump dropped -- confirm), closes the
 * channel under conn->chan_lock + channel lock, notifies the owner via
 * ops->close(), and drops the timer's channel reference. */
350 static void l2cap_chan_timeout(struct work_struct *work)
352 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
354 struct l2cap_conn *conn = chan->conn;
357 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
359 mutex_lock(&conn->chan_lock);
360 l2cap_chan_lock(chan);
362 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
363 reason = ECONNREFUSED;
364 else if (chan->state == BT_CONNECT &&
365 chan->sec_level != BT_SECURITY_SDP)
366 reason = ECONNREFUSED;
370 l2cap_chan_close(chan, reason);
372 l2cap_chan_unlock(chan);
374 chan->ops->close(chan->data);
375 mutex_unlock(&conn->chan_lock);
377 l2cap_chan_put(chan);
/* Allocate and initialize a new channel: zeroed with GFP_ATOMIC, added to
 * the global chan_list, timer work initialized, state BT_OPEN, refcount 1.
 * NOTE(review): no NULL check on kzalloc() is visible before use --
 * presumably present in dropped lines; confirm against the full source. */
380 struct l2cap_chan *l2cap_chan_create(void)
382 struct l2cap_chan *chan;
384 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
388 mutex_init(&chan->lock);
390 write_lock(&chan_list_lock);
391 list_add(&chan->global_l, &chan_list);
392 write_unlock(&chan_list_lock);
394 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
396 chan->state = BT_OPEN;
398 atomic_set(&chan->refcnt, 1);
400 BT_DBG("chan %p", chan);
/* Unlink the channel from the global list and drop the creation reference
 * taken in l2cap_chan_create(). */
405 void l2cap_chan_destroy(struct l2cap_chan *chan)
407 write_lock(&chan_list_lock);
408 list_del(&chan->global_l);
409 write_unlock(&chan_list_lock);
411 l2cap_chan_put(chan);
/* Reset a channel to protocol defaults: CRC16 FCS, default retransmission
 * and TX-window parameters, low security, and force-active flag set. */
414 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
416 chan->fcs = L2CAP_FCS_CRC16;
417 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
418 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
419 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
420 chan->sec_level = BT_SECURITY_LOW;
422 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection (caller holds conn->chan_lock):
 * assigns CIDs/MTU by channel type -- LE data CIDs on LE links, a freshly
 * allocated dynamic SCID for ACL connection-oriented channels, the
 * connectionless CID for CONN_LESS, and the signalling CID for raw
 * channels -- seeds the default best-effort flow spec, takes a channel
 * reference for the list, and links the channel into conn->chan_l. */
425 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
427 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
428 __le16_to_cpu(chan->psm), chan->dcid);
430 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
434 switch (chan->chan_type) {
435 case L2CAP_CHAN_CONN_ORIENTED:
436 if (conn->hcon->type == LE_LINK) {
438 chan->omtu = L2CAP_LE_DEFAULT_MTU;
439 chan->scid = L2CAP_CID_LE_DATA;
440 chan->dcid = L2CAP_CID_LE_DATA;
442 /* Alloc CID for connection-oriented socket */
443 chan->scid = l2cap_alloc_cid(conn);
444 chan->omtu = L2CAP_DEFAULT_MTU;
448 case L2CAP_CHAN_CONN_LESS:
449 /* Connectionless socket */
450 chan->scid = L2CAP_CID_CONN_LESS;
451 chan->dcid = L2CAP_CID_CONN_LESS;
452 chan->omtu = L2CAP_DEFAULT_MTU;
456 /* Raw socket can send/recv signalling messages only */
457 chan->scid = L2CAP_CID_SIGNALING;
458 chan->dcid = L2CAP_CID_SIGNALING;
459 chan->omtu = L2CAP_DEFAULT_MTU;
462 chan->local_id = L2CAP_BESTEFFORT_ID;
463 chan->local_stype = L2CAP_SERV_BESTEFFORT;
464 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
465 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
466 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
467 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
469 l2cap_chan_hold(chan);
471 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: attach the channel while holding conn->chan_lock. */
474 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
476 mutex_lock(&conn->chan_lock);
477 __l2cap_chan_add(conn, chan);
478 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop the channel timer, unlink it
 * from conn->chan_l (dropping the list reference and the HCI connection
 * reference), move the socket to BT_CLOSED/ZAPPED, record 'err' on it,
 * unlink from a listening parent (waking it) or signal the socket's own
 * state change, then purge all ERTM transmit/SREJ state when the channel
 * had completed configuration in ERTM mode. */
481 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
483 struct sock *sk = chan->sk;
484 struct l2cap_conn *conn = chan->conn;
485 struct sock *parent = bt_sk(sk)->parent;
487 __clear_chan_timer(chan);
489 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
492 /* Delete from channel list */
493 list_del(&chan->list);
495 l2cap_chan_put(chan);
498 hci_conn_put(conn->hcon);
503 __l2cap_state_change(chan, BT_CLOSED);
504 sock_set_flag(sk, SOCK_ZAPPED);
507 __l2cap_chan_set_err(chan, err);
510 bt_accept_unlink(sk);
511 parent->sk_data_ready(parent, 0);
513 sk->sk_state_change(sk);
/* Skip ERTM teardown unless both config directions completed. */
517 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
518 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
521 skb_queue_purge(&chan->tx_q);
523 if (chan->mode == L2CAP_MODE_ERTM) {
524 struct srej_list *l, *tmp;
526 __clear_retrans_timer(chan);
527 __clear_monitor_timer(chan);
528 __clear_ack_timer(chan);
530 skb_queue_purge(&chan->srej_q);
532 l2cap_seq_list_free(&chan->srej_list);
533 l2cap_seq_list_free(&chan->retrans_list);
534 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Tear down a listening socket's backlog: dequeue each not-yet-accepted
 * child, close its channel with ECONNRESET under the channel lock, and
 * notify the owner through ops->close(). */
541 static void l2cap_chan_cleanup_listen(struct sock *parent)
545 BT_DBG("parent %p", parent);
547 /* Close not yet accepted channels */
548 while ((sk = bt_accept_dequeue(parent, NULL))) {
549 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
551 l2cap_chan_lock(chan);
552 __clear_chan_timer(chan);
553 l2cap_chan_close(chan, ECONNRESET);
554 l2cap_chan_unlock(chan);
556 chan->ops->close(chan->data);
/* Close a channel according to its current state:
 * - listening: clean up the accept backlog, then mark closed/zapped;
 * - connected/config on ACL: arm the channel timer and send a
 *   Disconnect request (LE and other types are deleted immediately);
 * - connect-pending (BT_CONNECT2) on ACL: answer the peer's pending
 *   Connect request with SEC_BLOCK (defer_setup) or BAD_PSM, move to
 *   BT_DISCONN, then delete; note rsp.scid/dcid are swapped on purpose --
 *   the response carries the peer's view of the CIDs;
 * - any other state: just zap the socket. */
560 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
562 struct l2cap_conn *conn = chan->conn;
563 struct sock *sk = chan->sk;
565 BT_DBG("chan %p state %s sk %p", chan,
566 state_to_string(chan->state), sk);
568 switch (chan->state) {
571 l2cap_chan_cleanup_listen(sk);
573 __l2cap_state_change(chan, BT_CLOSED);
574 sock_set_flag(sk, SOCK_ZAPPED);
580 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
581 conn->hcon->type == ACL_LINK) {
582 __set_chan_timer(chan, sk->sk_sndtimeo);
583 l2cap_send_disconn_req(conn, chan, reason);
585 l2cap_chan_del(chan, reason);
589 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
590 conn->hcon->type == ACL_LINK) {
591 struct l2cap_conn_rsp rsp;
594 if (bt_sk(sk)->defer_setup)
595 result = L2CAP_CR_SEC_BLOCK;
597 result = L2CAP_CR_BAD_PSM;
598 l2cap_state_change(chan, BT_DISCONN);
600 rsp.scid = cpu_to_le16(chan->dcid);
601 rsp.dcid = cpu_to_le16(chan->scid);
602 rsp.result = cpu_to_le16(result);
603 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
604 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
608 l2cap_chan_del(chan, reason);
613 l2cap_chan_del(chan, reason);
618 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type and security level to an HCI authentication
 * requirement: raw channels use dedicated bonding, PSM 0x0001 (SDP) is
 * downgraded to BT_SECURITY_SDP and never bonds, and everything else uses
 * general bonding (with MITM when security is HIGH). */
624 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
626 if (chan->chan_type == L2CAP_CHAN_RAW) {
627 switch (chan->sec_level) {
628 case BT_SECURITY_HIGH:
629 return HCI_AT_DEDICATED_BONDING_MITM;
630 case BT_SECURITY_MEDIUM:
631 return HCI_AT_DEDICATED_BONDING;
633 return HCI_AT_NO_BONDING;
635 } else if (chan->psm == cpu_to_le16(0x0001)) {
636 if (chan->sec_level == BT_SECURITY_LOW)
637 chan->sec_level = BT_SECURITY_SDP;
639 if (chan->sec_level == BT_SECURITY_HIGH)
640 return HCI_AT_NO_BONDING_MITM;
642 return HCI_AT_NO_BONDING;
644 switch (chan->sec_level) {
645 case BT_SECURITY_HIGH:
646 return HCI_AT_GENERAL_BONDING_MITM;
647 case BT_SECURITY_MEDIUM:
648 return HCI_AT_GENERAL_BONDING;
650 return HCI_AT_NO_BONDING;
655 /* Service level security */
/* Enforce the channel's service-level security on the underlying HCI
 * connection using the auth requirement derived above. */
656 int l2cap_chan_check_security(struct l2cap_chan *chan)
658 struct l2cap_conn *conn = chan->conn;
661 auth_type = l2cap_get_auth_type(chan);
663 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier under conn->lock; the counter
 * wraps within 1..128 since 129-199 are reserved and 200-254 belong to
 * userspace utilities. */
666 static u8 l2cap_get_ident(struct l2cap_conn *conn)
670 /* Get next available identificator.
671 * 1 - 128 are used by kernel.
672 * 129 - 199 are reserved.
673 * 200 - 254 are used by utilities like l2ping, etc.
676 spin_lock(&conn->lock);
678 if (++conn->tx_ident > 128)
683 spin_unlock(&conn->lock);
/* Build a signalling command skb and queue it on the connection's HCI
 * channel at maximum priority; uses ACL_START_NO_FLUSH when the
 * controller supports non-flushable packets. */
688 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
690 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
693 BT_DBG("code 0x%2.2x", code);
698 if (lmp_no_flush_capable(conn->hcon->hdev))
699 flags = ACL_START_NO_FLUSH;
703 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
704 skb->priority = HCI_PRIO_MAX;
706 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb for the channel over ACL: chooses no-flush start
 * flags when the channel is not flushable and the controller supports it,
 * and propagates the channel's force-active power policy. */
709 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
711 struct hci_conn *hcon = chan->conn->hcon;
714 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
717 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
718 lmp_no_flush_capable(hcon->hdev))
719 flags = ACL_START_NO_FLUSH;
723 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
724 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory (S) frame carrying 'control'.
 * Header size depends on FLAG_EXT_CTRL (extended vs enhanced control
 * field) plus an optional CRC16 FCS; pending F-bit / P-bit requests
 * (CONN_SEND_FBIT / CONN_SEND_PBIT) are consumed into the control field.
 * Nothing is sent unless the channel is BT_CONNECTED. */
727 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
730 struct l2cap_hdr *lh;
731 struct l2cap_conn *conn = chan->conn;
734 if (chan->state != BT_CONNECTED)
737 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
738 hlen = L2CAP_EXT_HDR_SIZE;
740 hlen = L2CAP_ENH_HDR_SIZE;
742 if (chan->fcs == L2CAP_FCS_CRC16)
743 hlen += L2CAP_FCS_SIZE;
745 BT_DBG("chan %p, control 0x%8.8x", chan, control);
747 count = min_t(unsigned int, conn->mtu, hlen);
749 control |= __set_sframe(chan);
751 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
752 control |= __set_ctrl_final(chan);
754 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
755 control |= __set_ctrl_poll(chan);
757 skb = bt_skb_alloc(count, GFP_ATOMIC);
761 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
762 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
763 lh->cid = cpu_to_le16(chan->dcid);
765 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers the header and control field (everything before the FCS). */
767 if (chan->fcs == L2CAP_FCS_CRC16) {
768 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
769 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
772 skb->priority = HCI_PRIO_MAX;
773 l2cap_do_send(chan, skb);
/* Send an RNR S-frame (and remember it via CONN_RNR_SENT) while locally
 * busy, otherwise an RR; always piggybacks the current buffer_seq as the
 * ReqSeq acknowledgement. */
776 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
778 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
779 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
780 set_bit(CONN_RNR_SENT, &chan->conn_state);
782 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
784 control |= __set_reqseq(chan, chan->buffer_seq);
786 l2cap_send_sframe(chan, control);
/* Serialize an l2cap_ctrl into the 16-bit enhanced control field:
 * ReqSeq and Final are common; S-frames add Poll/Supervisory bits and the
 * frame-type flag, I-frames add SAR and TxSeq. */
789 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
793 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
794 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
796 if (control->sframe) {
797 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
798 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
799 packed |= L2CAP_CTRL_FRAME_TYPE;
801 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
802 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 16-bit enhanced control field into l2cap_ctrl; the frame-type
 * bit selects S-frame (poll/supervisory) vs I-frame (sar/txseq) fields. */
808 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
810 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
811 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
813 if (enh & L2CAP_CTRL_FRAME_TYPE) {
816 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
817 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
824 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
825 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Serialize an l2cap_ctrl into the 32-bit extended control field (same
 * layout logic as the enhanced variant, wider shift positions). */
832 static u32 __pack_extended_control(struct l2cap_ctrl *control)
836 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
837 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
839 if (control->sframe) {
840 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
841 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
842 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
844 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
845 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field into l2cap_ctrl (mirror of
 * __unpack_enhanced_control with extended bit positions). */
851 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
853 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
854 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
856 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
859 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
860 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
867 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
868 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Dispatch control-field decoding of an incoming skb to the extended or
 * enhanced form based on FLAG_EXT_CTRL; result lands in the skb's
 * control-block. */
875 static inline void __unpack_control(struct l2cap_chan *chan,
878 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
879 __unpack_extended_control(get_unaligned_le32(skb->data),
880 &bt_cb(skb)->control);
882 __unpack_enhanced_control(get_unaligned_le16(skb->data),
883 &bt_cb(skb)->control);
/* Write the packed control field (extended or enhanced per FLAG_EXT_CTRL)
 * into the skb immediately after the basic L2CAP header. */
887 static inline void __pack_control(struct l2cap_chan *chan,
888 struct l2cap_ctrl *control,
891 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
892 put_unaligned_le32(__pack_extended_control(control),
893 skb->data + L2CAP_HDR_SIZE);
895 put_unaligned_le16(__pack_enhanced_control(control),
896 skb->data + L2CAP_HDR_SIZE);
/* True when no Connect request is outstanding for this channel. */
900 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
902 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP Connect request for the channel: allocates a fresh
 * signalling ident, marks the connect as pending, and transmits the
 * request carrying our source CID. */
905 static void l2cap_send_conn_req(struct l2cap_chan *chan)
907 struct l2cap_conn *conn = chan->conn;
908 struct l2cap_conn_req req;
910 req.scid = cpu_to_le16(chan->scid);
913 chan->ident = l2cap_get_ident(conn);
915 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
917 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Start channel establishment: if the remote feature mask has already
 * been requested (and received), send the Connect request once security
 * is satisfied and no connect is pending; otherwise first issue an
 * Information (feature-mask) request and arm the info timeout. */
920 static void l2cap_do_start(struct l2cap_chan *chan)
922 struct l2cap_conn *conn = chan->conn;
924 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
925 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
928 if (l2cap_chan_check_security(chan) &&
929 __l2cap_no_conn_pending(chan))
930 l2cap_send_conn_req(chan);
932 struct l2cap_info_req req;
933 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
935 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
936 conn->info_ident = l2cap_get_ident(conn);
938 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
940 l2cap_send_cmd(conn, conn->info_ident,
941 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether an L2CAP mode is usable given both the remote feature
 * mask and our own (locally always extended with ERTM and streaming);
 * returns non-zero for a supported ERTM/streaming mode. */
945 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
947 u32 local_feat_mask = l2cap_feat_mask;
949 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
952 case L2CAP_MODE_ERTM:
953 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
954 case L2CAP_MODE_STREAMING:
955 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect request for the channel: first silence all ERTM
 * timers, then transmit the request (dcid/scid from the channel), move
 * the channel to BT_DISCONN, and record 'err' on its socket. */
961 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
963 struct sock *sk = chan->sk;
964 struct l2cap_disconn_req req;
969 if (chan->mode == L2CAP_MODE_ERTM) {
970 __clear_retrans_timer(chan);
971 __clear_monitor_timer(chan);
972 __clear_ack_timer(chan);
975 req.dcid = cpu_to_le16(chan->dcid);
976 req.scid = cpu_to_le16(chan->scid);
977 l2cap_send_cmd(conn, l2cap_get_ident(conn),
978 L2CAP_DISCONN_REQ, sizeof(req), &req);
981 __l2cap_state_change(chan, BT_DISCONN);
982 __l2cap_chan_set_err(chan, err);
986 /* ---- L2CAP connections ---- */
/* Drive all connection-oriented channels on 'conn' forward, under
 * conn->chan_lock with each channel individually locked:
 * - BT_CONNECT channels send a Connect request once security passes and
 *   no connect is pending; a mode unsupported by the peer on a
 *   state2-device channel closes it with ECONNRESET instead;
 * - BT_CONNECT2 channels answer the peer's pending Connect request --
 *   success (or authorization-pending when the parent socket defers
 *   setup) when security passes, authentication-pending otherwise --
 *   and on success immediately send the first Configure request.
 * Note rsp.scid/dcid are intentionally swapped: the response reports the
 * CIDs from the peer's perspective. */
987 static void l2cap_conn_start(struct l2cap_conn *conn)
989 struct l2cap_chan *chan, *tmp;
991 BT_DBG("conn %p", conn);
993 mutex_lock(&conn->chan_lock);
995 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
996 struct sock *sk = chan->sk;
998 l2cap_chan_lock(chan);
1000 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1001 l2cap_chan_unlock(chan);
1005 if (chan->state == BT_CONNECT) {
1006 if (!l2cap_chan_check_security(chan) ||
1007 !__l2cap_no_conn_pending(chan)) {
1008 l2cap_chan_unlock(chan);
1012 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1013 && test_bit(CONF_STATE2_DEVICE,
1014 &chan->conf_state)) {
1015 l2cap_chan_close(chan, ECONNRESET);
1016 l2cap_chan_unlock(chan);
1020 l2cap_send_conn_req(chan);
1022 } else if (chan->state == BT_CONNECT2) {
1023 struct l2cap_conn_rsp rsp;
1025 rsp.scid = cpu_to_le16(chan->dcid);
1026 rsp.dcid = cpu_to_le16(chan->scid);
1028 if (l2cap_chan_check_security(chan)) {
1030 if (bt_sk(sk)->defer_setup) {
1031 struct sock *parent = bt_sk(sk)->parent;
1032 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1033 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1035 parent->sk_data_ready(parent, 0);
1038 __l2cap_state_change(chan, BT_CONFIG);
1039 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1040 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1044 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1045 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1048 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1051 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1052 rsp.result != L2CAP_CR_SUCCESS) {
1053 l2cap_chan_unlock(chan);
1057 set_bit(CONF_REQ_SENT, &chan->conf_state);
1058 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1059 l2cap_build_conf_req(chan, buf), buf);
1060 chan->num_conf_req++;
1063 l2cap_chan_unlock(chan);
1066 mutex_unlock(&conn->chan_lock);
1069 /* Find socket with cid and source bdaddr.
1070 * Returns closest match, locked.
/* Global lookup by source CID and source bdaddr: an exact address match
 * returns immediately; a BDADDR_ANY listener is remembered as the closest
 * match and returned if nothing exact is found. */
1072 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1075 struct l2cap_chan *c, *c1 = NULL;
1077 read_lock(&chan_list_lock);
1079 list_for_each_entry(c, &chan_list, global_l) {
1080 struct sock *sk = c->sk;
1082 if (state && c->state != state)
1085 if (c->scid == cid) {
1087 if (!bacmp(&bt_sk(sk)->src, src)) {
1088 read_unlock(&chan_list_lock);
1093 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1098 read_unlock(&chan_list_lock);
/* Handle an incoming LE link: find a listener on the LE data CID, reject
 * when its accept backlog is full, otherwise create a child channel via
 * the listener's new_connection op, hold the HCI connection, copy the
 * link addresses onto the child socket, enqueue it for accept(), attach
 * it to the connection, arm its timer, mark it connected, and wake the
 * listening parent. */
1103 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1105 struct sock *parent, *sk;
1106 struct l2cap_chan *chan, *pchan;
1110 /* Check if we have socket listening on cid */
1111 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1120 /* Check for backlog size */
1121 if (sk_acceptq_is_full(parent)) {
1122 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1126 chan = pchan->ops->new_connection(pchan->data);
1132 hci_conn_hold(conn->hcon);
1134 bacpy(&bt_sk(sk)->src, conn->src);
1135 bacpy(&bt_sk(sk)->dst, conn->dst);
1137 bt_accept_enqueue(parent, sk);
1139 l2cap_chan_add(conn, chan);
1141 __set_chan_timer(chan, sk->sk_sndtimeo);
1143 __l2cap_state_change(chan, BT_CONNECTED);
1144 parent->sk_data_ready(parent, 0);
1147 release_sock(parent);
/* Promote a channel to fully connected: clear configuration state and the
 * channel timer, enter BT_CONNECTED, signal the socket, and wake the
 * accepting parent if this is an incoming (parented) channel. */
1150 static void l2cap_chan_ready(struct l2cap_chan *chan)
1152 struct sock *sk = chan->sk;
1153 struct sock *parent;
1157 parent = bt_sk(sk)->parent;
1159 BT_DBG("sk %p, parent %p", sk, parent);
1161 chan->conf_state = 0;
1162 __clear_chan_timer(chan);
1164 __l2cap_state_change(chan, BT_CONNECTED);
1165 sk->sk_state_change(sk);
1168 parent->sk_data_ready(parent, 0);
/* Link-up handler: for incoming LE links spawn the pending listener child
 * (l2cap_le_conn_ready); for outgoing LE links kick SMP security. Then,
 * for every channel on the connection: on LE, mark ready once SMP
 * security is satisfied; non-connection-oriented channels are marked
 * connected immediately; BT_CONNECT channels proceed with
 * l2cap_do_start(). Runs under conn->chan_lock with each channel locked. */
1173 static void l2cap_conn_ready(struct l2cap_conn *conn)
1175 struct l2cap_chan *chan;
1177 BT_DBG("conn %p", conn);
1179 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1180 l2cap_le_conn_ready(conn);
1182 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1183 smp_conn_security(conn, conn->hcon->pending_sec_level);
1185 mutex_lock(&conn->chan_lock);
1187 list_for_each_entry(chan, &conn->chan_l, list) {
1189 l2cap_chan_lock(chan);
1191 if (conn->hcon->type == LE_LINK) {
1192 if (smp_conn_security(conn, chan->sec_level))
1193 l2cap_chan_ready(chan);
1195 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1196 struct sock *sk = chan->sk;
1197 __clear_chan_timer(chan);
1199 __l2cap_state_change(chan, BT_CONNECTED);
1200 sk->sk_state_change(sk);
1203 } else if (chan->state == BT_CONNECT)
1204 l2cap_do_start(chan);
1206 l2cap_chan_unlock(chan);
1209 mutex_unlock(&conn->chan_lock);
1212 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate 'err' to every channel that demanded reliability
 * (FLAG_FORCE_RELIABLE) when the link can no longer guarantee it. */
1213 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1215 struct l2cap_chan *chan;
1217 BT_DBG("conn %p", conn);
1219 mutex_lock(&conn->chan_lock);
1221 list_for_each_entry(chan, &conn->chan_l, list) {
1222 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1223 __l2cap_chan_set_err(chan, err);
1226 mutex_unlock(&conn->chan_lock);
/* Information-request timeout: give up waiting for the peer's feature
 * mask, mark the exchange done, and continue starting channels anyway. */
1229 static void l2cap_info_timeout(struct work_struct *work)
1231 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1234 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1235 conn->info_ident = 0;
1237 l2cap_conn_start(conn);
/* Tear down the L2CAP connection attached to an HCI link: free any
 * partially reassembled skb, delete every channel (notifying owners via
 * ops->close) under conn->chan_lock, remove the HCI channel, cancel the
 * pending info timer, cancel/destroy SMP state if an SMP exchange was in
 * flight, and detach from the hci_conn. */
1240 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1242 struct l2cap_conn *conn = hcon->l2cap_data;
1243 struct l2cap_chan *chan, *l;
1248 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1250 kfree_skb(conn->rx_skb);
1252 mutex_lock(&conn->chan_lock);
1255 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1256 l2cap_chan_lock(chan);
1258 l2cap_chan_del(chan, err);
1260 l2cap_chan_unlock(chan);
1262 chan->ops->close(chan->data);
1265 mutex_unlock(&conn->chan_lock);
1267 hci_chan_del(conn->hchan);
1269 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1270 cancel_delayed_work_sync(&conn->info_timer);
1272 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1273 cancel_delayed_work_sync(&conn->security_timer);
1274 smp_chan_destroy(conn);
1277 hcon->l2cap_data = NULL;
/* SMP security-procedure timeout: drop the whole connection with
 * ETIMEDOUT. */
1281 static void security_timeout(struct work_struct *work)
1283 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1284 security_timer.work);
1286 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocates an hci_chan and the conn object (the hci_chan is rolled back
 * if the kzalloc fails), selects LE vs ACL MTU, records the link
 * addresses, and initializes locks, the channel list, and the
 * security (LE) or info (BR/EDR) delayed work. */
1289 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1291 struct l2cap_conn *conn = hcon->l2cap_data;
1292 struct hci_chan *hchan;
1297 hchan = hci_chan_create(hcon);
1301 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1303 hci_chan_del(hchan);
1307 hcon->l2cap_data = conn;
1309 conn->hchan = hchan;
1311 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1313 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1314 conn->mtu = hcon->hdev->le_mtu;
1316 conn->mtu = hcon->hdev->acl_mtu;
1318 conn->src = &hcon->hdev->bdaddr;
1319 conn->dst = &hcon->dst;
1321 conn->feat_mask = 0;
1323 spin_lock_init(&conn->lock);
1324 mutex_init(&conn->chan_lock);
1326 INIT_LIST_HEAD(&conn->chan_l);
1328 if (hcon->type == LE_LINK)
1329 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1331 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1333 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1338 /* ---- Socket interface ---- */
1340 /* Find socket with psm and source bdaddr.
1341 * Returns closest match.
/* Global lookup by PSM and source bdaddr: exact address match wins
 * immediately; otherwise a BDADDR_ANY binding is kept as the closest
 * match and returned at the end. */
1343 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1345 struct l2cap_chan *c, *c1 = NULL;
1347 read_lock(&chan_list_lock);
1349 list_for_each_entry(c, &chan_list, global_l) {
1350 struct sock *sk = c->sk;
1352 if (state && c->state != state)
1355 if (c->psm == psm) {
1357 if (!bacmp(&bt_sk(sk)->src, src)) {
1358 read_unlock(&chan_list_lock);
1363 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1368 read_unlock(&chan_list_lock);
/* Initiate an outgoing channel: validate the PSM (must be odd with a zero
 * lsb in the upper byte, unless connecting by fixed CID or as a raw
 * channel), require psm-or-cid for connection-oriented channels, check
 * that the socket state and channel mode allow connecting, then create an
 * LE or ACL HCI connection (LE when dcid is the LE data CID), attach the
 * channel to the resulting l2cap_conn, enter BT_CONNECT with the channel
 * timer armed, and -- if the link is already up -- either complete
 * immediately (non-connection-oriented) or run l2cap_do_start().
 * Note the unlock/add/relock dance around l2cap_chan_add(): chan_lock
 * must be taken before the channel lock to respect lock ordering. */
1373 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1375 struct sock *sk = chan->sk;
1376 bdaddr_t *src = &bt_sk(sk)->src;
1377 struct l2cap_conn *conn;
1378 struct hci_conn *hcon;
1379 struct hci_dev *hdev;
1383 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1384 __le16_to_cpu(chan->psm));
1386 hdev = hci_get_route(dst, src);
1388 return -EHOSTUNREACH;
1392 l2cap_chan_lock(chan);
1394 /* PSM must be odd and lsb of upper byte must be 0 */
1395 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1396 chan->chan_type != L2CAP_CHAN_RAW) {
1401 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1406 switch (chan->mode) {
1407 case L2CAP_MODE_BASIC:
1409 case L2CAP_MODE_ERTM:
1410 case L2CAP_MODE_STREAMING:
1421 switch (sk->sk_state) {
1425 /* Already connecting */
1431 /* Already connected */
1447 /* Set destination address and psm */
1448 bacpy(&bt_sk(sk)->dst, dst);
1455 auth_type = l2cap_get_auth_type(chan);
1457 if (chan->dcid == L2CAP_CID_LE_DATA)
1458 hcon = hci_connect(hdev, LE_LINK, dst,
1459 chan->sec_level, auth_type);
1461 hcon = hci_connect(hdev, ACL_LINK, dst,
1462 chan->sec_level, auth_type);
1465 err = PTR_ERR(hcon);
1469 conn = l2cap_conn_add(hcon, 0);
1476 /* Update source addr of the socket */
1477 bacpy(src, conn->src);
1479 l2cap_chan_unlock(chan);
1480 l2cap_chan_add(conn, chan);
1481 l2cap_chan_lock(chan);
1483 l2cap_state_change(chan, BT_CONNECT);
1484 __set_chan_timer(chan, sk->sk_sndtimeo);
1486 if (hcon->state == BT_CONNECTED) {
1487 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1488 __clear_chan_timer(chan);
1489 if (l2cap_chan_check_security(chan))
1490 l2cap_state_change(chan, BT_CONNECTED);
1492 l2cap_do_start(chan);
1498 l2cap_chan_unlock(chan);
1499 hci_dev_unlock(hdev);
/*
 * Block (interruptibly) until every outstanding ERTM I-frame on the
 * socket's channel has been acknowledged, the connection goes away, a
 * signal arrives, or the timeout elapses.  Classic sleep/wakeup pattern:
 * enqueue on the socket wait queue, re-check the condition after every
 * schedule_timeout().
 *
 * NOTE(review): timeout initialisation and the loop's break/error exits
 * are elided from this extract.
 */
1504 int __l2cap_wait_ack(struct sock *sk)
1506 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1507 DECLARE_WAITQUEUE(wait, current);
1511 add_wait_queue(sk_sleep(sk), &wait);
1512 set_current_state(TASK_INTERRUPTIBLE);
/* Wait while unacked I-frames remain and the link is still up. */
1513 while (chan->unacked_frames > 0 && chan->conn) {
1517 if (signal_pending(current)) {
1518 err = sock_intr_errno(timeo);
1523 timeo = schedule_timeout(timeo);
/* Must re-arm the task state before re-testing the condition. */
1525 set_current_state(TASK_INTERRUPTIBLE);
1527 err = sock_error(sk);
1531 set_current_state(TASK_RUNNING);
1532 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * ERTM monitor timer expiry (delayed work).  Polls the peer with an
 * RR/RNR S-frame carrying the P bit; after remote_max_tx unanswered
 * polls the channel is torn down with ECONNABORTED.  The timer helpers
 * hold a channel reference, released via l2cap_chan_put() on each path.
 */
1536 static void l2cap_monitor_timeout(struct work_struct *work)
1538 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1539 monitor_timer.work);
1541 BT_DBG("chan %p", chan);
1543 l2cap_chan_lock(chan);
/* Retry budget exhausted: abort the channel instead of polling again. */
1545 if (chan->retry_count >= chan->remote_max_tx) {
1546 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1547 l2cap_chan_unlock(chan);
1548 l2cap_chan_put(chan);
1552 chan->retry_count++;
1553 __set_monitor_timer(chan);
1555 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1556 l2cap_chan_unlock(chan);
1557 l2cap_chan_put(chan);
/*
 * ERTM retransmission timer expiry (delayed work).  The retransmission
 * timeout converts into the monitor/poll procedure: start the monitor
 * timer with retry_count = 1, enter the wait-for-F-bit state and poll
 * the peer with an RR/RNR S-frame (P bit set).
 */
1560 static void l2cap_retrans_timeout(struct work_struct *work)
1562 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1563 retrans_timer.work);
1565 BT_DBG("chan %p", chan);
1567 l2cap_chan_lock(chan);
1569 chan->retry_count = 1;
1570 __set_monitor_timer(chan);
/* Expect the peer's F bit in response to our poll. */
1572 set_bit(CONN_WAIT_F, &chan->conn_state);
1574 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1576 l2cap_chan_unlock(chan);
1577 l2cap_chan_put(chan);
/*
 * Release transmitted I-frames from the head of tx_q that the peer has
 * acknowledged, i.e. every frame whose tx_seq precedes expected_ack_seq.
 * Stops at the first frame still awaiting an ack; clears the
 * retransmission timer once nothing is outstanding.
 *
 * NOTE(review): the break and kfree_skb() of the dequeued skb are elided
 * from this extract.
 */
1580 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1582 struct sk_buff *skb;
1584 while ((skb = skb_peek(&chan->tx_q)) &&
1585 chan->unacked_frames) {
/* Head frame not yet acked -> everything behind it is unacked too. */
1586 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1589 skb = skb_dequeue(&chan->tx_q);
1592 chan->unacked_frames--;
1595 if (!chan->unacked_frames)
1596 __clear_retrans_timer(chan);
/*
 * Streaming-mode transmit: drain tx_q completely, stamping each PDU's
 * control field with the next tx_seq and (optionally) a trailing CRC16
 * FCS before handing it to the lower layer.  Streaming mode keeps no
 * copy for retransmission — frames are sent exactly once.
 */
1599 static void l2cap_streaming_send(struct l2cap_chan *chan)
1601 struct sk_buff *skb;
1605 while ((skb = skb_dequeue(&chan->tx_q))) {
/* Patch the sequence number into the already-built control field
 * that sits right after the basic L2CAP header. */
1606 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1607 control |= __set_txseq(chan, chan->next_tx_seq);
1608 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
/* FCS covers the whole PDU except the FCS field itself and is
 * written into the last two bytes reserved by the PDU builder. */
1610 if (chan->fcs == L2CAP_FCS_CRC16) {
1611 fcs = crc16(0, (u8 *)skb->data,
1612 skb->len - L2CAP_FCS_SIZE);
1613 put_unaligned_le16(fcs,
1614 skb->data + skb->len - L2CAP_FCS_SIZE);
1617 l2cap_do_send(chan, skb);
1619 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/*
 * Retransmit the single I-frame with sequence number @tx_seq (SREJ
 * recovery).  Walks tx_q to find the frame, aborts the channel if it
 * already hit the remote's max-transmit limit, then sends a fresh clone
 * with an updated control field (final bit if pending, current
 * buffer_seq as req_seq) and a recomputed FCS.
 */
1623 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1625 struct sk_buff *skb, *tx_skb;
1629 skb = skb_peek(&chan->tx_q);
/* Linear search for the requested sequence number. */
1633 while (bt_cb(skb)->tx_seq != tx_seq) {
1634 if (skb_queue_is_last(&chan->tx_q, skb))
1637 skb = skb_queue_next(&chan->tx_q, skb);
1640 if (chan->remote_max_tx &&
1641 bt_cb(skb)->retries == chan->remote_max_tx) {
1642 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the queued original survives for further retransmits. */
1646 tx_skb = skb_clone(skb, GFP_ATOMIC);
1647 bt_cb(skb)->retries++;
/* Rebuild the control field: keep only the SAR bits, then OR in
 * F bit (if owed), req_seq and the retransmitted tx_seq. */
1649 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1650 control &= __get_sar_mask(chan);
1652 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1653 control |= __set_ctrl_final(chan);
1655 control |= __set_reqseq(chan, chan->buffer_seq);
1656 control |= __set_txseq(chan, tx_seq);
1658 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1660 if (chan->fcs == L2CAP_FCS_CRC16) {
1661 fcs = crc16(0, (u8 *)tx_skb->data,
1662 tx_skb->len - L2CAP_FCS_SIZE);
1663 put_unaligned_le16(fcs,
1664 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1667 l2cap_do_send(chan, tx_skb);
/*
 * ERTM transmit engine: send queued I-frames starting at tx_send_head
 * until the queue is drained or the transmit window fills.  Each frame
 * goes out as a clone so the original stays queued for retransmission.
 * Returns the number of frames sent (via the elided tail) or an error.
 */
1670 static int l2cap_ertm_send(struct l2cap_chan *chan)
1672 struct sk_buff *skb, *tx_skb;
1677 if (chan->state != BT_CONNECTED)
1680 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
/* Frame exhausted its retransmission budget -> abort the channel. */
1682 if (chan->remote_max_tx &&
1683 bt_cb(skb)->retries == chan->remote_max_tx) {
1684 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1688 tx_skb = skb_clone(skb, GFP_ATOMIC);
1690 bt_cb(skb)->retries++;
/* Compose the control field: SAR bits preserved, F bit if owed,
 * piggy-backed req_seq acknowledgement and the new tx_seq. */
1692 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1693 control &= __get_sar_mask(chan);
1695 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1696 control |= __set_ctrl_final(chan);
1698 control |= __set_reqseq(chan, chan->buffer_seq);
1699 control |= __set_txseq(chan, chan->next_tx_seq);
1701 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed/stored through skb->data while the
 * control field above went through tx_skb->data.  skb_clone() shares
 * the underlying data buffer, so both should address the same bytes
 * — confirm; compare l2cap_retransmit_one_frame() which uses
 * tx_skb->data consistently. */
1703 if (chan->fcs == L2CAP_FCS_CRC16) {
1704 fcs = crc16(0, (u8 *)skb->data,
1705 tx_skb->len - L2CAP_FCS_SIZE);
1706 put_unaligned_le16(fcs, skb->data +
1707 tx_skb->len - L2CAP_FCS_SIZE);
1710 l2cap_do_send(chan, tx_skb);
1712 __set_retrans_timer(chan);
1714 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1716 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it is now outstanding, and a
 * pending delayed ack is superseded by the piggy-backed req_seq. */
1718 if (bt_cb(skb)->retries == 1) {
1719 chan->unacked_frames++;
1722 __clear_ack_timer(chan);
1725 chan->frames_sent++;
1727 if (skb_queue_is_last(&chan->tx_q, skb))
1728 chan->tx_send_head = NULL;
1730 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/*
 * Retransmit everything still in tx_q (REJ recovery / poll response):
 * rewind tx_send_head to the queue head and next_tx_seq to the last
 * acknowledged point, then rerun the normal ERTM send path.
 * NOTE(review): the return of @ret is elided from this extract.
 */
1736 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1740 if (!skb_queue_empty(&chan->tx_q))
1741 chan->tx_send_head = chan->tx_q.next;
1743 chan->next_tx_seq = chan->expected_ack_seq;
1744 ret = l2cap_ertm_send(chan);
/*
 * Acknowledge received I-frames up to buffer_seq.  If locally busy,
 * send RNR; otherwise try to piggy-back the ack on pending I-frames
 * via l2cap_ertm_send(), and only fall back to an explicit RR S-frame
 * when nothing was sent.
 */
1748 static void __l2cap_send_ack(struct l2cap_chan *chan)
1752 control |= __set_reqseq(chan, chan->buffer_seq);
1754 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1755 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
/* Remember we told the peer we are busy, so we later send RR. */
1756 set_bit(CONN_RNR_SENT, &chan->conn_state);
1757 l2cap_send_sframe(chan, control);
/* I-frames went out carrying req_seq -> no separate ack needed. */
1761 if (l2cap_ertm_send(chan) > 0)
1764 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1765 l2cap_send_sframe(chan, control);
/* Cancel the delayed-ack timer and emit the acknowledgement now. */
1768 static void l2cap_send_ack(struct l2cap_chan *chan)
1770 __clear_ack_timer(chan);
1771 __l2cap_send_ack(chan);
/*
 * Send an SREJ S-frame with the F bit set, requesting the sequence
 * number of the last (tail) entry on the channel's SREJ list — used
 * when answering a poll while selective-reject recovery is in progress.
 */
1774 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1776 struct srej_list *tail;
1779 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1780 control |= __set_ctrl_final(chan);
/* srej_l is non-empty here; take its last element. */
1782 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1783 control |= __set_reqseq(chan, tail->tx_seq);
1785 l2cap_send_sframe(chan, control);
/*
 * Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder into a chain of
 * continuation fragments (each at most conn->mtu) linked through
 * frag_list.  Fragments are allocated via the channel's alloc_skb op
 * so the owning socket is charged.  Returns 0 or a negative errno.
 * NOTE(review): the len/sent bookkeeping lines are elided here.
 */
1788 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1789 struct msghdr *msg, int len,
1790 int count, struct sk_buff *skb)
1792 struct l2cap_conn *conn = chan->conn;
1793 struct sk_buff **frag;
1796 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1802 /* Continuation fragments (no L2CAP header) */
1803 frag = &skb_shinfo(skb)->frag_list;
1805 count = min_t(unsigned int, conn->mtu, len);
1807 *frag = chan->ops->alloc_skb(chan, count,
1808 msg->msg_flags & MSG_DONTWAIT);
1811 return PTR_ERR(*frag);
1812 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
/* Fragments inherit the head skb's priority. */
1815 (*frag)->priority = skb->priority;
1820 frag = &(*frag)->next;
/*
 * Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the payload copied from @msg (fragmented by
 * l2cap_skbuff_fromiovec if it exceeds the link MTU).  Returns the skb
 * or an ERR_PTR; frees the skb on copy failure.
 */
1826 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1827 struct msghdr *msg, size_t len,
1830 struct l2cap_conn *conn = chan->conn;
1831 struct sk_buff *skb;
1832 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1833 struct l2cap_hdr *lh;
1835 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
/* Linear part holds the header plus as much payload as fits the MTU. */
1837 count = min_t(unsigned int, (conn->mtu - hlen), len);
1839 skb = chan->ops->alloc_skb(chan, count + hlen,
1840 msg->msg_flags & MSG_DONTWAIT);
1844 skb->priority = priority;
1846 /* Create L2CAP header */
1847 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1848 lh->cid = cpu_to_le16(chan->dcid);
/* Header length field covers payload + PSM, not the basic header. */
1849 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1850 put_unaligned(chan->psm, skb_put(skb, 2));
1852 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1853 if (unlikely(err < 0)) {
1855 return ERR_PTR(err);
/*
 * Build a basic-mode (B-frame) PDU: plain L2CAP header plus payload
 * from @msg, fragmented past the link MTU via l2cap_skbuff_fromiovec.
 * Same shape as l2cap_create_connless_pdu but without the PSM field.
 * Returns the skb or an ERR_PTR.
 */
1860 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1861 struct msghdr *msg, size_t len,
1864 struct l2cap_conn *conn = chan->conn;
1865 struct sk_buff *skb;
1866 int err, count, hlen = L2CAP_HDR_SIZE;
1867 struct l2cap_hdr *lh;
1869 BT_DBG("chan %p len %d", chan, (int)len);
1871 count = min_t(unsigned int, (conn->mtu - hlen), len);
1873 skb = chan->ops->alloc_skb(chan, count + hlen,
1874 msg->msg_flags & MSG_DONTWAIT);
1878 skb->priority = priority;
1880 /* Create L2CAP header */
1881 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1882 lh->cid = cpu_to_le16(chan->dcid);
1883 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1885 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1886 if (unlikely(err < 0)) {
1888 return ERR_PTR(err);
/*
 * Build an ERTM/streaming I-frame PDU.  Header layout: basic L2CAP
 * header, enhanced or extended control field, optional 2-byte SDU
 * length (for SAR start frames, when @sdulen != 0), payload, optional
 * 2-byte FCS placeholder (filled in at transmit time).  @control is
 * written as-is; tx_seq etc. are patched in later by the send paths.
 * Returns the skb or an ERR_PTR.
 */
1893 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1894 struct msghdr *msg, size_t len,
1895 u32 control, u16 sdulen)
1897 struct l2cap_conn *conn = chan->conn;
1898 struct sk_buff *skb;
1899 int err, count, hlen;
1900 struct l2cap_hdr *lh;
1902 BT_DBG("chan %p len %d", chan, (int)len);
1905 return ERR_PTR(-ENOTCONN);
/* Extended control field (4 bytes) vs enhanced (2 bytes). */
1907 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1908 hlen = L2CAP_EXT_HDR_SIZE;
1910 hlen = L2CAP_ENH_HDR_SIZE;
1913 hlen += L2CAP_SDULEN_SIZE;
1915 if (chan->fcs == L2CAP_FCS_CRC16)
1916 hlen += L2CAP_FCS_SIZE;
1918 count = min_t(unsigned int, (conn->mtu - hlen), len);
1920 skb = chan->ops->alloc_skb(chan, count + hlen,
1921 msg->msg_flags & MSG_DONTWAIT);
1925 /* Create L2CAP header */
1926 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1927 lh->cid = cpu_to_le16(chan->dcid);
1928 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1930 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1933 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1935 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1936 if (unlikely(err < 0)) {
1938 return ERR_PTR(err);
/* Reserve FCS space now; the real CRC is computed just before send. */
1941 if (chan->fcs == L2CAP_FCS_CRC16)
1942 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1944 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a chain of I-frame
 * PDUs: one SAR_START frame carrying the total SDU length, then
 * CONTINUE frames of remote_mps bytes, ending with a SAR_END frame.
 * PDUs are staged on a local queue and spliced onto tx_q only when the
 * whole SDU segmented successfully (all-or-nothing), then tx_send_head
 * is primed if transmission was idle.
 * NOTE(review): loop framing and the final return are elided here.
 */
1948 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1950 struct sk_buff *skb;
1951 struct sk_buff_head sar_queue;
1955 skb_queue_head_init(&sar_queue);
/* First segment: SAR_START, payload capped at remote_mps, carries
 * the full SDU length in the sdulen field. */
1956 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1957 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1959 return PTR_ERR(skb);
1961 __skb_queue_tail(&sar_queue, skb);
1962 len -= chan->remote_mps;
1963 size += chan->remote_mps;
1968 if (len > chan->remote_mps) {
1969 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1970 buflen = chan->remote_mps;
1972 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1976 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* Mid-SDU failure: drop everything staged so far. */
1978 skb_queue_purge(&sar_queue);
1979 return PTR_ERR(skb);
1982 __skb_queue_tail(&sar_queue, skb);
1986 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1987 if (chan->tx_send_head == NULL)
1988 chan->tx_send_head = sar_queue.next;
/*
 * Top-level channel transmit entry point, dispatching on channel type
 * and mode: connectionless channels send a G-frame directly; basic mode
 * sends a single B-frame after an MTU check; ERTM/streaming either
 * build one unsegmented I-frame or segment the SDU, then kick the
 * mode-specific transmit engine.  Returns bytes queued/sent (via the
 * elided tails) or a negative errno.
 */
1993 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1996 struct sk_buff *skb;
2000 /* Connectionless channel */
2001 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2002 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2004 return PTR_ERR(skb);
2006 l2cap_do_send(chan, skb);
2010 switch (chan->mode) {
2011 case L2CAP_MODE_BASIC:
2012 /* Check outgoing MTU */
2013 if (len > chan->omtu)
2016 /* Create a basic PDU */
2017 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2019 return PTR_ERR(skb);
2021 l2cap_do_send(chan, skb);
2025 case L2CAP_MODE_ERTM:
2026 case L2CAP_MODE_STREAMING:
2027 /* Entire SDU fits into one PDU */
2028 if (len <= chan->remote_mps) {
2029 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
2030 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
2033 return PTR_ERR(skb);
2035 __skb_queue_tail(&chan->tx_q, skb);
2037 if (chan->tx_send_head == NULL)
2038 chan->tx_send_head = skb;
2041 /* Segment SDU into multiples PDUs */
2042 err = l2cap_sar_segment_sdu(chan, msg, len);
2047 if (chan->mode == L2CAP_MODE_STREAMING) {
2048 l2cap_streaming_send(chan);
/* Remote busy while we are waiting for an F-bit reply: queue the
 * data but defer transmission (elided branch). */
2053 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
2054 test_bit(CONN_WAIT_F, &chan->conn_state)) {
2059 err = l2cap_ertm_send(chan);
2066 BT_DBG("bad state %1.1x", chan->mode);
2073 /* Copy frame to all raw sockets on that connection */
2074 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2076 struct sk_buff *nskb;
2077 struct l2cap_chan *chan;
2079 BT_DBG("conn %p", conn);
2081 mutex_lock(&conn->chan_lock);
2083 list_for_each_entry(chan, &conn->chan_l, list) {
2084 struct sock *sk = chan->sk;
2085 if (chan->chan_type != L2CAP_CHAN_RAW)
2088 /* Don't send frame to the socket it came from */
2091 nskb = skb_clone(skb, GFP_ATOMIC);
2095 if (chan->ops->recv(chan->data, nskb))
2099 mutex_unlock(&conn->chan_lock);
2102 /* ---- L2CAP signalling commands ---- */
2103 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2104 u8 code, u8 ident, u16 dlen, void *data)
2106 struct sk_buff *skb, **frag;
2107 struct l2cap_cmd_hdr *cmd;
2108 struct l2cap_hdr *lh;
2111 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2112 conn, code, ident, dlen);
2114 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2115 count = min_t(unsigned int, conn->mtu, len);
2117 skb = bt_skb_alloc(count, GFP_ATOMIC);
2121 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2122 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2124 if (conn->hcon->type == LE_LINK)
2125 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2127 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2129 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2132 cmd->len = cpu_to_le16(dlen);
2135 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2136 memcpy(skb_put(skb, count), data, count);
2142 /* Continuation fragments (no L2CAP header) */
2143 frag = &skb_shinfo(skb)->frag_list;
2145 count = min_t(unsigned int, conn->mtu, len);
2147 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2151 memcpy(skb_put(*frag, count), data, count);
2156 frag = &(*frag)->next;
/*
 * Parse one configuration option at *@ptr: report its @type and payload
 * length @olen and decode @val by size (1/2/4 bytes little-endian, else
 * a pointer to the raw bytes).  Returns the total bytes consumed so the
 * caller can advance through the option list (advance itself elided).
 */
2166 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2168 struct l2cap_conf_opt *opt = *ptr;
2171 len = L2CAP_CONF_OPT_SIZE + opt->len;
2179 *val = *((u8 *) opt->val);
2183 *val = get_unaligned_le16(opt->val);
2187 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer instead of a decoded value. */
2191 *val = (unsigned long) opt->val;
2195 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (type/len/value) at *@ptr and advance
 * the pointer past it.  @val is stored little-endian for 1/2/4-byte
 * options; larger options treat @val as a pointer to raw bytes.
 * Inverse of l2cap_get_conf_opt(); caller guarantees buffer space.
 */
2199 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2201 struct l2cap_conf_opt *opt = *ptr;
2203 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2210 *((u8 *) opt->val) = val;
2214 put_unaligned_le16(val, opt->val);
2218 put_unaligned_le32(val, opt->val);
2222 memcpy(opt->val, (void *) val, len);
2226 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Append an Extended Flow Specification option to a config request.
 * ERTM channels advertise their locally configured service parameters
 * with default access latency / flush timeout; streaming channels
 * advertise best-effort with local MSDU/interarrival values.  Other
 * modes fall through the (elided) default and add nothing.
 */
2229 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2231 struct l2cap_conf_efs efs;
2233 switch (chan->mode) {
2234 case L2CAP_MODE_ERTM:
2235 efs.id = chan->local_id;
2236 efs.stype = chan->local_stype;
2237 efs.msdu = cpu_to_le16(chan->local_msdu);
2238 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2239 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2240 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2243 case L2CAP_MODE_STREAMING:
2245 efs.stype = L2CAP_SERV_BESTEFFORT;
2246 efs.msdu = cpu_to_le16(chan->local_msdu);
2247 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2256 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2257 (unsigned long) &efs);
/*
 * Delayed-ack timer expiry (delayed work): send the pending
 * acknowledgement for received I-frames, then drop the reference the
 * timer helper took on the channel.
 */
2260 static void l2cap_ack_timeout(struct work_struct *work)
2262 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2265 BT_DBG("chan %p", chan);
2267 l2cap_chan_lock(chan);
2269 __l2cap_send_ack(chan);
2271 l2cap_chan_unlock(chan);
2273 l2cap_chan_put(chan);
/*
 * Initialise ERTM per-channel state once configuration completes:
 * zero the sequence counters, set up the retransmission / monitor /
 * ack delayed-work timers, the SREJ receive queue and list, and the
 * two sequence lists sized by local and remote transmit windows.
 * Returns 0 or the negative errno from l2cap_seq_list_init().
 */
2276 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2280 chan->expected_ack_seq = 0;
2281 chan->unacked_frames = 0;
2282 chan->buffer_seq = 0;
2283 chan->num_acked = 0;
2284 chan->frames_sent = 0;
2286 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2287 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2288 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2290 skb_queue_head_init(&chan->srej_q);
2292 INIT_LIST_HEAD(&chan->srej_l);
2293 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2297 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/*
 * Pick an operating mode compatible with the remote's feature mask:
 * keep ERTM/streaming only when the peer advertises support (return of
 * @mode elided), otherwise fall back to basic mode.
 */
2300 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2303 case L2CAP_MODE_STREAMING:
2304 case L2CAP_MODE_ERTM:
2305 if (l2cap_mode_supported(mode, remote_feat_mask))
2309 return L2CAP_MODE_BASIC;
2313 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2315 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2318 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2320 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2323 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2325 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2326 __l2cap_ews_supported(chan)) {
2327 /* use extended control field */
2328 set_bit(FLAG_EXT_CTRL, &chan->flags);
2329 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2331 chan->tx_win = min_t(u16, chan->tx_win,
2332 L2CAP_DEFAULT_TX_WINDOW);
2333 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/*
 * Build an outgoing configuration request for @chan into @data.
 * On the first request, may renegotiate the mode against the remote
 * feature mask.  Always emits MTU (if non-default), then per mode:
 * basic — an RFC option only when the peer supports ERTM/streaming;
 * ERTM — RFC with tx window / max_transmit / PDU size, optional EFS,
 * FCS opt-out and extended-window options; streaming — RFC, optional
 * EFS and FCS opt-out.  Returns the encoded length (elided tail).
 */
2337 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2339 struct l2cap_conf_req *req = data;
2340 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2341 void *ptr = req->data;
2344 BT_DBG("chan %p", chan);
/* Mode selection only happens on the very first config exchange. */
2346 if (chan->num_conf_req || chan->num_conf_rsp)
2349 switch (chan->mode) {
2350 case L2CAP_MODE_STREAMING:
2351 case L2CAP_MODE_ERTM:
2352 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2355 if (__l2cap_efs_supported(chan))
2356 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2360 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2365 if (chan->imtu != L2CAP_DEFAULT_MTU)
2366 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2368 switch (chan->mode) {
2369 case L2CAP_MODE_BASIC:
/* Peers ignorant of ERTM/streaming get no RFC option at all. */
2370 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2371 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2374 rfc.mode = L2CAP_MODE_BASIC;
2376 rfc.max_transmit = 0;
2377 rfc.retrans_timeout = 0;
2378 rfc.monitor_timeout = 0;
2379 rfc.max_pdu_size = 0;
2381 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2382 (unsigned long) &rfc);
2385 case L2CAP_MODE_ERTM:
2386 rfc.mode = L2CAP_MODE_ERTM;
2387 rfc.max_transmit = chan->max_tx;
/* Timeouts are dictated by the responder; request zero. */
2388 rfc.retrans_timeout = 0;
2389 rfc.monitor_timeout = 0;
2391 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2392 L2CAP_EXT_HDR_SIZE -
2395 rfc.max_pdu_size = cpu_to_le16(size);
2397 l2cap_txwin_setup(chan);
2399 rfc.txwin_size = min_t(u16, chan->tx_win,
2400 L2CAP_DEFAULT_TX_WINDOW);
2402 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2403 (unsigned long) &rfc);
2405 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2406 l2cap_add_opt_efs(&ptr, chan);
2408 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Explicitly opt out of FCS when we don't want it. */
2411 if (chan->fcs == L2CAP_FCS_NONE ||
2412 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2413 chan->fcs = L2CAP_FCS_NONE;
2414 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2417 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2418 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2422 case L2CAP_MODE_STREAMING:
2423 rfc.mode = L2CAP_MODE_STREAMING;
2425 rfc.max_transmit = 0;
2426 rfc.retrans_timeout = 0;
2427 rfc.monitor_timeout = 0;
2429 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2430 L2CAP_EXT_HDR_SIZE -
2433 rfc.max_pdu_size = cpu_to_le16(size);
2435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2436 (unsigned long) &rfc);
2438 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2439 l2cap_add_opt_efs(&ptr, chan);
2441 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2444 if (chan->fcs == L2CAP_FCS_NONE ||
2445 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2446 chan->fcs = L2CAP_FCS_NONE;
2447 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2452 req->dcid = cpu_to_le16(chan->dcid);
2453 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's buffered configuration request (chan->conf_req /
 * conf_len) and build our response into @data.  First pass decodes
 * each option (MTU, flush timeout, RFC, FCS, EFS, EWS), flagging
 * unknown non-hint options as L2CAP_CONF_UNKNOWN.  Then the requested
 * mode is reconciled with ours (possible renegotiation or
 * -ECONNREFUSED), and on success the accepted output options are
 * echoed back with our adjustments.  Returns the encoded response
 * length (elided tail) or a negative errno.
 */
2458 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2460 struct l2cap_conf_rsp *rsp = data;
2461 void *ptr = rsp->data;
2462 void *req = chan->conf_req;
2463 int len = chan->conf_len;
2464 int type, hint, olen;
2466 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2467 struct l2cap_conf_efs efs;
2469 u16 mtu = L2CAP_DEFAULT_MTU;
2470 u16 result = L2CAP_CONF_SUCCESS;
2473 BT_DBG("chan %p", chan);
2475 while (len >= L2CAP_CONF_OPT_SIZE) {
2476 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; others must be understood. */
2478 hint = type & L2CAP_CONF_HINT;
2479 type &= L2CAP_CONF_MASK;
2482 case L2CAP_CONF_MTU:
2486 case L2CAP_CONF_FLUSH_TO:
2487 chan->flush_to = val;
2490 case L2CAP_CONF_QOS:
2493 case L2CAP_CONF_RFC:
2494 if (olen == sizeof(rfc))
2495 memcpy(&rfc, (void *) val, olen);
2498 case L2CAP_CONF_FCS:
2499 if (val == L2CAP_FCS_NONE)
2500 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2503 case L2CAP_CONF_EFS:
2505 if (olen == sizeof(efs))
2506 memcpy(&efs, (void *) val, olen);
2509 case L2CAP_CONF_EWS:
/* Peer wants extended windows we can't do -> refuse (elided cond). */
2511 return -ECONNREFUSED;
2513 set_bit(FLAG_EXT_CTRL, &chan->flags);
2514 set_bit(CONF_EWS_RECV, &chan->conf_state);
2515 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2516 chan->remote_tx_win = val;
2523 result = L2CAP_CONF_UNKNOWN;
2524 *((u8 *) ptr++) = type;
/* Mode reconciliation only on the first config exchange. */
2529 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2532 switch (chan->mode) {
2533 case L2CAP_MODE_STREAMING:
2534 case L2CAP_MODE_ERTM:
2535 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2536 chan->mode = l2cap_select_mode(rfc.mode,
2537 chan->conn->feat_mask);
2542 if (__l2cap_efs_supported(chan))
2543 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2545 return -ECONNREFUSED;
2548 if (chan->mode != rfc.mode)
2549 return -ECONNREFUSED;
/* Mismatch after negotiation: reject once, refuse the second time. */
2555 if (chan->mode != rfc.mode) {
2556 result = L2CAP_CONF_UNACCEPT;
2557 rfc.mode = chan->mode;
2559 if (chan->num_conf_rsp == 1)
2560 return -ECONNREFUSED;
2562 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2563 sizeof(rfc), (unsigned long) &rfc);
2566 if (result == L2CAP_CONF_SUCCESS) {
2567 /* Configure output options and let the other side know
2568 * which ones we don't like. */
2570 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2571 result = L2CAP_CONF_UNACCEPT;
2574 set_bit(CONF_MTU_DONE, &chan->conf_state);
2576 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type sanity: both sides must agree unless no-traffic. */
2579 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2580 efs.stype != L2CAP_SERV_NOTRAFIC &&
2581 efs.stype != chan->local_stype) {
2583 result = L2CAP_CONF_UNACCEPT;
2585 if (chan->num_conf_req >= 1)
2586 return -ECONNREFUSED;
2588 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2590 (unsigned long) &efs);
2592 /* Send PENDING Conf Rsp */
2593 result = L2CAP_CONF_PENDING;
2594 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2599 case L2CAP_MODE_BASIC:
2600 chan->fcs = L2CAP_FCS_NONE;
2601 set_bit(CONF_MODE_DONE, &chan->conf_state);
2604 case L2CAP_MODE_ERTM:
2605 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2606 chan->remote_tx_win = rfc.txwin_size;
2608 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2610 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what fits our link MTU. */
2612 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2614 L2CAP_EXT_HDR_SIZE -
2617 rfc.max_pdu_size = cpu_to_le16(size);
2618 chan->remote_mps = size;
/* As responder we dictate the ERTM timeout values. */
2620 rfc.retrans_timeout =
2621 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2622 rfc.monitor_timeout =
2623 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2625 set_bit(CONF_MODE_DONE, &chan->conf_state);
2627 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2628 sizeof(rfc), (unsigned long) &rfc);
2630 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2631 chan->remote_id = efs.id;
2632 chan->remote_stype = efs.stype;
2633 chan->remote_msdu = le16_to_cpu(efs.msdu);
2634 chan->remote_flush_to =
2635 le32_to_cpu(efs.flush_to);
2636 chan->remote_acc_lat =
2637 le32_to_cpu(efs.acc_lat);
2638 chan->remote_sdu_itime =
2639 le32_to_cpu(efs.sdu_itime);
2640 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2641 sizeof(efs), (unsigned long) &efs);
2645 case L2CAP_MODE_STREAMING:
2646 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2648 L2CAP_EXT_HDR_SIZE -
2651 rfc.max_pdu_size = cpu_to_le16(size);
2652 chan->remote_mps = size;
2654 set_bit(CONF_MODE_DONE, &chan->conf_state);
2656 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2657 sizeof(rfc), (unsigned long) &rfc);
2662 result = L2CAP_CONF_UNACCEPT;
2664 memset(&rfc, 0, sizeof(rfc));
2665 rfc.mode = chan->mode;
2668 if (result == L2CAP_CONF_SUCCESS)
2669 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2671 rsp->scid = cpu_to_le16(chan->dcid);
2672 rsp->result = cpu_to_le16(result);
2673 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's configuration *response* (@rsp, @len) and build a
 * follow-up request into @data.  Each returned option (MTU, flush
 * timeout, RFC, EWS, EFS) is validated, absorbed into channel state
 * and echoed in the new request; incompatible mode or EFS service type
 * refuses the connection.  On SUCCESS/PENDING the negotiated ERTM or
 * streaming parameters are committed.  Returns the encoded request
 * length (elided tail) or a negative errno; may update *@result.
 */
2678 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2680 struct l2cap_conf_req *req = data;
2681 void *ptr = req->data;
2684 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2685 struct l2cap_conf_efs efs;
2687 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2689 while (len >= L2CAP_CONF_OPT_SIZE) {
2690 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2693 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject but counter
 * with the minimum so negotiation can continue. */
2694 if (val < L2CAP_DEFAULT_MIN_MTU) {
2695 *result = L2CAP_CONF_UNACCEPT;
2696 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2699 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2702 case L2CAP_CONF_FLUSH_TO:
2703 chan->flush_to = val;
2704 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2708 case L2CAP_CONF_RFC:
2709 if (olen == sizeof(rfc))
2710 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not switch modes mid-negotiation. */
2712 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2713 rfc.mode != chan->mode)
2714 return -ECONNREFUSED;
2718 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2719 sizeof(rfc), (unsigned long) &rfc);
2722 case L2CAP_CONF_EWS:
2723 chan->tx_win = min_t(u16, val,
2724 L2CAP_DEFAULT_EXT_WINDOW);
2725 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2729 case L2CAP_CONF_EFS:
2730 if (olen == sizeof(efs))
2731 memcpy(&efs, (void *)val, olen);
2733 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2734 efs.stype != L2CAP_SERV_NOTRAFIC &&
2735 efs.stype != chan->local_stype)
2736 return -ECONNREFUSED;
2738 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2739 sizeof(efs), (unsigned long) &efs);
/* We insisted on basic mode but the peer answered otherwise. */
2744 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2745 return -ECONNREFUSED;
2747 chan->mode = rfc.mode;
2749 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2751 case L2CAP_MODE_ERTM:
2752 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2753 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2754 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2756 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2757 chan->local_msdu = le16_to_cpu(efs.msdu);
2758 chan->local_sdu_itime =
2759 le32_to_cpu(efs.sdu_itime);
2760 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2761 chan->local_flush_to =
2762 le32_to_cpu(efs.flush_to);
2766 case L2CAP_MODE_STREAMING:
2767 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2771 req->dcid = cpu_to_le16(chan->dcid);
2772 req->flags = cpu_to_le16(0x0000);
/*
 * Fill a minimal configuration response (scid/result/flags, no
 * options) into @data.  Returns the encoded length (return elided
 * from this extract).
 */
2777 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2779 struct l2cap_conf_rsp *rsp = data;
2780 void *ptr = rsp->data;
2782 BT_DBG("chan %p", chan);
2784 rsp->scid = cpu_to_le16(chan->dcid);
2785 rsp->result = cpu_to_le16(result);
2786 rsp->flags = cpu_to_le16(flags);
/*
 * Send the deferred success response to a pending incoming connection
 * (used with deferred setup once userspace accepts), then immediately
 * start configuration by sending our config request — unless one was
 * already sent, guarded by the CONF_REQ_SENT test-and-set.
 */
2791 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2793 struct l2cap_conn_rsp rsp;
2794 struct l2cap_conn *conn = chan->conn;
2797 rsp.scid = cpu_to_le16(chan->dcid);
2798 rsp.dcid = cpu_to_le16(chan->scid);
2799 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2800 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2801 l2cap_send_cmd(conn, chan->ident,
2802 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2804 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2807 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2808 l2cap_build_conf_req(chan, buf), buf);
2809 chan->num_conf_req++;
/*
 * Extract the RFC option from a successful configuration response and
 * commit its timeouts / PDU size to the channel.  If the remote sent
 * no RFC option at all, fall back to sane defaults (a known
 * misbehaviour of some devices) and log an error.  No-op for modes
 * other than ERTM/streaming.
 */
2812 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2816 struct l2cap_conf_rfc rfc;
2818 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2820 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2823 while (len >= L2CAP_CONF_OPT_SIZE) {
2824 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2827 case L2CAP_CONF_RFC:
2828 if (olen == sizeof(rfc))
2829 memcpy(&rfc, (void *)val, olen);
2834 /* Use sane default values in case a misbehaving remote device
2835 * did not send an RFC option.
2837 rfc.mode = chan->mode;
2838 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2839 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2840 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2842 BT_ERR("Expected RFC option was not found, using defaults");
2846 case L2CAP_MODE_ERTM:
2847 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2848 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2849 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2851 case L2CAP_MODE_STREAMING:
2852 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject on the signalling channel.  Only
 * "command not understood" rejections of our own outstanding
 * information request are acted on: cancel the info timer, mark the
 * feature-mask exchange done and restart pending channels.
 */
2856 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2858 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2860 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
/* Must match the ident of the info request we actually sent. */
2863 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2864 cmd->ident == conn->info_ident) {
2865 cancel_delayed_work(&conn->info_timer);
2867 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2868 conn->info_ident = 0;
2870 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request.  Finds a listening channel
 * for the PSM, enforces link security (except for SDP), checks the
 * accept backlog, spawns a child channel/socket, rejects duplicate
 * source CIDs, then answers with SUCCESS / PEND / an error result
 * depending on security and deferred-setup state.  May also kick off
 * the information (feature mask) exchange and, on immediate success,
 * send our first configuration request.
 * NOTE(review): several error/goto paths and the parent-socket lock
 * are elided from this extract.
 */
2876 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2878 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2879 struct l2cap_conn_rsp rsp;
2880 struct l2cap_chan *chan = NULL, *pchan;
2881 struct sock *parent, *sk = NULL;
2882 int result, status = L2CAP_CS_NO_INFO;
2884 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2885 __le16 psm = req->psm;
2887 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2889 /* Check if we have socket listening on psm */
2890 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2892 result = L2CAP_CR_BAD_PSM;
2898 mutex_lock(&conn->chan_lock);
2901 /* Check if the ACL is secure enough (if not SDP) */
2902 if (psm != cpu_to_le16(0x0001) &&
2903 !hci_conn_check_link_mode(conn->hcon)) {
2904 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2905 result = L2CAP_CR_SEC_BLOCK;
2909 result = L2CAP_CR_NO_MEM;
2911 /* Check for backlog size */
2912 if (sk_acceptq_is_full(parent)) {
2913 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2917 chan = pchan->ops->new_connection(pchan->data);
2923 /* Check if we already have channel with that dcid */
2924 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2925 sock_set_flag(sk, SOCK_ZAPPED);
2926 chan->ops->close(chan->data);
2930 hci_conn_hold(conn->hcon);
2932 bacpy(&bt_sk(sk)->src, conn->src);
2933 bacpy(&bt_sk(sk)->dst, conn->dst);
2937 bt_accept_enqueue(parent, sk);
2939 __l2cap_chan_add(conn, chan);
2943 __set_chan_timer(chan, sk->sk_sndtimeo);
2945 chan->ident = cmd->ident;
/* Feature exchange done: security result decides CONFIG vs CONNECT2;
 * otherwise stay pending until the info exchange completes. */
2947 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2948 if (l2cap_chan_check_security(chan)) {
2949 if (bt_sk(sk)->defer_setup) {
2950 __l2cap_state_change(chan, BT_CONNECT2);
2951 result = L2CAP_CR_PEND;
2952 status = L2CAP_CS_AUTHOR_PEND;
2953 parent->sk_data_ready(parent, 0);
2955 __l2cap_state_change(chan, BT_CONFIG);
2956 result = L2CAP_CR_SUCCESS;
2957 status = L2CAP_CS_NO_INFO;
2960 __l2cap_state_change(chan, BT_CONNECT2);
2961 result = L2CAP_CR_PEND;
2962 status = L2CAP_CS_AUTHEN_PEND;
2965 __l2cap_state_change(chan, BT_CONNECT2);
2966 result = L2CAP_CR_PEND;
2967 status = L2CAP_CS_NO_INFO;
2971 release_sock(parent);
2972 mutex_unlock(&conn->chan_lock);
2975 rsp.scid = cpu_to_le16(scid);
2976 rsp.dcid = cpu_to_le16(dcid);
2977 rsp.result = cpu_to_le16(result);
2978 rsp.status = cpu_to_le16(status);
2979 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First pending connection triggers the feature-mask info request. */
2981 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2982 struct l2cap_info_req info;
2983 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2985 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2986 conn->info_ident = l2cap_get_ident(conn);
2988 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
2990 l2cap_send_cmd(conn, conn->info_ident,
2991 L2CAP_INFO_REQ, sizeof(info), &info);
2994 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2995 result == L2CAP_CR_SUCCESS) {
2997 set_bit(CONF_REQ_SENT, &chan->conf_state);
2998 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2999 l2cap_build_conf_req(chan, buf), buf);
3000 chan->num_conf_req++;
/* Handle an L2CAP Connect Response on a BR/EDR link.
 *
 * Looks the channel up under conn->chan_lock, first by our source CID,
 * then by the pending command ident.  On L2CAP_CR_SUCCESS the channel
 * moves to BT_CONFIG and the initial Configure Request is sent (guarded
 * by CONF_REQ_SENT so it goes out only once); a pending result sets
 * CONF_CONNECT_PEND; any other result deletes the channel with
 * ECONNREFUSED.
 *
 * NOTE(review): this listing appears truncated (braces/returns between
 * visible statements are missing) — confirm control flow against the
 * upstream file.
 */
3006 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3008 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3009 u16 scid, dcid, result, status;
3010 struct l2cap_chan *chan;
/* Response fields arrive little-endian on the wire. */
3014 scid = __le16_to_cpu(rsp->scid);
3015 dcid = __le16_to_cpu(rsp->dcid);
3016 result = __le16_to_cpu(rsp->result);
3017 status = __le16_to_cpu(rsp->status);
3019 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3020 dcid, scid, result, status);
3022 mutex_lock(&conn->chan_lock);
/* Prefer lookup by source CID; fall back to the request ident. */
3025 chan = __l2cap_get_chan_by_scid(conn, scid);
3031 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3040 l2cap_chan_lock(chan);
3043 case L2CAP_CR_SUCCESS:
3044 l2cap_state_change(chan, BT_CONFIG);
3047 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Only one initial Configure Request may be sent per channel. */
3049 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3052 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3053 l2cap_build_conf_req(chan, req), req);
3054 chan->num_conf_req++;
3058 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Connection refused by the remote: tear the channel down. */
3062 l2cap_chan_del(chan, ECONNREFUSED);
3066 l2cap_chan_unlock(chan);
3069 mutex_unlock(&conn->chan_lock);
/* Pick the default frame-check-sequence mode for a channel: FCS is
 * meaningful only in ERTM or streaming mode, and CRC16 is used unless
 * the peer indicated it does not want FCS (CONF_NO_FCS_RECV).
 */
3074 static inline void set_default_fcs(struct l2cap_chan *chan)
3076 /* FCS is enabled only in ERTM or streaming mode, if one or both sides request it */
3079 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3080 chan->fcs = L2CAP_FCS_NONE;
3081 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3082 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an L2CAP Configure Request.
 *
 * Accumulates possibly-fragmented configuration options into
 * chan->conf_req (rejecting overflows), responds per fragment, and once
 * the full request is parsed sends the Configure Response.  When both
 * input and output configuration are done, the channel goes to
 * BT_CONNECTED and ERTM state is initialised if applicable.
 *
 * NOTE(review): listing appears truncated (returns/braces between the
 * visible statements are missing) — verify against upstream.
 */
3085 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3087 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3090 struct l2cap_chan *chan;
3093 dcid = __le16_to_cpu(req->dcid);
3094 flags = __le16_to_cpu(req->flags);
3096 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3098 chan = l2cap_get_chan_by_scid(conn, dcid);
3102 l2cap_chan_lock(chan);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with an invalid-CID command rejection. */
3104 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3105 struct l2cap_cmd_rej_cid rej;
3107 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3108 rej.scid = cpu_to_le16(chan->scid);
3109 rej.dcid = cpu_to_le16(chan->dcid);
3111 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3116 /* Reject if config buffer is too small. */
3117 len = cmd_len - sizeof(*req);
3118 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3119 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3120 l2cap_build_conf_rsp(chan, rsp,
3121 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
3126 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3127 chan->conf_len += len;
/* Continuation flag (bit 0): more option fragments will follow. */
3129 if (flags & 0x0001) {
3130 /* Incomplete config. Send empty response. */
3131 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3132 l2cap_build_conf_rsp(chan, rsp,
3133 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3137 /* Complete config. */
3138 len = l2cap_parse_conf_req(chan, rsp);
3140 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3144 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3145 chan->num_conf_rsp++;
3147 /* Reset config buffer. */
3150 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: bring the channel up. */
3153 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3154 set_default_fcs(chan);
3156 l2cap_state_change(chan, BT_CONNECTED);
3158 chan->next_tx_seq = 0;
3159 chan->expected_tx_seq = 0;
3160 skb_queue_head_init(&chan->tx_q);
3161 if (chan->mode == L2CAP_MODE_ERTM)
3162 err = l2cap_ertm_init(chan);
3165 l2cap_send_disconn_req(chan->conn, chan, -err);
3167 l2cap_chan_ready(chan);
3172 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3174 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3175 l2cap_build_conf_req(chan, buf), buf);
3176 chan->num_conf_req++;
3179 /* Got Conf Rsp PENDING from remote side and assume we sent
3180 Conf Rsp PENDING in the code above */
3181 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3182 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3184 /* check compatibility */
3186 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3187 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3189 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3190 l2cap_build_conf_rsp(chan, rsp,
3191 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3195 l2cap_chan_unlock(chan);
/* Handle an L2CAP Configure Response.
 *
 * Dispatches on the result code: SUCCESS records the peer's RFC option,
 * PENDING may trigger our own deferred response, UNACCEPT re-parses the
 * rejected options and retries (bounded by L2CAP_CONF_MAX_CONF_RSP),
 * and anything else disconnects.  When output config completes and
 * input config is already done, the channel becomes BT_CONNECTED.
 *
 * NOTE(review): listing appears truncated — verify exact branching
 * against the upstream file.
 */
3199 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3201 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3202 u16 scid, flags, result;
3203 struct l2cap_chan *chan;
3204 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3207 scid = __le16_to_cpu(rsp->scid);
3208 flags = __le16_to_cpu(rsp->flags);
3209 result = __le16_to_cpu(rsp->result);
3211 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3214 chan = l2cap_get_chan_by_scid(conn, scid);
3218 l2cap_chan_lock(chan);
3221 case L2CAP_CONF_SUCCESS:
3222 l2cap_conf_rfc_get(chan, rsp->data, len);
3223 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3226 case L2CAP_CONF_PENDING:
3227 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* We also answered PENDING earlier: finish our side now. */
3229 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3232 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3235 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3239 /* check compatibility */
3241 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3242 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3244 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3245 l2cap_build_conf_rsp(chan, buf,
3246 L2CAP_CONF_SUCCESS, 0x0000), buf);
3250 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options, but only a bounded number of times. */
3251 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3254 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3255 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3259 /* throw out any old stored conf requests */
3260 result = L2CAP_CONF_SUCCESS;
3261 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3264 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3268 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3269 L2CAP_CONF_REQ, len, req);
3270 chan->num_conf_req++;
3271 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: flag the error and disconnect. */
3277 l2cap_chan_set_err(chan, ECONNRESET);
3279 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3280 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3287 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: bring the channel up. */
3289 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3290 set_default_fcs(chan);
3292 l2cap_state_change(chan, BT_CONNECTED);
3293 chan->next_tx_seq = 0;
3294 chan->expected_tx_seq = 0;
3295 skb_queue_head_init(&chan->tx_q);
3296 if (chan->mode == L2CAP_MODE_ERTM)
3297 err = l2cap_ertm_init(chan);
3300 l2cap_send_disconn_req(chan->conn, chan, -err);
3302 l2cap_chan_ready(chan);
3306 l2cap_chan_unlock(chan);
/* Handle an L2CAP Disconnect Request: acknowledge with a Disconnect
 * Response, mark the socket shut down, delete the channel with
 * ECONNRESET, then close it.  Note the close() runs after
 * l2cap_chan_unlock() but still under conn->chan_lock.
 */
3310 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3312 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3313 struct l2cap_disconn_rsp rsp;
3315 struct l2cap_chan *chan;
3318 scid = __le16_to_cpu(req->scid);
3319 dcid = __le16_to_cpu(req->dcid);
3321 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3323 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, so look up by dcid here. */
3325 chan = __l2cap_get_chan_by_scid(conn, dcid);
3327 mutex_unlock(&conn->chan_lock);
3331 l2cap_chan_lock(chan);
/* Echo the CIDs back, swapped into the responder's perspective. */
3335 rsp.dcid = cpu_to_le16(chan->scid);
3336 rsp.scid = cpu_to_le16(chan->dcid);
3337 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3340 sk->sk_shutdown = SHUTDOWN_MASK;
3343 l2cap_chan_del(chan, ECONNRESET);
3345 l2cap_chan_unlock(chan);
3347 chan->ops->close(chan->data);
3349 mutex_unlock(&conn->chan_lock);
/* Handle an L2CAP Disconnect Response: the peer confirmed our
 * disconnect, so delete the channel with no error and close it.
 */
3354 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3356 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3358 struct l2cap_chan *chan;
3360 scid = __le16_to_cpu(rsp->scid);
3361 dcid = __le16_to_cpu(rsp->dcid);
3363 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3365 mutex_lock(&conn->chan_lock);
3367 chan = __l2cap_get_chan_by_scid(conn, scid);
3369 mutex_unlock(&conn->chan_lock);
3373 l2cap_chan_lock(chan);
/* err == 0: clean, locally initiated disconnect completion. */
3375 l2cap_chan_del(chan, 0);
3377 l2cap_chan_unlock(chan);
3379 chan->ops->close(chan->data);
3381 mutex_unlock(&conn->chan_lock);
/* Handle an L2CAP Information Request.  Answers the feature-mask and
 * fixed-channels queries from local capabilities; any other type gets
 * an L2CAP_IR_NOTSUPP response.
 */
3386 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3388 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3391 type = __le16_to_cpu(req->type);
3393 BT_DBG("type 0x%4.4x", type);
3395 if (type == L2CAP_IT_FEAT_MASK) {
3397 u32 feat_mask = l2cap_feat_mask;
3398 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3399 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3400 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and extended flow/window) support. */
3402 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3405 feat_mask |= L2CAP_FEAT_EXT_FLOW
3406 | L2CAP_FEAT_EXT_WINDOW;
3408 put_unaligned_le32(feat_mask, rsp->data);
3409 l2cap_send_cmd(conn, cmd->ident,
3410 L2CAP_INFO_RSP, sizeof(buf), buf);
3411 } else if (type == L2CAP_IT_FIXED_CHAN) {
3413 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP fixed-channel bit is toggled based on HS availability
 * (condition line not visible in this listing). */
3416 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3418 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3420 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3421 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3422 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3423 l2cap_send_cmd(conn, cmd->ident,
3424 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply "not supported". */
3426 struct l2cap_info_rsp rsp;
3427 rsp.type = cpu_to_le16(type);
3428 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3429 l2cap_send_cmd(conn, cmd->ident,
3430 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an L2CAP Information Response.  Ignores stale/unsolicited
 * responses, cancels the info timer, records the peer's feature mask
 * (chaining a fixed-channels query if supported) or fixed-channel mask,
 * then resumes pending connection setup via l2cap_conn_start().
 */
3436 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3438 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3441 type = __le16_to_cpu(rsp->type);
3442 result = __le16_to_cpu(rsp->result);
3444 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3446 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3447 if (cmd->ident != conn->info_ident ||
3448 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3451 cancel_delayed_work(&conn->info_timer);
/* Peer refused the query: treat the exchange as done anyway. */
3453 if (result != L2CAP_IR_SUCCESS) {
3454 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3455 conn->info_ident = 0;
3457 l2cap_conn_start(conn);
3463 case L2CAP_IT_FEAT_MASK:
3464 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, query them next. */
3466 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3467 struct l2cap_info_req req;
3468 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3470 conn->info_ident = l2cap_get_ident(conn);
3472 l2cap_send_cmd(conn, conn->info_ident,
3473 L2CAP_INFO_REQ, sizeof(req), &req);
3475 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3476 conn->info_ident = 0;
3478 l2cap_conn_start(conn);
3482 case L2CAP_IT_FIXED_CHAN:
3483 conn->fixed_chan_mask = rsp->data[0];
3484 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3485 conn->info_ident = 0;
3487 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  AMP channel moves are not yet
 * implemented, so after validating the command length this always
 * replies L2CAP_CR_NO_MEM (see the Placeholder comment below).
 */
3494 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3495 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3498 struct l2cap_create_chan_req *req = data;
3499 struct l2cap_create_chan_rsp rsp;
/* Malformed request: wrong payload size. */
3502 if (cmd_len != sizeof(*req))
3508 psm = le16_to_cpu(req->psm);
3509 scid = le16_to_cpu(req->scid);
3511 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3513 /* Placeholder: Always reject */
3515 rsp.scid = cpu_to_le16(scid);
3516 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3517 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3519 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response by delegating to the normal
 * Connect Response handler — the response formats are compatible.
 */
3525 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3526 struct l2cap_cmd_hdr *cmd, void *data)
3528 BT_DBG("conn %p", conn);
3530 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response carrying @icid and @result, reusing the
 * @ident of the request being answered.
 */
3533 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3534 u16 icid, u16 result)
3536 struct l2cap_move_chan_rsp rsp;
3538 BT_DBG("icid %d, result %d", icid, result);
3540 rsp.icid = cpu_to_le16(icid);
3541 rsp.result = cpu_to_le16(result);
3543 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a freshly allocated ident, which is
 * also recorded on @chan so the later Confirm Response can be matched.
 * NOTE(review): caller may pass chan == NULL (see the rsp handler);
 * whether that is guarded here is not visible in this listing.
 */
3546 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3547 struct l2cap_chan *chan, u16 icid, u16 result)
3549 struct l2cap_move_chan_cfm cfm;
3552 BT_DBG("icid %d, result %d", icid, result);
3554 ident = l2cap_get_ident(conn);
3556 chan->ident = ident;
3558 cfm.icid = cpu_to_le16(icid);
3559 cfm.result = cpu_to_le16(result);
3561 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm by echoing its @icid back with the
 * same @ident in a Confirm Response.
 */
3564 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3567 struct l2cap_move_chan_cfm_rsp rsp;
3569 BT_DBG("icid %d", icid);
3571 rsp.icid = cpu_to_le16(icid);
3572 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  AMP channel moves are unimplemented:
 * after a length check this always responds L2CAP_MR_NOT_ALLOWED.
 */
3575 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3576 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3578 struct l2cap_move_chan_req *req = data;
3580 u16 result = L2CAP_MR_NOT_ALLOWED;
3582 if (cmd_len != sizeof(*req))
3585 icid = le16_to_cpu(req->icid);
3587 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3592 /* Placeholder: Always refuse */
3593 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder implementation: always
 * sends back an UNCONFIRMED Move Channel Confirm (with a NULL chan —
 * see the NOTE on l2cap_send_move_chan_cfm).
 */
3598 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3599 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3601 struct l2cap_move_chan_rsp *rsp = data;
3604 if (cmd_len != sizeof(*rsp))
3607 icid = le16_to_cpu(rsp->icid);
3608 result = le16_to_cpu(rsp->result);
3610 BT_DBG("icid %d, result %d", icid, result);
3612 /* Placeholder: Always unconfirmed */
3613 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate length, then acknowledge it
 * with a Confirm Response.  No state is moved yet (AMP placeholder).
 */
3618 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3619 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3621 struct l2cap_move_chan_cfm *cfm = data;
3624 if (cmd_len != sizeof(*cfm))
3627 icid = le16_to_cpu(cfm->icid);
3628 result = le16_to_cpu(cfm->result);
3630 BT_DBG("icid %d, result %d", icid, result);
3632 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: length-check and log only;
 * nothing further to do in this placeholder implementation.
 */
3637 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3638 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3640 struct l2cap_move_chan_cfm_rsp *rsp = data;
3643 if (cmd_len != sizeof(*rsp))
3646 icid = le16_to_cpu(rsp->icid);
3648 BT_DBG("icid %d", icid);
3653 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3658 if (min > max || min < 6 || max > 3200)
3661 if (to_multiplier < 10 || to_multiplier > 3200)
3664 if (max >= to_multiplier * 8)
3667 max_latency = (to_multiplier * 8 / max) - 1;
3668 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request.
 *
 * Only meaningful when we are master of the link; validates the command
 * length, checks the proposed parameters with l2cap_check_conn_param(),
 * sends an accept/reject response, and on acceptance asks the
 * controller to apply the update via hci_le_conn_update().
 */
3674 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3675 struct l2cap_cmd_hdr *cmd, u8 *data)
3677 struct hci_conn *hcon = conn->hcon;
3678 struct l2cap_conn_param_update_req *req;
3679 struct l2cap_conn_param_update_rsp rsp;
3680 u16 min, max, latency, to_multiplier, cmd_len;
/* Slaves must not process this request. */
3683 if (!(hcon->link_mode & HCI_LM_MASTER))
3686 cmd_len = __le16_to_cpu(cmd->len);
3687 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3690 req = (struct l2cap_conn_param_update_req *) data;
3691 min = __le16_to_cpu(req->min);
3692 max = __le16_to_cpu(req->max);
3693 latency = __le16_to_cpu(req->latency);
3694 to_multiplier = __le16_to_cpu(req->to_multiplier);
3696 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3697 min, max, latency, to_multiplier);
3699 memset(&rsp, 0, sizeof(rsp));
3701 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3703 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3705 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3707 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the controller. */
3711 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler based on the
 * command code.  Echo requests are answered inline by reflecting the
 * payload; unknown codes are logged (and presumably rejected with
 * -EINVAL upstream — the return lines are not visible here).
 */
3716 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3717 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3721 switch (cmd->code) {
3722 case L2CAP_COMMAND_REJ:
3723 l2cap_command_rej(conn, cmd, data);
3726 case L2CAP_CONN_REQ:
3727 err = l2cap_connect_req(conn, cmd, data);
3730 case L2CAP_CONN_RSP:
3731 err = l2cap_connect_rsp(conn, cmd, data);
3734 case L2CAP_CONF_REQ:
3735 err = l2cap_config_req(conn, cmd, cmd_len, data);
3738 case L2CAP_CONF_RSP:
3739 err = l2cap_config_rsp(conn, cmd, data);
3742 case L2CAP_DISCONN_REQ:
3743 err = l2cap_disconnect_req(conn, cmd, data);
3746 case L2CAP_DISCONN_RSP:
3747 err = l2cap_disconnect_rsp(conn, cmd, data);
3750 case L2CAP_ECHO_REQ:
/* Echo: mirror the request payload straight back. */
3751 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3754 case L2CAP_ECHO_RSP:
3757 case L2CAP_INFO_REQ:
3758 err = l2cap_information_req(conn, cmd, data);
3761 case L2CAP_INFO_RSP:
3762 err = l2cap_information_rsp(conn, cmd, data);
3765 case L2CAP_CREATE_CHAN_REQ:
3766 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3769 case L2CAP_CREATE_CHAN_RSP:
3770 err = l2cap_create_channel_rsp(conn, cmd, data);
3773 case L2CAP_MOVE_CHAN_REQ:
3774 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3777 case L2CAP_MOVE_CHAN_RSP:
3778 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3781 case L2CAP_MOVE_CHAN_CFM:
3782 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3785 case L2CAP_MOVE_CHAN_CFM_RSP:
3786 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3790 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command.  Only the connection-parameter
 * update request carries logic; command-reject and update-response are
 * accepted silently, and unknown codes are logged as errors.
 */
3798 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3799 struct l2cap_cmd_hdr *cmd, u8 *data)
3801 switch (cmd->code) {
3802 case L2CAP_COMMAND_REJ:
3805 case L2CAP_CONN_PARAM_UPDATE_REQ:
3806 return l2cap_conn_param_update_req(conn, cmd, data);
3808 case L2CAP_CONN_PARAM_UPDATE_RSP:
3812 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signalling channel payload of @skb: iterate over the
 * concatenated commands, validating each header (declared length must
 * fit the remaining data and the ident must be non-zero), and hand each
 * command to the LE or BR/EDR dispatcher depending on link type.  A
 * handler error is answered with a NOT_UNDERSTOOD command rejection.
 */
3817 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3818 struct sk_buff *skb)
3820 u8 *data = skb->data;
3822 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signalling traffic first. */
3825 l2cap_raw_recv(conn, skb);
3827 while (len >= L2CAP_CMD_HDR_SIZE) {
3829 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3830 data += L2CAP_CMD_HDR_SIZE;
3831 len -= L2CAP_CMD_HDR_SIZE;
3833 cmd_len = le16_to_cpu(cmd.len);
3835 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated commands and the reserved ident 0. */
3837 if (cmd_len > len || !cmd.ident) {
3838 BT_DBG("corrupted command");
3842 if (conn->hcon->type == LE_LINK)
3843 err = l2cap_le_sig_cmd(conn, &cmd, data);
3845 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3848 struct l2cap_cmd_rej_unk rej;
3850 BT_ERR("Wrong link type (%d)", err);
3852 /* FIXME: Map err to a valid reason */
3853 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3854 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received ERTM/streaming frame.
 * Trims the FCS off the skb, recomputes the CRC over header + payload
 * (the header sits just before skb->data after the earlier pull), and
 * compares.  When FCS is disabled the frame passes unchecked.
 */
3864 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3866 u16 our_fcs, rcv_fcs;
/* Extended control field implies the larger frame header. */
3869 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3870 hdr_size = L2CAP_EXT_HDR_SIZE;
3872 hdr_size = L2CAP_ENH_HDR_SIZE;
3874 if (chan->fcs == L2CAP_FCS_CRC16) {
3875 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3876 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3877 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3879 if (our_fcs != rcv_fcs)
/* After exiting WAIT_F (poll answered), resume the send side: announce
 * local busy with an RNR, retransmit if the peer had been busy, push
 * pending I-frames, and if nothing went out while not busy, send an RR
 * so the peer still gets our current receive sequence number.
 */
3885 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3889 chan->frames_sent = 0;
3891 control |= __set_reqseq(chan, chan->buffer_seq);
3893 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3894 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3895 l2cap_send_sframe(chan, control);
3896 set_bit(CONN_RNR_SENT, &chan->conn_state);
3899 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3900 l2cap_retransmit_frames(chan);
3902 l2cap_ertm_send(chan);
/* Nothing was transmitted and we are not busy: ack with an RR. */
3904 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3905 chan->frames_sent == 0) {
3906 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3907 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq offset relative to buffer_seq.  Duplicate sequence
 * numbers are detected (the early-return path is not fully visible in
 * this listing); otherwise the skb is placed before the first later
 * frame, or appended at the tail.
 */
3911 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3913 struct sk_buff *next_skb;
3914 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block. */
3916 bt_cb(skb)->tx_seq = tx_seq;
3917 bt_cb(skb)->sar = sar;
3919 next_skb = skb_peek(&chan->srej_q);
3921 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* Duplicate frame already queued. */
3924 if (bt_cb(next_skb)->tx_seq == tx_seq)
3927 next_tx_seq_offset = __seq_offset(chan,
3928 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3930 if (next_tx_seq_offset > tx_seq_offset) {
3931 __skb_queue_before(&chan->srej_q, next_skb, skb);
3935 if (skb_queue_is_last(&chan->srej_q, next_skb))
3938 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3941 __skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's fragment list, tracking the list tail via
 * @last_frag to keep appends O(1), and account its length into the
 * parent skb's len/data_len/truesize.
 */
3946 static void append_skb_frag(struct sk_buff *skb,
3947 struct sk_buff *new_frag, struct sk_buff **last_frag)
3949 /* skb->len reflects data in skb as well as all fragments;
3950 * skb->data_len reflects only data in fragments */
3952 if (!skb_has_frag_list(skb))
3953 skb_shinfo(skb)->frag_list = new_frag;
3955 new_frag->next = NULL;
3957 (*last_frag)->next = new_frag;
3958 *last_frag = new_frag;
3960 skb->len += new_frag->len;
3961 skb->data_len += new_frag->len;
3962 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from segmented I-frames based on the SAR bits in
 * @control.  Unsegmented frames are delivered directly; START frames
 * record the advertised SDU length (bounded by the channel MTU) and
 * seed chan->sdu; CONTINUE/END frames are chained on as fragments, and
 * a completed SDU is handed to chan->ops->recv.  On error the partial
 * SDU is freed.
 *
 * NOTE(review): listing appears truncated (several guard branches are
 * missing) — confirm the error paths against upstream.
 */
3965 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3969 switch (__get_ctrl_sar(chan, control)) {
3970 case L2CAP_SAR_UNSEGMENTED:
3974 err = chan->ops->recv(chan->data, skb);
3977 case L2CAP_SAR_START:
/* First two payload bytes carry the total SDU length. */
3981 chan->sdu_len = get_unaligned_le16(skb->data);
3982 skb_pull(skb, L2CAP_SDULEN_SIZE);
3984 if (chan->sdu_len > chan->imtu) {
3989 if (skb->len >= chan->sdu_len)
3993 chan->sdu_last_frag = skb;
3999 case L2CAP_SAR_CONTINUE:
4003 append_skb_frag(chan->sdu, skb,
4004 &chan->sdu_last_frag);
/* Middle fragment must not complete (or overflow) the SDU. */
4007 if (chan->sdu->len >= chan->sdu_len)
4017 append_skb_frag(chan->sdu, skb,
4018 &chan->sdu_last_frag);
/* END fragment must make the SDU exactly the advertised length. */
4021 if (chan->sdu->len != chan->sdu_len)
4024 err = chan->ops->recv(chan->data, chan->sdu);
4027 /* Reassembly complete */
4029 chan->sdu_last_frag = NULL;
/* Failure path: drop the partially assembled SDU. */
4037 kfree_skb(chan->sdu);
4039 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: flag the condition, drop any pending
 * selective-reject bookkeeping, and arm the ack timer so an RNR gets
 * sent to the peer.
 */
4046 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
4048 BT_DBG("chan %p, Enter local busy", chan);
4050 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4051 l2cap_seq_list_clear(&chan->srej_list);
4053 __set_ack_timer(chan);
/* Leave the ERTM local-busy state.  If an RNR had been sent, poll the
 * peer with an RR (P-bit set) and enter the WAIT_F monitor phase;
 * finally clear the busy/RNR flags.
 */
4056 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4060 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4063 control = __set_reqseq(chan, chan->buffer_seq);
4064 control |= __set_ctrl_poll(chan);
4065 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4066 l2cap_send_sframe(chan, control);
4067 chan->retry_count = 1;
/* Switch from retransmission to monitor timer while awaiting F-bit. */
4069 __clear_retrans_timer(chan);
4070 __set_monitor_timer(chan);
4072 set_bit(CONN_WAIT_F, &chan->conn_state);
4075 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4076 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4078 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to signal receive-buffer pressure:
 * in ERTM mode, toggle between the local-busy enter/exit paths.
 */
4081 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4083 if (chan->mode == L2CAP_MODE_ERTM) {
4085 l2cap_ertm_enter_local_busy(chan);
4087 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue: deliver consecutively sequenced frames starting
 * at @tx_seq (stopping at the first gap or when local-busy is raised),
 * reassembling each and advancing buffer_seq_srej.  A reassembly error
 * disconnects the channel.
 */
4091 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
4093 struct sk_buff *skb;
4096 while ((skb = skb_peek(&chan->srej_q)) &&
4097 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Stop at the first missing sequence number. */
4100 if (bt_cb(skb)->tx_seq != tx_seq)
4103 skb = skb_dequeue(&chan->srej_q);
4104 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
4105 err = l2cap_reassemble_sdu(chan, skb, control);
4108 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4112 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4113 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for every entry in chan->srej_l up to and
 * including @tx_seq; entries before the match are re-queued at the tail
 * (the list_del for consumed entries is in a line not visible here).
 */
4117 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4119 struct srej_list *l, *tmp;
4122 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
4123 if (l->tx_seq == tx_seq) {
4128 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4129 control |= __set_reqseq(chan, l->tx_seq);
4130 l2cap_send_sframe(chan, control);
4132 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing frame between expected_tx_seq and @tx_seq,
 * recording each requested sequence number in chan->srej_l (allocation
 * with GFP_ATOMIC may fail; the error return is in a line not visible
 * here).  expected_tx_seq ends up one past @tx_seq.
 */
4136 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4138 struct srej_list *new;
4141 while (tx_seq != chan->expected_tx_seq) {
4142 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4143 control |= __set_reqseq(chan, chan->expected_tx_seq);
4144 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4145 l2cap_send_sframe(chan, control);
4147 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4151 new->tx_seq = chan->expected_tx_seq;
4153 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4155 list_add_tail(&new->list, &chan->srej_l);
/* Skip over the frame that actually arrived. */
4158 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Process a received ERTM I-frame.
 *
 * Handles the F-bit (leaving WAIT_F), acks outstanding frames from
 * req_seq, validates tx_seq against the receive window, and then either
 * delivers in-order frames, queues/asks-for out-of-order frames via the
 * SREJ machinery, or drops frames while locally busy.  Periodically
 * acks every (tx_win/6)+1 frames.
 *
 * NOTE(review): listing appears truncated (labels, gotos and returns
 * between visible statements are missing) — confirm exact flow against
 * upstream before changing anything here.
 */
4163 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4165 u16 tx_seq = __get_txseq(chan, rx_control);
4166 u16 req_seq = __get_reqseq(chan, rx_control);
4167 u8 sar = __get_ctrl_sar(chan, rx_control);
4168 int tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold: roughly a sixth of the transmit window. */
4169 int num_to_ack = (chan->tx_win/6) + 1;
4172 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4173 tx_seq, rx_control);
/* Final bit answers our poll: leave WAIT_F, rearm retransmission. */
4175 if (__is_ctrl_final(chan, rx_control) &&
4176 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4177 __clear_monitor_timer(chan);
4178 if (chan->unacked_frames > 0)
4179 __set_retrans_timer(chan);
4180 clear_bit(CONN_WAIT_F, &chan->conn_state);
4183 chan->expected_ack_seq = req_seq;
4184 l2cap_drop_acked_frames(chan);
4186 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4188 /* invalid tx_seq */
4189 if (tx_seq_offset >= chan->tx_win) {
4190 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Locally busy: don't accept payload, just keep acking. */
4194 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4195 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4196 l2cap_send_ack(chan);
4200 if (tx_seq == chan->expected_tx_seq)
/* Out-of-order while SREJ is in progress. */
4203 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4204 struct srej_list *first;
4206 first = list_first_entry(&chan->srej_l,
4207 struct srej_list, list);
4208 if (tx_seq == first->tx_seq) {
4209 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4210 l2cap_check_srej_gap(chan, tx_seq);
4212 list_del(&first->list);
/* All requested frames recovered: exit SREJ_SENT. */
4215 if (list_empty(&chan->srej_l)) {
4216 chan->buffer_seq = chan->buffer_seq_srej;
4217 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4218 l2cap_send_ack(chan);
4219 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4222 struct srej_list *l;
4224 /* duplicated tx_seq */
4225 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
4228 list_for_each_entry(l, &chan->srej_l, list) {
4229 if (l->tx_seq == tx_seq) {
4230 l2cap_resend_srejframe(chan, tx_seq);
4235 err = l2cap_send_srejframe(chan, tx_seq);
4237 l2cap_send_disconn_req(chan->conn, chan, -err);
/* First out-of-order frame: enter SREJ recovery. */
4242 expected_tx_seq_offset = __seq_offset(chan,
4243 chan->expected_tx_seq, chan->buffer_seq);
4245 /* duplicated tx_seq */
4246 if (tx_seq_offset < expected_tx_seq_offset)
4249 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4251 BT_DBG("chan %p, Enter SREJ", chan);
4253 INIT_LIST_HEAD(&chan->srej_l);
4254 chan->buffer_seq_srej = chan->buffer_seq;
4256 __skb_queue_head_init(&chan->srej_q);
4257 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4259 /* Set P-bit only if there are some I-frames to ack. */
4260 if (__clear_ack_timer(chan))
4261 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4263 err = l2cap_send_srejframe(chan, tx_seq);
4265 l2cap_send_disconn_req(chan->conn, chan, -err);
/* In-order frame: deliver (or queue while in SREJ recovery). */
4272 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4274 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4275 bt_cb(skb)->tx_seq = tx_seq;
4276 bt_cb(skb)->sar = sar;
4277 __skb_queue_tail(&chan->srej_q, skb);
4281 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4282 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4285 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4289 if (__is_ctrl_final(chan, rx_control)) {
4290 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4291 l2cap_retransmit_frames(chan);
/* Ack every num_to_ack frames; otherwise just arm the ack timer. */
4295 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4296 if (chan->num_acked == num_to_ack - 1)
4297 l2cap_send_ack(chan);
4299 __set_ack_timer(chan);
/* Process a received RR (Receiver Ready) S-frame: ack up to req_seq,
 * answer a poll with either an SREJ tail or I/RR/RNR, honour a final
 * bit by retransmitting if needed, and otherwise resume sending.
 */
4308 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4310 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4311 __get_reqseq(chan, rx_control), rx_control);
4313 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4314 l2cap_drop_acked_frames(chan);
/* P-bit set: the peer is polling and expects an F-bit response. */
4316 if (__is_ctrl_poll(chan, rx_control)) {
4317 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4318 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4319 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4320 (chan->unacked_frames > 0))
4321 __set_retrans_timer(chan);
4323 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4324 l2cap_send_srejtail(chan);
4326 l2cap_send_i_or_rr_or_rnr(chan);
4329 } else if (__is_ctrl_final(chan, rx_control)) {
4330 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4332 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4333 l2cap_retransmit_frames(chan);
/* Plain RR: peer is no longer busy; resume transmission. */
4336 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4337 (chan->unacked_frames > 0))
4338 __set_retrans_timer(chan);
4340 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4341 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4342 l2cap_send_ack(chan);
4344 l2cap_ertm_send(chan);
/* Process a received REJ S-frame: the peer rejects everything from
 * req_seq on.  Ack the frames before it, then retransmit — but when the
 * F-bit is set, only if no REJ exception is already being handled.
 * While waiting for an F-bit, remember that a REJ was acted on.
 */
4348 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4350 u16 tx_seq = __get_reqseq(chan, rx_control);
4352 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4354 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4356 chan->expected_ack_seq = tx_seq;
4357 l2cap_drop_acked_frames(chan);
4359 if (__is_ctrl_final(chan, rx_control)) {
4360 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4361 l2cap_retransmit_frames(chan);
4363 l2cap_retransmit_frames(chan);
4365 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4366 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Process a received SREJ S-frame asking for retransmission of a single
 * frame.  A poll additionally acks prior frames and triggers a send
 * with the F-bit; a final-bit SREJ avoids double-retransmitting a frame
 * already resent under SREJ_ACT; a plain SREJ just retransmits.
 */
4369 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4371 u16 tx_seq = __get_reqseq(chan, rx_control);
4373 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4375 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4377 if (__is_ctrl_poll(chan, rx_control)) {
4378 chan->expected_ack_seq = tx_seq;
4379 l2cap_drop_acked_frames(chan);
4381 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4382 l2cap_retransmit_one_frame(chan, tx_seq);
4384 l2cap_ertm_send(chan);
/* Remember the req_seq so a later F-bit SREJ can be de-duplicated. */
4386 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4387 chan->srej_save_reqseq = tx_seq;
4388 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4390 } else if (__is_ctrl_final(chan, rx_control)) {
4391 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4392 chan->srej_save_reqseq == tx_seq)
4393 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4395 l2cap_retransmit_one_frame(chan, tx_seq);
4397 l2cap_retransmit_one_frame(chan, tx_seq);
4398 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4399 chan->srej_save_reqseq = tx_seq;
4400 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Process a received RNR (Receiver Not Ready) S-frame: mark the peer
 * busy, ack frames up to req_seq, stop the retransmission timer, and —
 * when polled — answer with either an RR carrying the F-bit or the SREJ
 * tail, depending on whether SREJ recovery is active.
 */
4405 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4407 u16 tx_seq = __get_reqseq(chan, rx_control);
4409 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4411 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4412 chan->expected_ack_seq = tx_seq;
4413 l2cap_drop_acked_frames(chan);
4415 if (__is_ctrl_poll(chan, rx_control))
4416 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4418 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4419 __clear_retrans_timer(chan);
4420 if (__is_ctrl_poll(chan, rx_control))
4421 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* SREJ recovery active: answer a poll with the SREJ tail instead. */
4425 if (__is_ctrl_poll(chan, rx_control)) {
4426 l2cap_send_srejtail(chan);
4428 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4429 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received supervisory frame (RR/REJ/SREJ/RNR) to its
 * handler.  First honours an F-bit answering our poll: leave WAIT_F and
 * rearm the retransmission timer if frames are still unacked.
 */
4433 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4435 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4437 if (__is_ctrl_final(chan, rx_control) &&
4438 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4439 __clear_monitor_timer(chan);
4440 if (chan->unacked_frames > 0)
4441 __set_retrans_timer(chan);
4442 clear_bit(CONN_WAIT_F, &chan->conn_state);
4445 switch (__get_ctrl_super(chan, rx_control)) {
4446 case L2CAP_SUPER_RR:
4447 l2cap_data_channel_rrframe(chan, rx_control);
4450 case L2CAP_SUPER_REJ:
4451 l2cap_data_channel_rejframe(chan, rx_control);
4454 case L2CAP_SUPER_SREJ:
4455 l2cap_data_channel_srejframe(chan, rx_control);
4458 case L2CAP_SUPER_RNR:
4459 l2cap_data_channel_rnrframe(chan, rx_control);
/* Receive path for an ERTM-mode channel: strip and validate the control
 * field, check FCS and payload length, validate req_seq against the
 * window of unacked frames, then dispatch to the I-frame or S-frame
 * handler. Invalid frames cause a disconnect request (ECONNRESET).
 * NOTE(review): local declarations, gotos and returns are elided in this
 * excerpt; comments cover only the visible statements.
 */
4467 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4471 int len, next_tx_seq_offset, req_seq_offset;
4473 __unpack_control(chan, skb);
4475 control = __get_control(chan, skb->data);
4476 skb_pull(skb, __ctrl_size(chan));
4480 * We can just drop the corrupted I-frame here.
4481 * Receiver will miss it and start proper recovery
4482 * procedures and ask retransmission.
/* Bad FCS: silently drop; ERTM recovery will request retransmission. */
4484 if (l2cap_check_fcs(chan, skb))
/* Compute payload length excluding the SDU-length header (first segment
 * of a segmented SDU) and the trailing FCS, if present. */
4487 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4488 len -= L2CAP_SDULEN_SIZE;
4490 if (chan->fcs == L2CAP_FCS_CRC16)
4491 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS is a protocol violation. */
4493 if (len > chan->mps) {
4494 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4498 req_seq = __get_reqseq(chan, control);
4500 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4502 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4503 chan->expected_ack_seq);
4505 /* check for invalid req-seq */
/* req_seq must acknowledge only frames we have actually sent. */
4506 if (req_seq_offset > next_tx_seq_offset) {
4507 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4511 if (!__is_sframe(chan, control)) {
/* I-frame path; the guard before this disconnect is elided here —
 * presumably a zero-length check, TODO confirm against full source. */
4513 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4517 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame path (else branch; condition elided in this excerpt). */
4521 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4525 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the channel identified by scid. Looks up and
 * locks the channel, then processes the payload according to the
 * channel's mode: basic (direct delivery), ERTM, or streaming.
 * NOTE(review): gotos, labels, breaks and some declarations are elided
 * in this excerpt.
 */
4535 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4537 struct l2cap_chan *chan;
4542 chan = l2cap_get_chan_by_scid(conn, cid);
4544 BT_DBG("unknown cid 0x%4.4x", cid);
4545 /* Drop packet and return */
4550 l2cap_chan_lock(chan);
4552 BT_DBG("chan %p, len %d", chan, skb->len);
/* Data on a channel that is not fully connected is dropped. */
4554 if (chan->state != BT_CONNECTED)
4557 switch (chan->mode) {
4558 case L2CAP_MODE_BASIC:
4559 /* If socket recv buffers overflows we drop data here
4560 * which is *bad* because L2CAP has to be reliable.
4561 * But we don't have any other choice. L2CAP doesn't
4562 * provide flow control mechanism. */
4564 if (chan->imtu < skb->len)
/* recv() returning 0 means the skb was consumed/queued upward. */
4567 if (!chan->ops->recv(chan->data, skb))
4571 case L2CAP_MODE_ERTM:
4572 l2cap_ertm_data_rcv(chan, skb);
4576 case L2CAP_MODE_STREAMING:
/* Streaming mode: no retransmission; validate control/FCS/length,
 * discard out-of-sequence partial SDUs, and reassemble in order. */
4577 control = __get_control(chan, skb->data);
4578 skb_pull(skb, __ctrl_size(chan));
4581 if (l2cap_check_fcs(chan, skb))
4584 if (__is_sar_start(chan, control))
4585 len -= L2CAP_SDULEN_SIZE;
4587 if (chan->fcs == L2CAP_FCS_CRC16)
4588 len -= L2CAP_FCS_SIZE;
/* S-frames are not valid in streaming mode. */
4590 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4593 tx_seq = __get_txseq(chan, control);
4595 if (chan->expected_tx_seq != tx_seq) {
4596 /* Frame(s) missing - must discard partial SDU */
4597 kfree_skb(chan->sdu);
4599 chan->sdu_last_frag = NULL;
4602 /* TODO: Notify userland of missing data */
4605 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4607 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4608 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4613 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4621 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to a channel bound to the
 * given PSM on this adapter's address. Frames that exceed the channel's
 * receive MTU, or arrive while the channel is neither bound nor
 * connected, are dropped (drop paths elided in this excerpt).
 */
4626 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4628 struct l2cap_chan *chan;
4630 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4634 BT_DBG("chan %p, len %d", chan, skb->len);
4636 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4639 if (chan->imtu < skb->len)
4642 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE fixed-channel (ATT) frame to the channel registered for
 * this CID on the adapter's source address. Mirrors the checks in
 * l2cap_conless_channel: state and receive-MTU guards before handing the
 * skb to the channel's recv callback (drop paths elided in this excerpt).
 */
4651 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4652 struct sk_buff *skb)
4654 struct l2cap_chan *chan;
4656 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4660 BT_DBG("chan %p, len %d", chan, skb->len);
4662 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4665 if (chan->imtu < skb->len)
4668 if (!chan->ops->recv(chan->data, skb))
/* Parse the basic L2CAP header of a complete frame and dispatch it by
 * destination CID: signalling, connectionless, LE/ATT, SMP, or a
 * connection-oriented data channel.
 * NOTE(review): break statements and a length-mismatch drop path are
 * elided in this excerpt.
 */
4677 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4679 struct l2cap_hdr *lh = (void *) skb->data;
4683 skb_pull(skb, L2CAP_HDR_SIZE);
4684 cid = __le16_to_cpu(lh->cid);
4685 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
4687 if (len != skb->len) {
4692 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4695 case L2CAP_CID_LE_SIGNALING:
4696 case L2CAP_CID_SIGNALING:
4697 l2cap_sig_channel(conn, skb);
4700 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM as the first payload bytes. */
4701 psm = get_unaligned((__le16 *) skb->data);
4703 l2cap_conless_channel(conn, psm, skb);
4706 case L2CAP_CID_LE_DATA:
4707 l2cap_att_channel(conn, cid, skb);
/* SMP channel: a non-zero return indicates a security failure, which
 * tears the whole connection down. */
4711 if (smp_sig_channel(conn, skb))
4712 l2cap_conn_del(conn->hcon, EACCES);
/* Default: connection-oriented data channel looked up by CID. */
4716 l2cap_data_channel(conn, cid, skb);
4721 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection request from bdaddr. Scan all
 * listening L2CAP channels and build the link-mode mask (accept, and
 * optionally master role). lm1 accumulates modes of channels bound
 * exactly to this adapter's address, lm2 those bound to BDADDR_ANY;
 * exact matches take precedence in the return value.
 * NOTE(review): the statement setting 'exact' is elided in this excerpt.
 */
4723 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4725 int exact = 0, lm1 = 0, lm2 = 0;
4726 struct l2cap_chan *c;
4728 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4730 /* Find listening sockets and check their link_mode */
4731 read_lock(&chan_list_lock);
4732 list_for_each_entry(c, &chan_list, global_l) {
4733 struct sock *sk = c->sk;
4735 if (c->state != BT_LISTEN)
4738 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4739 lm1 |= HCI_LM_ACCEPT;
4740 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4741 lm1 |= HCI_LM_MASTER;
4743 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4744 lm2 |= HCI_LM_ACCEPT;
4745 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4746 lm2 |= HCI_LM_MASTER;
4749 read_unlock(&chan_list_lock);
4751 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed. On success (status 0,
 * guard elided in this excerpt) create/attach the l2cap_conn and mark it
 * ready; on failure tear it down with the mapped errno.
 */
4754 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4756 struct l2cap_conn *conn;
4758 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4761 conn = l2cap_conn_add(hcon, status);
4763 l2cap_conn_ready(conn);
4765 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the L2CAP-level reason for disconnecting hcon.
 * Falls back to HCI_ERROR_REMOTE_USER_TERM when no l2cap_conn is
 * attached (the NULL check itself is elided in this excerpt).
 */
4770 int l2cap_disconn_ind(struct hci_conn *hcon)
4772 struct l2cap_conn *conn = hcon->l2cap_data;
4774 BT_DBG("hcon %p", hcon);
4777 return HCI_ERROR_REMOTE_USER_TERM;
4778 return conn->disc_reason;
/* HCI callback: the ACL link went down; tear down the attached
 * l2cap_conn, converting the HCI reason code to an errno. */
4781 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4783 BT_DBG("hcon %p reason %d", hcon, reason);
4785 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a link-encryption change on a connection-oriented channel.
 * Encryption dropped: medium-security channels get a grace timer,
 * high-security channels are closed immediately. Encryption restored
 * (else path, condition elided here): cancel the grace timer for
 * medium-security channels.
 */
4789 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4791 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4794 if (encrypt == 0x00) {
4795 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4796 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4797 } else if (chan->sec_level == BT_SECURITY_HIGH)
4798 l2cap_chan_close(chan, ECONNREFUSED);
4800 if (chan->sec_level == BT_SECURITY_MEDIUM)
4801 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption procedure on hcon finished
 * with the given status. Walk every channel on the connection and react
 * per channel state: LE data channels become ready, connecting channels
 * send their connect request, and BT_CONNECT2 channels answer the
 * pending connect response (possibly deferring to userspace).
 * NOTE(review): several guards, continues and declarations are elided
 * in this excerpt; comments cover only the visible statements.
 */
4805 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4807 struct l2cap_conn *conn = hcon->l2cap_data;
4808 struct l2cap_chan *chan;
4813 BT_DBG("conn %p", conn);
4815 if (hcon->type == LE_LINK) {
/* LE link: kick off SMP key distribution and stop the security timer. */
4816 smp_distribute_keys(conn, 0);
4817 cancel_delayed_work(&conn->security_timer);
4820 mutex_lock(&conn->chan_lock);
4822 list_for_each_entry(chan, &conn->chan_l, list) {
4823 l2cap_chan_lock(chan);
4825 BT_DBG("chan->scid %d", chan->scid);
4827 if (chan->scid == L2CAP_CID_LE_DATA) {
4828 if (!status && encrypt) {
4829 chan->sec_level = hcon->sec_level;
4830 l2cap_chan_ready(chan);
4833 l2cap_chan_unlock(chan);
/* Channels still waiting on a pending connect are skipped here. */
4837 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4838 l2cap_chan_unlock(chan);
/* Established channels only need their encryption state re-checked. */
4842 if (!status && (chan->state == BT_CONNECTED ||
4843 chan->state == BT_CONFIG)) {
4844 l2cap_check_encryption(chan, encrypt);
4845 l2cap_chan_unlock(chan);
4849 if (chan->state == BT_CONNECT) {
/* Security done while we were about to connect: send the connect
 * request on success, otherwise arm the disconnect timer (success
 * guard elided in this excerpt). */
4851 l2cap_send_conn_req(chan);
4853 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4855 } else if (chan->state == BT_CONNECT2) {
4856 struct sock *sk = chan->sk;
4857 struct l2cap_conn_rsp rsp;
/* Security succeeded (guard elided): either defer the accept decision
 * to userspace (PEND/AUTHOR_PEND + wake the listening parent) or move
 * straight to BT_CONFIG with a success response. */
4863 if (bt_sk(sk)->defer_setup) {
4864 struct sock *parent = bt_sk(sk)->parent;
4865 res = L2CAP_CR_PEND;
4866 stat = L2CAP_CS_AUTHOR_PEND;
4868 parent->sk_data_ready(parent, 0);
4870 __l2cap_state_change(chan, BT_CONFIG);
4871 res = L2CAP_CR_SUCCESS;
4872 stat = L2CAP_CS_NO_INFO;
/* Security failed (else path elided): refuse the connection. */
4875 __l2cap_state_change(chan, BT_DISCONN);
4876 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4877 res = L2CAP_CR_SEC_BLOCK;
4878 stat = L2CAP_CS_NO_INFO;
/* Respond to the peer's original connect request; note scid/dcid are
 * swapped because the response uses the peer's perspective. */
4883 rsp.scid = cpu_to_le16(chan->dcid);
4884 rsp.dcid = cpu_to_le16(chan->scid);
4885 rsp.result = cpu_to_le16(res);
4886 rsp.status = cpu_to_le16(stat);
4887 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4891 l2cap_chan_unlock(chan);
4894 mutex_unlock(&conn->chan_lock);
/* HCI callback: ACL data arrived for hcon, possibly fragmented. A start
 * fragment (no ACL_CONT flag) carries the basic L2CAP header, from which
 * the total frame length is learned; continuation fragments are appended
 * to conn->rx_skb until rx_len reaches zero, then the complete frame is
 * handed to l2cap_recv_frame. Malformed sequences mark the connection
 * unreliable (ECOMM).
 * NOTE(review): many guards, gotos and drop paths are elided in this
 * excerpt; comments cover only the visible statements.
 */
4899 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4901 struct l2cap_conn *conn = hcon->l2cap_data;
/* No l2cap_conn yet (guard elided): create one on first data. */
4904 conn = l2cap_conn_add(hcon, 0);
4909 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4911 if (!(flags & ACL_CONT)) {
4912 struct l2cap_hdr *hdr;
4913 struct l2cap_chan *chan;
/* A start fragment while a previous reassembly is still pending means
 * the earlier frame was truncated: drop the partial frame. */
4918 BT_ERR("Unexpected start frame (len %d)", skb->len);
4919 kfree_skb(conn->rx_skb);
4920 conn->rx_skb = NULL;
4922 l2cap_conn_unreliable(conn, ECOMM);
4925 /* Start fragment always begin with Basic L2CAP header */
4926 if (skb->len < L2CAP_HDR_SIZE) {
4927 BT_ERR("Frame is too short (len %d)", skb->len);
4928 l2cap_conn_unreliable(conn, ECOMM);
4932 hdr = (struct l2cap_hdr *) skb->data;
4933 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4934 cid = __le16_to_cpu(hdr->cid);
4936 if (len == skb->len) {
4937 /* Complete frame received */
4938 l2cap_recv_frame(conn, skb);
4942 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4944 if (skb->len > len) {
4945 BT_ERR("Frame is too long (len %d, expected len %d)",
4947 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the receive MTU of the target channel so we do not bother
 * reassembling a frame that will be dropped anyway. */
4951 chan = l2cap_get_chan_by_scid(conn, cid);
4953 if (chan && chan->sk) {
4954 struct sock *sk = chan->sk;
4957 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4958 BT_ERR("Frame exceeding recv MTU (len %d, "
4962 l2cap_conn_unreliable(conn, ECOMM);
4968 /* Allocate skb for the complete frame (with header) */
4969 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4973 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still expected. */
4975 conn->rx_len = len - skb->len;
/* Continuation fragment path (else branch; condition elided). */
4977 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4979 if (!conn->rx_len) {
4980 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4981 l2cap_conn_unreliable(conn, ECOMM);
4985 if (skb->len > conn->rx_len) {
4986 BT_ERR("Fragment is too long (len %d, expected %d)",
4987 skb->len, conn->rx_len);
4988 kfree_skb(conn->rx_skb);
4989 conn->rx_skb = NULL;
4991 l2cap_conn_unreliable(conn, ECOMM);
4995 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4997 conn->rx_len -= skb->len;
4999 if (!conn->rx_len) {
5000 /* Complete frame received */
5001 l2cap_recv_frame(conn, conn->rx_skb);
5002 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dump one
 * line per global L2CAP channel (addresses, state, psm, cids, mtus,
 * security level, mode) under the channel-list read lock. */
5011 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5013 struct l2cap_chan *c;
5015 read_lock(&chan_list_lock);
5017 list_for_each_entry(c, &chan_list, global_l) {
5018 struct sock *sk = c->sk;
5020 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5021 batostr(&bt_sk(sk)->src),
5022 batostr(&bt_sk(sk)->dst),
5023 c->state, __le16_to_cpu(c->psm),
5024 c->scid, c->dcid, c->imtu, c->omtu,
5025 c->sec_level, c->mode);
5028 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the seq_file single-show helper. */
5033 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5035 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry; uses the standard
 * single_open/seq_file pattern (the .read member is elided here). */
5038 static const struct file_operations l2cap_debugfs_fops = {
5039 .open = l2cap_debugfs_open,
5041 .llseek = seq_lseek,
5042 .release = single_release,
/* Dentry of the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit(). */
5045 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, then create the debugfs
 * file (failure to create it is only logged, not fatal; error-return
 * lines are elided in this excerpt). */
5047 int __init l2cap_init(void)
5051 err = l2cap_init_sockets();
5056 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5057 bt_debugfs, NULL, &l2cap_debugfs_fops);
5059 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs entry and unregister the L2CAP
 * socket layer (mirror of l2cap_init). */
5065 void l2cap_exit(void)
5067 debugfs_remove(l2cap_debugfs);
5068 l2cap_cleanup_sockets();
/* Runtime-tunable module parameter: when set, ERTM is not offered
 * during channel configuration. */
5071 module_param(disable_ertm, bool, 0644);
5072 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");