2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
/* NOTE(review): original line numbers in this listing are non-contiguous,
 * so statements between the visible lines have been elided.  Comments here
 * describe only what is visible. */
/* Feature mask and fixed-channel map this L2CAP implementation advertises. */
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of every L2CAP channel, protected by chan_list_lock. */
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in this file. */
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l looking for the channel with the given destination CID.
 * The locked wrappers below take conn->chan_lock before calling this. */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
68 list_for_each_entry(c, &conn->chan_l, list) {
/* Same walk, keyed on the source CID instead. */
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
80 list_for_each_entry(c, &conn->chan_l, list) {
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
98 mutex_unlock(&conn->chan_lock);
103 /* Find channel with given DCID.
104 * Returns locked channel.
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
109 struct l2cap_chan *c;
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
115 mutex_unlock(&conn->chan_lock);
/* Look up a channel by the signalling command identifier it last used. */
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
123 struct l2cap_chan *c;
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
135 struct l2cap_chan *c;
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
141 mutex_unlock(&conn->chan_lock);
/* Find a channel in the global chan_list bound to this PSM and source
 * address.  Caller must hold chan_list_lock (see l2cap_add_psm below). */
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
148 struct l2cap_chan *c;
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a PSM to a channel.  If a specific PSM is requested it must not
 * already be taken for this source address; otherwise an unused dynamic
 * PSM is picked from the odd values in 0x1001..0x10ff.  Runs under the
 * chan_list write lock. */
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
161 write_lock(&chan_list_lock);
163 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Only odd PSM values are valid; step by 2 through the dynamic range. */
176 for (p = 0x1001; p < 0x1100; p += 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 chan->psm = cpu_to_le16(p);
179 chan->sport = cpu_to_le16(p);
186 write_unlock(&chan_list_lock);
/* Record a fixed SCID on the channel, under the chan_list write lock. */
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
192 write_lock(&chan_list_lock);
196 write_unlock(&chan_list_lock);
/* Return the first dynamic-range CID not yet used on this connection. */
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
203 u16 cid = L2CAP_CID_DYN_START;
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move the channel to a new BT_* state via the ops->state_change callback.
 * The locked variant below presumably takes the socket lock first —
 * elided lines between 224 and 227 would confirm. */
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
215 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 state_to_string(state));
219 chan->ops->state_change(chan, state);
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
224 struct sock *sk = chan->sk;
227 __l2cap_state_change(chan, state);
/* Record an error on the channel's socket (body elided in this view). */
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
233 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(). */
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
240 struct sock *sk = chan->sk;
243 __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only when no monitor timer is
 * pending and a retransmission timeout has been negotiated. */
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the monitor timer; it replaces any pending retransmission timer. */
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
/* Find the skb in a queue whose ERTM tx sequence number matches seq. */
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array.  Returns 0 on success; the elided
 * lines presumably return -ENOMEM when kmalloc fails — confirm against the
 * full source. */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Power-of-two size lets "seq & mask" index the array cheaply. */
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is safe). */
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list)
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove seq from the list: O(1) for the head, O(n) walk otherwise. */
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removing the last element empties the list entirely. */
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset the list to empty; no-op if it already is. */
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append seq at the tail; duplicates are ignored (slot already in use). */
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
/* Already a member — membership check doubles as duplicate guard. */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with an
 * errno chosen from its state, then drops the work's channel reference. */
396 static void l2cap_chan_timeout(struct work_struct *work)
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
400 struct l2cap_conn *conn = chan->conn;
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
416 l2cap_chan_close(chan, reason);
418 l2cap_chan_unlock(chan);
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
/* Balance the reference held while the timeout work was queued. */
423 l2cap_chan_put(chan);
/* Allocate a channel, link it into the global list and initialise its
 * lock, timer, refcount and BT_OPEN state. */
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list and free. */
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
/* Take / drop a channel reference; put may trigger l2cap_chan_destroy(). */
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
/* Install default ERTM/security parameters on a freshly created channel. */
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection: pick CIDs/MTUs appropriate for the
 * channel type, set EFS defaults and add it to conn->chan_l while holding
 * an extra channel reference.  Caller must hold conn->chan_lock (see the
 * locked wrapper l2cap_chan_add below). */
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
/* LE links use the fixed LE data CID in both directions. */
504 if (conn->hcon->type == LE_LINK) {
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 chan->scid = L2CAP_CID_LE_DATA;
508 chan->dcid = L2CAP_CID_LE_DATA;
510 /* Alloc CID for connection-oriented socket */
511 chan->scid = l2cap_alloc_cid(conn);
512 chan->omtu = L2CAP_DEFAULT_MTU;
516 case L2CAP_CHAN_CONN_LESS:
517 /* Connectionless socket */
518 chan->scid = L2CAP_CID_CONN_LESS;
519 chan->dcid = L2CAP_CID_CONN_LESS;
520 chan->omtu = L2CAP_DEFAULT_MTU;
523 case L2CAP_CHAN_CONN_FIX_A2MP:
524 chan->scid = L2CAP_CID_A2MP;
525 chan->dcid = L2CAP_CID_A2MP;
526 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
527 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
531 /* Raw socket can send/recv signalling messages only */
532 chan->scid = L2CAP_CID_SIGNALING;
533 chan->dcid = L2CAP_CID_SIGNALING;
534 chan->omtu = L2CAP_DEFAULT_MTU;
/* Extended Flow Specification defaults (best-effort service). */
537 chan->local_id = L2CAP_BESTEFFORT_ID;
538 chan->local_stype = L2CAP_SERV_BESTEFFORT;
539 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
540 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
541 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
542 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
/* The connection's channel list owns a reference; dropped in chan_del. */
544 l2cap_chan_hold(chan);
546 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
549 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
551 mutex_lock(&conn->chan_lock);
552 __l2cap_chan_add(conn, chan);
553 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop timers, unlink, drop the
 * list's reference, release AMP resources and purge ERTM/streaming queues.
 * err is propagated to the owner via ops->teardown. */
556 void l2cap_chan_del(struct l2cap_chan *chan, int err)
558 struct l2cap_conn *conn = chan->conn;
560 __clear_chan_timer(chan);
562 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
565 struct amp_mgr *mgr = conn->hcon->amp_mgr;
566 /* Delete from channel list */
567 list_del(&chan->list);
/* Balances the l2cap_chan_hold() taken in __l2cap_chan_add(). */
569 l2cap_chan_put(chan);
573 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
574 hci_conn_put(conn->hcon);
576 if (mgr && mgr->bredr_chan == chan)
577 mgr->bredr_chan = NULL;
/* Tear down any high-speed (AMP) logical link still attached. */
580 if (chan->hs_hchan) {
581 struct hci_chan *hs_hchan = chan->hs_hchan;
583 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
584 amp_disconnect_logical_link(hs_hchan);
587 chan->ops->teardown(chan, err);
589 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
593 case L2CAP_MODE_BASIC:
596 case L2CAP_MODE_ERTM:
597 __clear_retrans_timer(chan);
598 __clear_monitor_timer(chan);
599 __clear_ack_timer(chan);
601 skb_queue_purge(&chan->srej_q);
603 l2cap_seq_list_free(&chan->srej_list);
604 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough into streaming cleanup is implied by elided lines —
 * confirm against the full source. */
608 case L2CAP_MODE_STREAMING:
609 skb_queue_purge(&chan->tx_q);
/* Close a channel according to its current state: disconnect an open
 * ACL channel, refuse/deny a half-open incoming one, or just tear down. */
616 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
618 struct l2cap_conn *conn = chan->conn;
619 struct sock *sk = chan->sk;
621 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
624 switch (chan->state) {
626 chan->ops->teardown(chan, 0);
/* Established ACL channel: start the disconnect handshake and wait. */
631 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
632 conn->hcon->type == ACL_LINK) {
633 __set_chan_timer(chan, sk->sk_sndtimeo);
634 l2cap_send_disconn_req(chan, reason);
636 l2cap_chan_del(chan, reason);
/* Incoming connection not yet accepted: answer with a reject code. */
640 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
641 conn->hcon->type == ACL_LINK) {
642 struct l2cap_conn_rsp rsp;
645 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
646 result = L2CAP_CR_SEC_BLOCK;
648 result = L2CAP_CR_BAD_PSM;
649 l2cap_state_change(chan, BT_DISCONN);
651 rsp.scid = cpu_to_le16(chan->dcid);
652 rsp.dcid = cpu_to_le16(chan->scid);
653 rsp.result = cpu_to_le16(result);
654 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
655 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
659 l2cap_chan_del(chan, reason);
664 l2cap_chan_del(chan, reason);
668 chan->ops->teardown(chan, 0);
/* Map the channel type / PSM / security level onto the HCI authentication
 * requirement to request from the controller. */
673 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
675 if (chan->chan_type == L2CAP_CHAN_RAW) {
676 switch (chan->sec_level) {
677 case BT_SECURITY_HIGH:
678 return HCI_AT_DEDICATED_BONDING_MITM;
679 case BT_SECURITY_MEDIUM:
680 return HCI_AT_DEDICATED_BONDING;
682 return HCI_AT_NO_BONDING;
/* SDP never bonds; LOW is downgraded to the dedicated SDP level. */
684 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
685 if (chan->sec_level == BT_SECURITY_LOW)
686 chan->sec_level = BT_SECURITY_SDP;
688 if (chan->sec_level == BT_SECURITY_HIGH)
689 return HCI_AT_NO_BONDING_MITM;
691 return HCI_AT_NO_BONDING;
/* All other channels use general bonding. */
693 switch (chan->sec_level) {
694 case BT_SECURITY_HIGH:
695 return HCI_AT_GENERAL_BONDING_MITM;
696 case BT_SECURITY_MEDIUM:
697 return HCI_AT_GENERAL_BONDING;
699 return HCI_AT_NO_BONDING;
704 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level. */
705 int l2cap_chan_check_security(struct l2cap_chan *chan)
707 struct l2cap_conn *conn = chan->conn;
710 auth_type = l2cap_get_auth_type(chan);
712 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Hand out the next signalling command identifier for this connection,
 * wrapping within the kernel-reserved range (1-128). */
715 static u8 l2cap_get_ident(struct l2cap_conn *conn)
719 /* Get next available identificator.
720 * 1 - 128 are used by kernel.
721 * 129 - 199 are reserved.
722 * 200 - 254 are used by utilities like l2ping, etc.
725 spin_lock(&conn->lock);
727 if (++conn->tx_ident > 128)
732 spin_unlock(&conn->lock);
/* Build a signalling command and push it out over the connection's HCI
 * channel at maximum priority, non-flushable when the controller allows. */
737 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
740 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
743 BT_DBG("code 0x%2.2x", code);
748 if (lmp_no_flush_capable(conn->hcon->hdev))
749 flags = ACL_START_NO_FLUSH;
753 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
754 skb->priority = HCI_PRIO_MAX;
756 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any state other than
 * stable or wait-prepare). */
759 static bool __chan_is_moving(struct l2cap_chan *chan)
761 return chan->move_state != L2CAP_MOVE_STABLE &&
762 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit one PDU: route it over the high-speed (AMP) channel when one
 * is attached and no move is in progress, otherwise over the BR/EDR ACL
 * link with flushability derived from the channel flags. */
765 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
767 struct hci_conn *hcon = chan->conn->hcon;
770 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
773 if (chan->hs_hcon && !__chan_is_moving(chan)) {
775 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
782 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
783 lmp_no_flush_capable(hcon->hdev))
784 flags = ACL_START_NO_FLUSH;
788 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
789 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into struct l2cap_ctrl;
 * the frame-type bit selects S-frame vs I-frame layout. */
792 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
794 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
795 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
797 if (enh & L2CAP_CTRL_FRAME_TYPE) {
800 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
801 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
808 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
809 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decode for the 32-bit extended control field. */
816 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
818 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
819 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
821 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
824 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
825 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
832 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
833 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull the control field off an incoming skb and decode it, choosing
 * extended vs enhanced layout from the channel's FLAG_EXT_CTRL. */
840 static inline void __unpack_control(struct l2cap_chan *chan,
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
844 __unpack_extended_control(get_unaligned_le32(skb->data),
845 &bt_cb(skb)->control);
846 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
848 __unpack_enhanced_control(get_unaligned_le16(skb->data),
849 &bt_cb(skb)->control);
850 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode struct l2cap_ctrl into the 32-bit extended control word. */
854 static u32 __pack_extended_control(struct l2cap_ctrl *control)
858 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
859 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
861 if (control->sframe) {
862 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
863 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
864 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
866 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
867 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode struct l2cap_ctrl into the 16-bit enhanced control word. */
873 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
877 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
878 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
880 if (control->sframe) {
881 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
882 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
883 packed |= L2CAP_CTRL_FRAME_TYPE;
885 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
886 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an outgoing PDU just after the
 * basic L2CAP header. */
892 static inline void __pack_control(struct l2cap_chan *chan,
893 struct l2cap_ctrl *control,
896 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
897 put_unaligned_le32(__pack_extended_control(control),
898 skb->data + L2CAP_HDR_SIZE);
900 put_unaligned_le16(__pack_enhanced_control(control),
901 skb->data + L2CAP_HDR_SIZE);
/* ERTM header size: extended vs enhanced control layout. */
905 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
907 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
908 return L2CAP_EXT_HDR_SIZE;
910 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-)frame PDU: basic header, packed control field
 * and, when CRC16 FCS is negotiated, a trailing checksum.
 * Returns ERR_PTR(-ENOMEM) on allocation failure. */
913 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
917 struct l2cap_hdr *lh;
918 int hlen = __ertm_hdr_size(chan);
920 if (chan->fcs == L2CAP_FCS_CRC16)
921 hlen += L2CAP_FCS_SIZE;
923 skb = bt_skb_alloc(hlen, GFP_KERNEL);
926 return ERR_PTR(-ENOMEM);
928 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
929 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
930 lh->cid = cpu_to_le16(chan->dcid);
932 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
933 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
935 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers header + control, appended last. */
937 if (chan->fcs == L2CAP_FCS_CRC16) {
938 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
939 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
942 skb->priority = HCI_PRIO_MAX;
/* Send an S-frame, updating RNR/F-bit bookkeeping and the last acked
 * sequence number as a side effect.  No-op while an AMP move is active. */
946 static void l2cap_send_sframe(struct l2cap_chan *chan,
947 struct l2cap_ctrl *control)
952 BT_DBG("chan %p, control %p", chan, control);
954 if (!control->sframe)
957 if (__chan_is_moving(chan))
/* Pending F-bit is consumed by this frame (condition partly elided). */
960 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
964 if (control->super == L2CAP_SUPER_RR)
965 clear_bit(CONN_RNR_SENT, &chan->conn_state);
966 else if (control->super == L2CAP_SUPER_RNR)
967 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* RR/RNR acknowledge reqseq, so the ack timer can be cancelled. */
969 if (control->super != L2CAP_SUPER_SREJ) {
970 chan->last_acked_seq = control->reqseq;
971 __clear_ack_timer(chan);
974 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
975 control->final, control->poll, control->super);
977 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
978 control_field = __pack_extended_control(control);
980 control_field = __pack_enhanced_control(control);
982 skb = l2cap_create_sframe_pdu(chan, control_field);
984 l2cap_do_send(chan, skb);
/* Send RR, or RNR when we are locally busy, acking buffer_seq. */
987 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
989 struct l2cap_ctrl control;
991 BT_DBG("chan %p, poll %d", chan, poll);
993 memset(&control, 0, sizeof(control));
997 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
998 control.super = L2CAP_SUPER_RNR;
1000 control.super = L2CAP_SUPER_RR;
1002 control.reqseq = chan->buffer_seq;
1003 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
1006 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1008 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* True when this channel may be carried over an AMP controller: AMP
 * hardware present, policy prefers AMP, and the peer advertises the
 * A2MP fixed channel. */
1011 static bool __amp_capable(struct l2cap_chan *chan)
1013 struct l2cap_conn *conn = chan->conn;
1016 hci_amp_capable() &&
1017 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1018 conn->fixed_chan_mask & L2CAP_FC_A2MP)
/* Validate EFS parameters (body elided in this view). */
1024 static bool l2cap_check_efs(struct l2cap_chan *chan)
1026 /* Check EFS parameters */
/* Send an L2CAP Connection Request for this channel and mark a connect
 * as pending until the response arrives. */
1030 void l2cap_send_conn_req(struct l2cap_chan *chan)
1032 struct l2cap_conn *conn = chan->conn;
1033 struct l2cap_conn_req req;
1035 req.scid = cpu_to_le16(chan->scid);
1036 req.psm = chan->psm;
1038 chan->ident = l2cap_get_ident(conn);
1040 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1042 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send a Create Channel Request targeting the given AMP controller id. */
1045 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1047 struct l2cap_create_chan_req req;
1048 req.scid = cpu_to_le16(chan->scid);
1049 req.psm = chan->psm;
1050 req.amp_id = amp_id;
1052 chan->ident = l2cap_get_ident(chan->conn);
1054 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for a move to another controller: stop all
 * timers, reset retry/retransmit bookkeeping and park the state machines
 * in their move states. */
1058 static void l2cap_move_setup(struct l2cap_chan *chan)
1060 struct sk_buff *skb;
1062 BT_DBG("chan %p", chan);
1064 if (chan->mode != L2CAP_MODE_ERTM)
1067 __clear_retrans_timer(chan);
1068 __clear_monitor_timer(chan);
1069 __clear_ack_timer(chan);
1071 chan->retry_count = 0;
/* Frames already sent once are reset to a single retry. */
1072 skb_queue_walk(&chan->tx_q, skb) {
1073 if (bt_cb(skb)->control.retries)
1074 bt_cb(skb)->control.retries = 1;
1079 chan->expected_tx_seq = chan->buffer_seq;
1081 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1082 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1083 l2cap_seq_list_clear(&chan->retrans_list);
1084 l2cap_seq_list_clear(&chan->srej_list);
1085 skb_queue_purge(&chan->srej_q);
1087 chan->tx_state = L2CAP_TX_STATE_XMIT;
1088 chan->rx_state = L2CAP_RX_STATE_MOVE;
1090 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish a channel move: return to the stable state and, for ERTM,
 * resynchronise via poll (initiator) or wait for one (responder). */
1093 static void l2cap_move_done(struct l2cap_chan *chan)
1095 u8 move_role = chan->move_role;
1096 BT_DBG("chan %p", chan);
1098 chan->move_state = L2CAP_MOVE_STABLE;
1099 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1101 if (chan->mode != L2CAP_MODE_ERTM)
1104 switch (move_role) {
1105 case L2CAP_MOVE_ROLE_INITIATOR:
1106 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1107 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1109 case L2CAP_MOVE_ROLE_RESPONDER:
1110 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel fully connected and notify its owner. */
1115 static void l2cap_chan_ready(struct l2cap_chan *chan)
1117 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1118 chan->conf_state = 0;
1119 __clear_chan_timer(chan);
1121 chan->state = BT_CONNECTED;
1123 chan->ops->ready(chan);
/* Kick off channel establishment: discover AMP controllers first when the
 * channel prefers AMP, otherwise send a plain connect request. */
1126 static void l2cap_start_connection(struct l2cap_chan *chan)
1128 if (__amp_capable(chan)) {
1129 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1130 a2mp_discover_amp(chan);
1132 l2cap_send_conn_req(chan);
/* Drive an outgoing channel forward: LE channels are ready immediately;
 * BR/EDR channels wait for the feature-mask exchange and security, and
 * trigger an information request when none has been sent yet. */
1136 static void l2cap_do_start(struct l2cap_chan *chan)
1138 struct l2cap_conn *conn = chan->conn;
1140 if (conn->hcon->type == LE_LINK) {
1141 l2cap_chan_ready(chan);
1145 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1146 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1149 if (l2cap_chan_check_security(chan) &&
1150 __l2cap_no_conn_pending(chan)) {
1151 l2cap_start_connection(chan);
/* First channel on this connection: query the peer's feature mask. */
1154 struct l2cap_info_req req;
1155 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1157 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1158 conn->info_ident = l2cap_get_ident(conn);
1160 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1162 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Non-zero when both we and the peer support the requested mode;
 * ERTM and streaming are checked against both feature masks. */
1167 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1169 u32 local_feat_mask = l2cap_feat_mask;
1171 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1174 case L2CAP_MODE_ERTM:
1175 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1176 case L2CAP_MODE_STREAMING:
1177 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request (or just change state for A2MP channels),
 * stopping ERTM timers on a connected channel first; the channel ends in
 * BT_DISCONN with err recorded on its socket. */
1183 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1185 struct sock *sk = chan->sk;
1186 struct l2cap_conn *conn = chan->conn;
1187 struct l2cap_disconn_req req;
1192 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1193 __clear_retrans_timer(chan);
1194 __clear_monitor_timer(chan);
1195 __clear_ack_timer(chan);
/* A2MP uses no disconnect handshake; just flip the state. */
1198 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1199 l2cap_state_change(chan, BT_DISCONN);
1203 req.dcid = cpu_to_le16(chan->dcid);
1204 req.scid = cpu_to_le16(chan->scid);
1205 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1209 __l2cap_state_change(chan, BT_DISCONN);
1210 __l2cap_chan_set_err(chan, err);
1214 /* ---- L2CAP connections ---- */
/* After the feature exchange completes, walk every channel on the
 * connection: start outgoing connects that now pass security, and answer
 * deferred incoming connects (BT_CONNECT2) with success/pend/reject,
 * following up with a configuration request when accepted. */
1215 static void l2cap_conn_start(struct l2cap_conn *conn)
1217 struct l2cap_chan *chan, *tmp;
1219 BT_DBG("conn %p", conn);
1221 mutex_lock(&conn->chan_lock);
1223 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1224 struct sock *sk = chan->sk;
1226 l2cap_chan_lock(chan);
1228 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1229 l2cap_chan_unlock(chan);
1233 if (chan->state == BT_CONNECT) {
1234 if (!l2cap_chan_check_security(chan) ||
1235 !__l2cap_no_conn_pending(chan)) {
1236 l2cap_chan_unlock(chan);
/* Mode mandated by the socket but unsupported by the peer:
 * give up on this channel. */
1240 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1241 && test_bit(CONF_STATE2_DEVICE,
1242 &chan->conf_state)) {
1243 l2cap_chan_close(chan, ECONNRESET);
1244 l2cap_chan_unlock(chan);
1248 l2cap_start_connection(chan);
1250 } else if (chan->state == BT_CONNECT2) {
1251 struct l2cap_conn_rsp rsp;
1253 rsp.scid = cpu_to_le16(chan->dcid);
1254 rsp.dcid = cpu_to_le16(chan->scid);
1256 if (l2cap_chan_check_security(chan)) {
1258 if (test_bit(BT_SK_DEFER_SETUP,
1259 &bt_sk(sk)->flags)) {
1260 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1261 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1262 chan->ops->defer(chan);
1265 __l2cap_state_change(chan, BT_CONFIG);
1266 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1267 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: report pending authentication. */
1271 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1272 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1275 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a successful, not-yet-configured channel proceeds to
 * the configuration phase. */
1278 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1279 rsp.result != L2CAP_CR_SUCCESS) {
1280 l2cap_chan_unlock(chan);
1284 set_bit(CONF_REQ_SENT, &chan->conf_state);
1285 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1286 l2cap_build_conf_req(chan, buf), buf);
1287 chan->num_conf_req++;
1290 l2cap_chan_unlock(chan);
1293 mutex_unlock(&conn->chan_lock);
1296 /* Find socket with cid and source/destination bdaddr.
1297 * Returns closest match, locked.
/* Scan the global channel list for a channel in the given state with this
 * SCID.  An exact src/dst match wins immediately; otherwise the best
 * wildcard (BDADDR_ANY) match is remembered in c1. */
1299 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1303 struct l2cap_chan *c, *c1 = NULL;
1305 read_lock(&chan_list_lock);
1307 list_for_each_entry(c, &chan_list, global_l) {
1308 struct sock *sk = c->sk;
1310 if (state && c->state != state)
1313 if (c->scid == cid) {
1314 int src_match, dst_match;
1315 int src_any, dst_any;
1318 src_match = !bacmp(&bt_sk(sk)->src, src);
1319 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1320 if (src_match && dst_match) {
1321 read_unlock(&chan_list_lock);
1326 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1327 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1328 if ((src_match && dst_any) || (src_any && dst_match) ||
1329 (src_any && dst_any))
1334 read_unlock(&chan_list_lock);
/* An LE link came up with us as slave: if a socket is listening on the
 * LE data CID, spawn a child channel, bind it to the link's addresses
 * and mark it ready. */
1339 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1341 struct sock *parent, *sk;
1342 struct l2cap_chan *chan, *pchan;
1346 /* Check if we have socket listening on cid */
1347 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1348 conn->src, conn->dst);
1356 chan = pchan->ops->new_connection(pchan);
1362 hci_conn_hold(conn->hcon);
1363 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1365 bacpy(&bt_sk(sk)->src, conn->src);
1366 bacpy(&bt_sk(sk)->dst, conn->dst);
1368 l2cap_chan_add(conn, chan);
1370 l2cap_chan_ready(chan);
1373 release_sock(parent);
/* The underlying HCI link is up: handle LE incoming/outgoing security,
 * then advance every channel — LE channels become ready once encrypted,
 * non-connection-oriented channels go straight to BT_CONNECTED, and
 * outgoing BT_CONNECT channels are (re)started. */
1376 static void l2cap_conn_ready(struct l2cap_conn *conn)
1378 struct l2cap_chan *chan;
1379 struct hci_conn *hcon = conn->hcon;
1381 BT_DBG("conn %p", conn);
1383 if (!hcon->out && hcon->type == LE_LINK)
1384 l2cap_le_conn_ready(conn);
1386 if (hcon->out && hcon->type == LE_LINK)
1387 smp_conn_security(hcon, hcon->pending_sec_level);
1389 mutex_lock(&conn->chan_lock);
1391 list_for_each_entry(chan, &conn->chan_l, list) {
1393 l2cap_chan_lock(chan);
/* A2MP fixed channels manage their own state. */
1395 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1396 l2cap_chan_unlock(chan);
1400 if (hcon->type == LE_LINK) {
1401 if (smp_conn_security(hcon, chan->sec_level))
1402 l2cap_chan_ready(chan);
1404 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1405 struct sock *sk = chan->sk;
1406 __clear_chan_timer(chan);
1408 __l2cap_state_change(chan, BT_CONNECTED);
1409 sk->sk_state_change(sk);
1412 } else if (chan->state == BT_CONNECT)
1413 l2cap_do_start(chan);
1415 l2cap_chan_unlock(chan);
1418 mutex_unlock(&conn->chan_lock);
1421 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate err to every channel that insisted on reliability. */
1422 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1424 struct l2cap_chan *chan;
1426 BT_DBG("conn %p", conn);
1428 mutex_lock(&conn->chan_lock);
1430 list_for_each_entry(chan, &conn->chan_l, list) {
1431 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1432 l2cap_chan_set_err(chan, err);
1435 mutex_unlock(&conn->chan_lock);
/* Info request timed out: pretend the exchange is done so pending
 * channels can proceed with whatever features we assumed. */
1438 static void l2cap_info_timeout(struct work_struct *work)
1440 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1443 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1444 conn->info_ident = 0;
1446 l2cap_conn_start(conn);
/* Tear down the L2CAP state of an HCI connection: close every channel
 * (propagating @err to its owner), release the HCI channel, cancel any
 * pending info/security timers and detach conn from hcon.
 */
1449 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1451 struct l2cap_conn *conn = hcon->l2cap_data;
1452 struct l2cap_chan *chan, *l;
1457 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially received frame still buffered on the conn. */
1459 kfree_skb(conn->rx_skb);
1461 mutex_lock(&conn->chan_lock);
/* _safe iteration: l2cap_chan_del() unlinks chan from conn->chan_l.
 * Hold an extra ref across ops->close() so the channel cannot be
 * freed underneath us.
 */
1464 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1465 l2cap_chan_hold(chan);
1466 l2cap_chan_lock(chan);
1468 l2cap_chan_del(chan, err);
1470 l2cap_chan_unlock(chan);
1472 chan->ops->close(chan);
1473 l2cap_chan_put(chan);
1476 mutex_unlock(&conn->chan_lock);
1478 hci_chan_del(conn->hchan);
1480 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1481 cancel_delayed_work_sync(&conn->info_timer);
/* If SMP pairing was in flight on this LE link, stop its timer and
 * free the SMP context too.
 */
1483 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1484 cancel_delayed_work_sync(&conn->security_timer);
1485 smp_chan_destroy(conn);
1488 hcon->l2cap_data = NULL;
/* Delayed-work handler: the SMP security procedure timed out.  Destroy
 * the SMP context and drop the whole L2CAP connection with ETIMEDOUT.
 */
1492 static void security_timeout(struct work_struct *work)
1494 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1495 security_timer.work);
1497 BT_DBG("conn %p", conn);
/* test_and_clear guards against racing with l2cap_conn_del(), which
 * clears the same HCI_CONN_LE_SMP_PEND bit.
 */
1499 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1500 smp_chan_destroy(conn);
1501 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialise the l2cap_conn for @hcon.  The MTU is taken
 * from the controller according to link type: AMP block MTU, LE MTU
 * (when the controller reports one), otherwise the ACL MTU.  An LE link
 * arms the security (SMP) timer; other links arm the info-request
 * timer.  Returns the new conn, or NULL on failure.
 * NOTE(review): the early "conn already exists" return, the NULL checks
 * after hci_chan_create()/kzalloc() and the case labels of the type
 * switch are elided in this excerpt (line numbers skip).
 */
1505 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1507 struct l2cap_conn *conn = hcon->l2cap_data;
1508 struct hci_chan *hchan;
1513 hchan = hci_chan_create(hcon);
1517 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: give back the HCI channel (elided error path). */
1519 hci_chan_del(hchan);
1523 hcon->l2cap_data = conn;
1525 conn->hchan = hchan;
1527 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1529 switch (hcon->type) {
1531 conn->mtu = hcon->hdev->block_mtu;
1535 if (hcon->hdev->le_mtu) {
1536 conn->mtu = hcon->hdev->le_mtu;
1542 conn->mtu = hcon->hdev->acl_mtu;
/* Borrowed pointers into hdev/hcon - valid for the conn lifetime. */
1546 conn->src = &hcon->hdev->bdaddr;
1547 conn->dst = &hcon->dst;
1549 conn->feat_mask = 0;
1551 spin_lock_init(&conn->lock);
1552 mutex_init(&conn->chan_lock);
1554 INIT_LIST_HEAD(&conn->chan_l);
1556 if (hcon->type == LE_LINK)
1557 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1559 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
/* Default disconnect reason until signalling says otherwise. */
1561 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1566 /* ---- Socket interface ---- */
1568 /* Find socket with psm and source / destination bdaddr.
1569 * Returns closest match.
/* Scan the global channel list for a channel in @state bound to @psm.
 * An exact src+dst match returns immediately (list still read-locked at
 * that point is dropped first); otherwise the first channel matching
 * through BDADDR_ANY wildcards is remembered in c1 as the "closest
 * match" and returned by the elided tail of the function.
 */
1571 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1575 struct l2cap_chan *c, *c1 = NULL;
1577 read_lock(&chan_list_lock);
1579 list_for_each_entry(c, &chan_list, global_l) {
1580 struct sock *sk = c->sk;
1582 if (state && c->state != state)
1585 if (c->psm == psm) {
1586 int src_match, dst_match;
1587 int src_any, dst_any;
/* Exact match wins over any wildcard candidate. */
1590 src_match = !bacmp(&bt_sk(sk)->src, src);
1591 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1592 if (src_match && dst_match) {
1593 read_unlock(&chan_list_lock);
1598 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1599 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1600 if ((src_match && dst_any) || (src_any && dst_match) ||
1601 (src_any && dst_any))
1606 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan to @dst.
 *
 * Validates the PSM/CID combination and the channel mode, resolves a
 * local controller via hci_get_route(), creates (or reuses) the ACL or
 * LE link with hci_connect(), attaches the channel to the resulting
 * l2cap_conn and starts the connect sequence.
 *
 * Returns 0 on success or a negative errno (-EHOSTUNREACH when no
 * route, PTR_ERR(hcon) on link failure, etc.).  NOTE(review): several
 * error branches ("goto done", -EINVAL/-EBUSY returns, hci_dev_lock)
 * are elided in this excerpt - line numbers skip.
 */
1611 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1612 bdaddr_t *dst, u8 dst_type)
1614 struct sock *sk = chan->sk;
1615 bdaddr_t *src = &bt_sk(sk)->src;
1616 struct l2cap_conn *conn;
1617 struct hci_conn *hcon;
1618 struct hci_dev *hdev;
1622 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1623 dst_type, __le16_to_cpu(psm));
1625 hdev = hci_get_route(dst, src);
1627 return -EHOSTUNREACH;
1631 l2cap_chan_lock(chan);
1633 /* PSM must be odd and lsb of upper byte must be 0 */
1634 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1635 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a fixed CID. */
1640 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1645 switch (chan->mode) {
1646 case L2CAP_MODE_BASIC:
1648 case L2CAP_MODE_ERTM:
1649 case L2CAP_MODE_STREAMING:
1658 switch (chan->state) {
1662 /* Already connecting */
1667 /* Already connected */
1681 /* Set destination address and psm */
1683 bacpy(&bt_sk(sk)->dst, dst);
1689 auth_type = l2cap_get_auth_type(chan);
/* LE data channel -> LE link, everything else goes over ACL. */
1691 if (chan->dcid == L2CAP_CID_LE_DATA)
1692 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1693 chan->sec_level, auth_type);
1695 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1696 chan->sec_level, auth_type);
1699 err = PTR_ERR(hcon);
1703 conn = l2cap_conn_add(hcon, 0);
/* LE allows only one attached data channel per connection. */
1710 if (hcon->type == LE_LINK) {
1713 if (!list_empty(&conn->chan_l)) {
1722 /* Update source addr of the socket */
1723 bacpy(src, conn->src);
/* l2cap_chan_add() takes conn->chan_lock; release the chan lock
 * first to keep the documented lock ordering (conn before chan).
 */
1725 l2cap_chan_unlock(chan);
1726 l2cap_chan_add(conn, chan);
1727 l2cap_chan_lock(chan);
1729 l2cap_state_change(chan, BT_CONNECT);
1730 __set_chan_timer(chan, sk->sk_sndtimeo);
1732 if (hcon->state == BT_CONNECTED) {
1733 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1734 __clear_chan_timer(chan);
1735 if (l2cap_chan_check_security(chan))
1736 l2cap_state_change(chan, BT_CONNECTED);
1738 l2cap_do_start(chan);
1744 l2cap_chan_unlock(chan);
1745 hci_dev_unlock(hdev);
/* Block the caller (interruptibly) until every outstanding ERTM
 * I-frame has been acknowledged (unacked_frames == 0) or the channel
 * loses its connection.  Returns 0, a pending socket error, or the
 * sock_intr_errno() value when interrupted by a signal.
 * NOTE(review): the elided lines between 1759 and 1769 presumably set a
 * default timeout and drop/retake the socket lock around
 * schedule_timeout() - confirm against the full source.
 */
1750 int __l2cap_wait_ack(struct sock *sk)
1752 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1753 DECLARE_WAITQUEUE(wait, current);
1757 add_wait_queue(sk_sleep(sk), &wait);
1758 set_current_state(TASK_INTERRUPTIBLE);
1759 while (chan->unacked_frames > 0 && chan->conn) {
1763 if (signal_pending(current)) {
1764 err = sock_intr_errno(timeo);
1769 timeo = schedule_timeout(timeo);
1771 set_current_state(TASK_INTERRUPTIBLE);
/* A socket error (e.g. link loss) also terminates the wait. */
1773 err = sock_error(sk);
1777 set_current_state(TASK_RUNNING);
1778 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer handler: fires while we are polling the remote
 * (waiting for an F-bit response).  Feeds L2CAP_EV_MONITOR_TO into the
 * TX state machine.  The final l2cap_chan_put() pairs with the hold
 * taken when the timer was armed.
 * NOTE(review): the condition guarding the first unlock/put early
 * return (original lines 1790-1791, likely "if (!chan->conn)") is
 * elided in this excerpt.
 */
1782 static void l2cap_monitor_timeout(struct work_struct *work)
1784 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1785 monitor_timer.work);
1787 BT_DBG("chan %p", chan);
1789 l2cap_chan_lock(chan);
1792 l2cap_chan_unlock(chan);
1793 l2cap_chan_put(chan);
1797 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1799 l2cap_chan_unlock(chan);
1800 l2cap_chan_put(chan);
/* ERTM retransmission timer handler: no ack arrived in time for the
 * oldest unacked I-frame.  Feeds L2CAP_EV_RETRANS_TO into the TX state
 * machine (which starts a poll).  Mirrors l2cap_monitor_timeout above,
 * including the elided early-return guard at original lines 1811-1812.
 */
1803 static void l2cap_retrans_timeout(struct work_struct *work)
1805 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1806 retrans_timer.work);
1808 BT_DBG("chan %p", chan);
1810 l2cap_chan_lock(chan);
1813 l2cap_chan_unlock(chan);
1814 l2cap_chan_put(chan);
1818 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1819 l2cap_chan_unlock(chan);
1820 l2cap_chan_put(chan);
/* Streaming mode transmit: move @skbs onto the tx queue and send every
 * queued PDU immediately.  Streaming mode has no acknowledgements, so
 * frames are consumed (dequeued) rather than kept for retransmission.
 * Each frame gets the next tx sequence number and, when CRC16 FCS is
 * negotiated, a trailing checksum.
 */
1823 static void l2cap_streaming_send(struct l2cap_chan *chan,
1824 struct sk_buff_head *skbs)
1826 struct sk_buff *skb;
1827 struct l2cap_ctrl *control;
1829 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Do not transmit while an AMP channel move is in progress. */
1831 if (__chan_is_moving(chan))
1834 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1836 while (!skb_queue_empty(&chan->tx_q)) {
1838 skb = skb_dequeue(&chan->tx_q);
1840 bt_cb(skb)->control.retries = 1;
1841 control = &bt_cb(skb)->control;
1843 control->reqseq = 0;
1844 control->txseq = chan->next_tx_seq;
1846 __pack_control(chan, control, skb);
1848 if (chan->fcs == L2CAP_FCS_CRC16) {
1849 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1850 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1853 l2cap_do_send(chan, skb);
1855 BT_DBG("Sent txseq %u", control->txseq);
1857 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1858 chan->frames_sent++;
/* ERTM transmit: send new I-frames starting at tx_send_head while the
 * remote's tx window has room and the state machine is in XMIT.  Each
 * frame is *cloned* before sending so the original stays in tx_q for
 * possible retransmission; the retransmission timer is (re)armed per
 * frame.  Returns the number of frames sent (the "sent" counter and
 * its increment/returns are partly elided in this excerpt).
 */
1862 static int l2cap_ertm_send(struct l2cap_chan *chan)
1864 struct sk_buff *skb, *tx_skb;
1865 struct l2cap_ctrl *control;
1868 BT_DBG("chan %p", chan);
1870 if (chan->state != BT_CONNECTED)
/* Remote signalled receiver-busy (RNR): hold off new frames. */
1873 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1876 if (__chan_is_moving(chan))
1879 while (chan->tx_send_head &&
1880 chan->unacked_frames < chan->remote_tx_win &&
1881 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1883 skb = chan->tx_send_head;
1885 bt_cb(skb)->control.retries = 1;
1886 control = &bt_cb(skb)->control;
/* Piggyback an F-bit if one is pending (elided assignment). */
1888 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1891 control->reqseq = chan->buffer_seq;
1892 chan->last_acked_seq = chan->buffer_seq;
1893 control->txseq = chan->next_tx_seq;
1895 __pack_control(chan, control, skb);
1897 if (chan->fcs == L2CAP_FCS_CRC16) {
1898 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1899 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1902 /* Clone after data has been modified. Data is assumed to be
1903 read-only (for locking purposes) on cloned sk_buffs.
1905 tx_skb = skb_clone(skb, GFP_KERNEL);
1910 __set_retrans_timer(chan);
1912 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1913 chan->unacked_frames++;
1914 chan->frames_sent++;
/* Advance tx_send_head; NULL once the queue tail has been sent. */
1917 if (skb_queue_is_last(&chan->tx_q, skb))
1918 chan->tx_send_head = NULL;
1920 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1922 l2cap_do_send(chan, tx_skb);
1923 BT_DBG("Sent txseq %u", control->txseq);
1926 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1927 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.  For
 * each entry: find the frame still held in tx_q, enforce the max_tx
 * retry limit (disconnecting the channel when exceeded), refresh the
 * control field (reqseq / F-bit) and FCS in a private copy, and send.
 * Cloned frames must be skb_copy()'d because clone data is treated as
 * read-only.
 */
1932 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1934 struct l2cap_ctrl control;
1935 struct sk_buff *skb;
1936 struct sk_buff *tx_skb;
1939 BT_DBG("chan %p", chan);
1941 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1944 if (__chan_is_moving(chan))
1947 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1948 seq = l2cap_seq_list_pop(&chan->retrans_list);
1950 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
/* Frame already acked/freed - nothing to resend for this seq. */
1952 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1957 bt_cb(skb)->control.retries++;
1958 control = bt_cb(skb)->control;
/* max_tx == 0 means "retry forever" per the ERTM config. */
1960 if (chan->max_tx != 0 &&
1961 bt_cb(skb)->control.retries > chan->max_tx) {
1962 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1963 l2cap_send_disconn_req(chan, ECONNRESET);
1964 l2cap_seq_list_clear(&chan->retrans_list);
1968 control.reqseq = chan->buffer_seq;
1969 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1974 if (skb_cloned(skb)) {
1975 /* Cloned sk_buffs are read-only, so we need a
1978 tx_skb = skb_copy(skb, GFP_KERNEL);
1980 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon this retransmission round. */
1984 l2cap_seq_list_clear(&chan->retrans_list);
1988 /* Update skb contents */
1989 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1990 put_unaligned_le32(__pack_extended_control(&control),
1991 tx_skb->data + L2CAP_HDR_SIZE);
1993 put_unaligned_le16(__pack_enhanced_control(&control),
1994 tx_skb->data + L2CAP_HDR_SIZE);
/* Control field changed, so the FCS must be recomputed too. */
1997 if (chan->fcs == L2CAP_FCS_CRC16) {
1998 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1999 put_unaligned_le16(fcs, skb_put(tx_skb,
2003 l2cap_do_send(chan, tx_skb);
2005 BT_DBG("Resent txseq %d", control.txseq);
2007 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame requested by an SREJ: queue its sequence
 * number and run the resend machinery.
 */
2011 static void l2cap_retransmit(struct l2cap_chan *chan,
2012 struct l2cap_ctrl *control)
2014 BT_DBG("chan %p, control %p", chan, control);
2016 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2017 l2cap_ertm_resend(chan);
/* Handle a REJ (or poll response requiring full retransmission): queue
 * every unacked frame from control->reqseq up to (but excluding)
 * tx_send_head for retransmission, then resend.  A pending F-bit
 * request is latched first so the next outgoing frame carries it.
 */
2020 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2021 struct l2cap_ctrl *control)
2023 struct sk_buff *skb;
2025 BT_DBG("chan %p, control %p", chan, control);
2028 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2030 l2cap_seq_list_clear(&chan->retrans_list);
2032 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2035 if (chan->unacked_frames) {
/* First walk: position on the frame with txseq == reqseq (or the
 * first never-sent frame, whichever comes first).
 */
2036 skb_queue_walk(&chan->tx_q, skb) {
2037 if (bt_cb(skb)->control.txseq == control->reqseq ||
2038 skb == chan->tx_send_head)
/* Second walk: everything from there until tx_send_head needs
 * retransmission.
 */
2042 skb_queue_walk_from(&chan->tx_q, skb) {
2043 if (skb == chan->tx_send_head)
2046 l2cap_seq_list_append(&chan->retrans_list,
2047 bt_cb(skb)->control.txseq);
2050 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends an RNR when we are locally
 * busy, otherwise tries to piggyback the ack on outgoing I-frames; if
 * none were sent, sends an explicit RR once the unacked window reaches
 * the 3/4 threshold, else just (re)arms the ack timer.
 */
2054 static void l2cap_send_ack(struct l2cap_chan *chan)
2056 struct l2cap_ctrl control;
2057 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2058 chan->last_acked_seq);
2061 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2062 chan, chan->last_acked_seq, chan->buffer_seq);
2064 memset(&control, 0, sizeof(control));
2067 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2068 chan->rx_state == L2CAP_RX_STATE_RECV) {
2069 __clear_ack_timer(chan);
2070 control.super = L2CAP_SUPER_RNR;
2071 control.reqseq = chan->buffer_seq;
2072 l2cap_send_sframe(chan, &control);
2074 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2075 l2cap_ertm_send(chan);
2076 /* If any i-frames were sent, they included an ack */
2077 if (chan->buffer_seq == chan->last_acked_seq)
2081 /* Ack now if the window is 3/4ths full.
2082 * Calculate without mul or div
/* threshold = ack_win * 3; the elided line (2086, presumably
 * "threshold >>= 2;") divides by 4 to reach 3/4 of the window.
 */
2084 threshold = chan->ack_win;
2085 threshold += threshold << 1;
2088 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2091 if (frames_to_ack >= threshold) {
2092 __clear_ack_timer(chan);
2093 control.super = L2CAP_SUPER_RR;
2094 control.reqseq = chan->buffer_seq;
2095 l2cap_send_sframe(chan, &control);
2100 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder is split into
 * continuation fragments (no L2CAP header) chained on frag_list, each
 * at most conn->mtu bytes.  Returns 0 or a negative errno
 * (-EFAULT on copy failure, PTR_ERR on fragment allocation failure).
 */
2104 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2105 struct msghdr *msg, int len,
2106 int count, struct sk_buff *skb)
2108 struct l2cap_conn *conn = chan->conn;
2109 struct sk_buff **frag;
2112 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2118 /* Continuation fragments (no L2CAP header) */
2119 frag = &skb_shinfo(skb)->frag_list;
2121 struct sk_buff *tmp;
2123 count = min_t(unsigned int, conn->mtu, len);
2125 tmp = chan->ops->alloc_skb(chan, count,
2126 msg->msg_flags & MSG_DONTWAIT);
2128 return PTR_ERR(tmp);
2132 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2135 (*frag)->priority = skb->priority;
/* Keep the head skb's accounting in sync with its fragments. */
2140 skb->len += (*frag)->len;
2141 skb->data_len += (*frag)->len;
2143 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the payload copied from @msg.  Returns the skb
 * or an ERR_PTR on allocation/copy failure.
 */
2149 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2150 struct msghdr *msg, size_t len,
2153 struct l2cap_conn *conn = chan->conn;
2154 struct sk_buff *skb;
2155 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2156 struct l2cap_hdr *lh;
2158 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2160 count = min_t(unsigned int, (conn->mtu - hlen), len);
2162 skb = chan->ops->alloc_skb(chan, count + hlen,
2163 msg->msg_flags & MSG_DONTWAIT);
2167 skb->priority = priority;
2169 /* Create L2CAP header */
2170 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2171 lh->cid = cpu_to_le16(chan->dcid);
2172 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2173 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2175 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2176 if (unlikely(err < 0)) {
2178 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus payload
 * copied from @msg.  Returns the skb or an ERR_PTR on failure.
 */
2183 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2184 struct msghdr *msg, size_t len,
2187 struct l2cap_conn *conn = chan->conn;
2188 struct sk_buff *skb;
2190 struct l2cap_hdr *lh;
2192 BT_DBG("chan %p len %zu", chan, len);
2194 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2196 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2197 msg->msg_flags & MSG_DONTWAIT);
2201 skb->priority = priority;
2203 /* Create L2CAP header */
2204 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2205 lh->cid = cpu_to_le16(chan->dcid);
2206 lh->len = cpu_to_le16(len);
2208 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2209 if (unlikely(err < 0)) {
2211 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU.  Reserves the control field
 * (filled in at transmit time), an optional SDU-length field for the
 * first segment of a segmented SDU, and accounts for a trailing FCS
 * when CRC16 is negotiated (the FCS itself is appended at send time).
 * Returns the skb or an ERR_PTR on failure.
 */
2216 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2217 struct msghdr *msg, size_t len,
2220 struct l2cap_conn *conn = chan->conn;
2221 struct sk_buff *skb;
2222 int err, count, hlen;
2223 struct l2cap_hdr *lh;
2225 BT_DBG("chan %p len %zu", chan, len);
2228 return ERR_PTR(-ENOTCONN);
2230 hlen = __ertm_hdr_size(chan);
/* The SDU-length field is only present on SAR "start" segments
 * (sdulen != 0, via the elided condition).
 */
2233 hlen += L2CAP_SDULEN_SIZE;
2235 if (chan->fcs == L2CAP_FCS_CRC16)
2236 hlen += L2CAP_FCS_SIZE;
2238 count = min_t(unsigned int, (conn->mtu - hlen), len);
2240 skb = chan->ops->alloc_skb(chan, count + hlen,
2241 msg->msg_flags & MSG_DONTWAIT);
2245 /* Create L2CAP header */
2246 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2247 lh->cid = cpu_to_le16(chan->dcid);
2248 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2250 /* Control header is populated later */
2251 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2252 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2254 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2257 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2259 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2260 if (unlikely(err < 0)) {
2262 return ERR_PTR(err);
2265 bt_cb(skb)->control.fcs = chan->fcs;
2266 bt_cb(skb)->control.retries = 0;
/* Split an SDU of @len bytes from @msg into I-frame PDUs queued on
 * @seg_queue.  PDU size is bounded by the HCI MTU (so each ERTM PDU
 * fits one HCI fragment), by L2CAP_BREDR_MAX_PAYLOAD on BR/EDR, by the
 * ERTM header/FCS overhead, and by the remote's MPS.  Frames are SAR
 * tagged UNSEGMENTED, or START/CONTINUE/END with the SDU length carried
 * only on the START frame.  Returns 0 or a negative errno; on failure
 * everything queued so far is purged.
 */
2270 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2271 struct sk_buff_head *seg_queue,
2272 struct msghdr *msg, size_t len)
2274 struct sk_buff *skb;
2279 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2281 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2282 * so fragmented skbs are not used. The HCI layer's handling
2283 * of fragmented skbs is not compatible with ERTM's queueing.
2286 /* PDU size is derived from the HCI MTU */
2287 pdu_len = chan->conn->mtu;
2289 /* Constrain PDU size for BR/EDR connections */
2291 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2293 /* Adjust for largest possible L2CAP overhead. */
2295 pdu_len -= L2CAP_FCS_SIZE;
2297 pdu_len -= __ertm_hdr_size(chan);
2299 /* Remote device may have requested smaller PDUs */
2300 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2302 if (len <= pdu_len) {
2303 sar = L2CAP_SAR_UNSEGMENTED;
/* Segmented: first frame carries the total SDU length and loses
 * L2CAP_SDULEN_SIZE bytes of payload capacity.
 */
2307 sar = L2CAP_SAR_START;
2309 pdu_len -= L2CAP_SDULEN_SIZE;
2313 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2316 __skb_queue_purge(seg_queue);
2317 return PTR_ERR(skb);
2320 bt_cb(skb)->control.sar = sar;
2321 __skb_queue_tail(seg_queue, skb);
/* After the START frame, later segments get the capacity back. */
2326 pdu_len += L2CAP_SDULEN_SIZE;
2329 if (len <= pdu_len) {
2330 sar = L2CAP_SAR_END;
2333 sar = L2CAP_SAR_CONTINUE;
/* Top-level channel transmit entry point.  Dispatches on channel type
 * and mode: connectionless channels send a single G-frame; basic mode
 * sends a single B-frame; ERTM/streaming segment the SDU first, then
 * hand the segments to the TX state machine (ERTM) or send directly
 * (streaming).  Returns @len on success or a negative errno (the
 * "err = len" success assignments are elided in this excerpt).
 */
2340 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2343 struct sk_buff *skb;
2345 struct sk_buff_head seg_queue;
2347 /* Connectionless channel */
2348 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2349 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2351 return PTR_ERR(skb);
2353 l2cap_do_send(chan, skb);
2357 switch (chan->mode) {
2358 case L2CAP_MODE_BASIC:
2359 /* Check outgoing MTU */
2360 if (len > chan->omtu)
2363 /* Create a basic PDU */
2364 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2366 return PTR_ERR(skb);
2368 l2cap_do_send(chan, skb);
2372 case L2CAP_MODE_ERTM:
2373 case L2CAP_MODE_STREAMING:
2374 /* Check outgoing MTU */
2375 if (len > chan->omtu) {
2380 __skb_queue_head_init(&seg_queue);
2382 /* Do segmentation before calling in to the state machine,
2383 * since it's possible to block while waiting for memory
2386 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2388 /* The channel could have been closed while segmenting,
2389 * check that it is still connected.
2391 if (chan->state != BT_CONNECTED) {
2392 __skb_queue_purge(&seg_queue);
2399 if (chan->mode == L2CAP_MODE_ERTM)
2400 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2402 l2cap_streaming_send(chan, &seg_queue);
2406 /* If the skbs were not queued for sending, they'll still be in
2407 * seg_queue and need to be purged.
2409 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode -
 * the switch above is indeed on the mode; matches upstream text.
 */
2413 BT_DBG("bad state %1.1x", chan->mode);
/* Send one SREJ S-frame per missing sequence number between
 * expected_tx_seq and @txseq (exclusive), skipping frames already held
 * out of order in srej_q, and remember each requested seq on
 * srej_list.  Finally advance expected_tx_seq past @txseq.
 */
2420 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2422 struct l2cap_ctrl control;
2425 BT_DBG("chan %p, txseq %u", chan, txseq);
2427 memset(&control, 0, sizeof(control));
2429 control.super = L2CAP_SUPER_SREJ;
2431 for (seq = chan->expected_tx_seq; seq != txseq;
2432 seq = __next_seq(chan, seq)) {
2433 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2434 control.reqseq = seq;
2435 l2cap_send_sframe(chan, &control);
2436 l2cap_seq_list_append(&chan->srej_list, seq);
2440 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested missing frame (the
 * tail of srej_list); no-op when the list is empty.
 */
2443 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2445 struct l2cap_ctrl control;
2447 BT_DBG("chan %p", chan);
2449 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2452 memset(&control, 0, sizeof(control));
2454 control.super = L2CAP_SUPER_SREJ;
2455 control.reqseq = chan->srej_list.tail;
2456 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding entry on srej_list except
 * @txseq (which just arrived).  Each popped seq other than @txseq is
 * re-sent and re-appended; popping @txseq (or an empty list) stops the
 * loop, and comparing against the captured initial head bounds the
 * walk to a single pass even though entries are re-appended.
 */
2459 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2461 struct l2cap_ctrl control;
2465 BT_DBG("chan %p, txseq %u", chan, txseq);
2467 memset(&control, 0, sizeof(control));
2469 control.super = L2CAP_SUPER_SREJ;
2471 /* Capture initial list head to allow only one pass through the list. */
2472 initial_head = chan->srej_list.head;
2475 seq = l2cap_seq_list_pop(&chan->srej_list);
2476 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2479 control.reqseq = seq;
2480 l2cap_send_sframe(chan, &control);
2481 l2cap_seq_list_append(&chan->srej_list, seq);
2482 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every frame in tx_q whose
 * txseq lies in [expected_ack_seq, reqseq), update expected_ack_seq and
 * stop the retransmission timer once nothing is outstanding.
 */
2485 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2487 struct sk_buff *acked_skb;
2490 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or this ack repeats what we already know. */
2492 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2495 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2496 chan->expected_ack_seq, chan->unacked_frames);
2498 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2499 ackseq = __next_seq(chan, ackseq)) {
2501 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2503 skb_unlink(acked_skb, &chan->tx_q);
2504 kfree_skb(acked_skb);
2505 chan->unacked_frames--;
2509 chan->expected_ack_seq = reqseq;
2511 if (chan->unacked_frames == 0)
2512 __clear_retrans_timer(chan);
2514 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to the
 * delivery point, drop the SREJ bookkeeping and any out-of-order frames
 * held in srej_q, and fall back to plain RECV.
 */
2517 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2519 BT_DBG("chan %p", chan);
2521 chan->expected_tx_seq = chan->buffer_seq;
2522 l2cap_seq_list_clear(&chan->srej_list);
2523 skb_queue_purge(&chan->srej_q);
2524 chan->rx_state = L2CAP_RX_STATE_RECV;
/* TX state machine, XMIT state: handle @event.  Data requests queue and
 * transmit; local-busy transitions send RNR/RR as needed; explicit
 * polls and retransmission timeouts send a P-bit frame and move to
 * WAIT_F.  NOTE(review): the case-terminating "break;" lines (and the
 * closing of the switch) are elided in this excerpt - each labeled
 * case is an independent handler in the original.
 */
2527 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2528 struct l2cap_ctrl *control,
2529 struct sk_buff_head *skbs, u8 event)
2531 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2535 case L2CAP_EV_DATA_REQUEST:
2536 if (chan->tx_send_head == NULL)
2537 chan->tx_send_head = skb_peek(skbs);
2539 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2540 l2cap_ertm_send(chan);
2542 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2543 BT_DBG("Enter LOCAL_BUSY");
2544 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2546 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2547 /* The SREJ_SENT state must be aborted if we are to
2548 * enter the LOCAL_BUSY state.
2550 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() emits the RNR while CONN_LOCAL_BUSY is set. */
2553 l2cap_send_ack(chan);
2556 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2557 BT_DBG("Exit LOCAL_BUSY");
2558 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2560 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2561 struct l2cap_ctrl local_control;
/* We told the remote we were busy; poll (RR with P=1) so it
 * learns we can receive again, then await the F-bit reply.
 */
2563 memset(&local_control, 0, sizeof(local_control));
2564 local_control.sframe = 1;
2565 local_control.super = L2CAP_SUPER_RR;
2566 local_control.poll = 1;
2567 local_control.reqseq = chan->buffer_seq;
2568 l2cap_send_sframe(chan, &local_control);
2570 chan->retry_count = 1;
2571 __set_monitor_timer(chan);
2572 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2575 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2576 l2cap_process_reqseq(chan, control->reqseq);
2578 case L2CAP_EV_EXPLICIT_POLL:
2579 l2cap_send_rr_or_rnr(chan, 1);
2580 chan->retry_count = 1;
2581 __set_monitor_timer(chan);
2582 __clear_ack_timer(chan);
2583 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2585 case L2CAP_EV_RETRANS_TO:
2586 l2cap_send_rr_or_rnr(chan, 1);
2587 chan->retry_count = 1;
2588 __set_monitor_timer(chan);
2589 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2591 case L2CAP_EV_RECV_FBIT:
2592 /* Nothing to process */
/* TX state machine, WAIT_F state: we have sent a P-bit poll and are
 * waiting for the matching F-bit.  Data is queued but not transmitted;
 * receipt of the F-bit returns to XMIT; the monitor timer re-polls up
 * to max_tx times before forcing a disconnect.  NOTE(review): as in
 * l2cap_tx_state_xmit, the "break;" terminators between cases are
 * elided in this excerpt.
 */
2599 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2600 struct l2cap_ctrl *control,
2601 struct sk_buff_head *skbs, u8 event)
2603 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2607 case L2CAP_EV_DATA_REQUEST:
2608 if (chan->tx_send_head == NULL)
2609 chan->tx_send_head = skb_peek(skbs);
2610 /* Queue data, but don't send. */
2611 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2613 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2614 BT_DBG("Enter LOCAL_BUSY");
2615 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2617 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2618 /* The SREJ_SENT state must be aborted if we are to
2619 * enter the LOCAL_BUSY state.
2621 l2cap_abort_rx_srej_sent(chan);
2624 l2cap_send_ack(chan);
2627 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2628 BT_DBG("Exit LOCAL_BUSY");
2629 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2631 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2632 struct l2cap_ctrl local_control;
2633 memset(&local_control, 0, sizeof(local_control));
2634 local_control.sframe = 1;
2635 local_control.super = L2CAP_SUPER_RR;
2636 local_control.poll = 1;
2637 local_control.reqseq = chan->buffer_seq;
2638 l2cap_send_sframe(chan, &local_control);
2640 chan->retry_count = 1;
2641 __set_monitor_timer(chan);
2642 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2645 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2646 l2cap_process_reqseq(chan, control->reqseq);
2650 case L2CAP_EV_RECV_FBIT:
/* F-bit answers our poll: back to XMIT, resume retransmission
 * timing if frames remain outstanding.
 */
2651 if (control && control->final) {
2652 __clear_monitor_timer(chan);
2653 if (chan->unacked_frames > 0)
2654 __set_retrans_timer(chan);
2655 chan->retry_count = 0;
2656 chan->tx_state = L2CAP_TX_STATE_XMIT;
2657 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2660 case L2CAP_EV_EXPLICIT_POLL:
/* Poll already in flight - ignore. */
2663 case L2CAP_EV_MONITOR_TO:
2664 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2665 l2cap_send_rr_or_rnr(chan, 1);
2666 __set_monitor_timer(chan);
2667 chan->retry_count++;
2669 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a TX state-machine event to the handler for the channel's
 * current tx_state (XMIT or WAIT_F).
 */
2677 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2678 struct sk_buff_head *skbs, u8 event)
2680 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2681 chan, control, skbs, event, chan->tx_state);
2683 switch (chan->tx_state) {
2684 case L2CAP_TX_STATE_XMIT:
2685 l2cap_tx_state_xmit(chan, control, skbs, event);
2687 case L2CAP_TX_STATE_WAIT_F:
2688 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's reqseq/F-bit information to the TX state
 * machine as a RECV_REQSEQ_AND_FBIT event.
 */
2696 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2697 struct l2cap_ctrl *control)
2699 BT_DBG("chan %p, control %p", chan, control);
2700 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only the F-bit of a received frame to the TX state machine
 * as a RECV_FBIT event (no reqseq processing).
 */
2703 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2704 struct l2cap_ctrl *control)
2706 BT_DBG("chan %p, control %p", chan, control);
2707 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2710 /* Copy frame to all raw sockets on that connection */
/* Clone @skb once per raw channel on @conn and hand the clone to the
 * channel's recv op; clones the receiver rejects are freed by the
 * elided kfree_skb path.  The originating socket is skipped (elided
 * comparison after line 2725).
 */
2711 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2713 struct sk_buff *nskb;
2714 struct l2cap_chan *chan;
2716 BT_DBG("conn %p", conn);
2718 mutex_lock(&conn->chan_lock);
2720 list_for_each_entry(chan, &conn->chan_l, list) {
2721 struct sock *sk = chan->sk;
2722 if (chan->chan_type != L2CAP_CHAN_RAW)
2725 /* Don't send frame to the socket it came from */
2728 nskb = skb_clone(skb, GFP_KERNEL);
2732 if (chan->ops->recv(chan, nskb))
2736 mutex_unlock(&conn->chan_lock);
2739 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel skb: L2CAP header (CID 0x0001, or the LE
 * signalling CID on LE links), command header (@code/@ident/@dlen),
 * then @dlen bytes of @data.  Data beyond the first conn->mtu bytes is
 * chained as frag_list continuation skbs.  Returns the skb or NULL
 * (the NULL returns on bt_skb_alloc failure and the "goto fail" path
 * are elided in this excerpt).
 */
2740 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2741 u8 ident, u16 dlen, void *data)
2743 struct sk_buff *skb, **frag;
2744 struct l2cap_cmd_hdr *cmd;
2745 struct l2cap_hdr *lh;
2748 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2749 conn, code, ident, dlen);
2751 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2752 count = min_t(unsigned int, conn->mtu, len);
2754 skb = bt_skb_alloc(count, GFP_KERNEL);
2758 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2759 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2761 if (conn->hcon->type == LE_LINK)
2762 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2764 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2766 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2769 cmd->len = cpu_to_le16(dlen);
2772 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2773 memcpy(skb_put(skb, count), data, count);
2779 /* Continuation fragments (no L2CAP header) */
2780 frag = &skb_shinfo(skb)->frag_list;
2782 count = min_t(unsigned int, conn->mtu, len);
2784 *frag = bt_skb_alloc(count, GFP_KERNEL);
2788 memcpy(skb_put(*frag, count), data, count);
2793 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its type/len and
 * decode the value by size (1/2/4 bytes, or a pointer to the raw bytes
 * for anything larger).  Returns the total option length so the caller
 * can advance; *ptr itself is advanced by the elided statement after
 * line 2809.
 */
2803 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2806 struct l2cap_conf_opt *opt = *ptr;
2809 len = L2CAP_CONF_OPT_SIZE + opt->len;
2817 *val = *((u8 *) opt->val);
2821 *val = get_unaligned_le16(opt->val);
2825 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw value bytes. */
2829 *val = (unsigned long) opt->val;
2833 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr, encoding
 * the value by size (1/2/4 bytes inline, larger values memcpy'd from
 * the pointer passed in @val), then advance *ptr past the option.
 */
2837 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2839 struct l2cap_conf_opt *opt = *ptr;
2841 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2848 *((u8 *) opt->val) = val;
2852 put_unaligned_le16(val, opt->val);
2856 put_unaligned_le32(val, opt->val);
/* len > 4: @val is really a pointer to the option payload. */
2860 memcpy(opt->val, (void *) val, len);
2864 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build and append an Extended Flow Specification option derived from
 * the channel's local QoS parameters.  ERTM uses the channel's own
 * service type and default latency/flush values; streaming forces
 * best-effort.  NOTE(review): the default case (other modes return
 * without adding the option) is elided in this excerpt.
 */
2867 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2869 struct l2cap_conf_efs efs;
2871 switch (chan->mode) {
2872 case L2CAP_MODE_ERTM:
2873 efs.id = chan->local_id;
2874 efs.stype = chan->local_stype;
2875 efs.msdu = cpu_to_le16(chan->local_msdu);
2876 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2877 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2878 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2881 case L2CAP_MODE_STREAMING:
2883 efs.stype = L2CAP_SERV_BESTEFFORT;
2884 efs.msdu = cpu_to_le16(chan->local_msdu);
2885 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2894 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2895 (unsigned long) &efs);
/* Ack timer handler: if I-frames have been received but not yet acked,
 * send an RR (or RNR when locally busy) via l2cap_send_rr_or_rnr().
 * The trailing l2cap_chan_put() pairs with the hold taken when the
 * timer was armed; the "if (frames_to_ack)" guard around line 2912 is
 * elided in this excerpt.
 */
2898 static void l2cap_ack_timeout(struct work_struct *work)
2900 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2904 BT_DBG("chan %p", chan);
2906 l2cap_chan_lock(chan);
2908 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2909 chan->last_acked_seq);
2912 l2cap_send_rr_or_rnr(chan, 0);
2914 l2cap_chan_unlock(chan);
2915 l2cap_chan_put(chan);
/* Initialise ERTM/streaming state for a channel: reset all sequence
 * counters and queues, and (ERTM only) set up the retrans/monitor/ack
 * timers and the srej/retrans sequence lists.  Returns 0 or a negative
 * errno from l2cap_seq_list_init(); on failure of the second list the
 * first is freed again.
 */
2918 int l2cap_ertm_init(struct l2cap_chan *chan)
2922 chan->next_tx_seq = 0;
2923 chan->expected_tx_seq = 0;
2924 chan->expected_ack_seq = 0;
2925 chan->unacked_frames = 0;
2926 chan->buffer_seq = 0;
2927 chan->frames_sent = 0;
2928 chan->last_acked_seq = 0;
2930 chan->sdu_last_frag = NULL;
2933 skb_queue_head_init(&chan->tx_q);
2935 chan->local_amp_id = 0;
2937 chan->move_state = L2CAP_MOVE_STABLE;
2938 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the timers / seq lists below. */
2940 if (chan->mode != L2CAP_MODE_ERTM)
2943 chan->rx_state = L2CAP_RX_STATE_RECV;
2944 chan->tx_state = L2CAP_TX_STATE_XMIT;
2946 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2947 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2948 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2950 skb_queue_head_init(&chan->srej_q);
2952 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2956 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the first list if the second allocation failed. */
2958 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to actually use: keep ERTM/streaming only when
 * the remote's feature mask supports it (elided "return mode"),
 * otherwise fall back to basic mode.
 */
2963 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2966 case L2CAP_MODE_STREAMING:
2967 case L2CAP_MODE_ERTM:
2968 if (l2cap_mode_supported(mode, remote_feat_mask))
2972 return L2CAP_MODE_BASIC;
/* Extended window size is usable only with high-speed (AMP) support
 * enabled and the remote advertising L2CAP_FEAT_EXT_WINDOW.
 */
2976 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2978 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow spec is usable only with high-speed (AMP) support
 * enabled and the remote advertising L2CAP_FEAT_EXT_FLOW.
 */
2981 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2983 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose ERTM retransmission/monitor timeouts for @rfc.  On an AMP
 * link (non-zero local_amp_id with an hs_hcon) they are derived from
 * the controller's best-effort flush timeout, clamped to 16 bits;
 * otherwise the spec defaults are used.
 */
2986 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
2987 struct l2cap_conf_rfc *rfc)
2989 if (chan->local_amp_id && chan->hs_hcon) {
2990 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
2992 /* Class 1 devices must have ERTM timeouts
2993 * exceeding the Link Supervision Timeout. The
2994 * default Link Supervision Timeout for AMP
2995 * controllers is 10 seconds.
2997 * Class 1 devices use 0xffffffff for their
2998 * best-effort flush timeout, so the clamping logic
2999 * will result in a timeout that meets the above
3000 * requirement. ERTM timeouts are 16-bit values, so
3001 * the maximum timeout is 65.535 seconds.
3004 /* Convert timeout to milliseconds and round */
3005 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3007 /* This is the recommended formula for class 2 devices
3008 * that start ERTM timers when packets are sent to the
3011 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (elided assignment to 0xffff). */
3013 if (ertm_to > 0xffff)
3016 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3017 rfc->monitor_timeout = rfc->retrans_timeout;
3019 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3020 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Configure the transmit window.  If the requested window exceeds the
 * default and extended window sizes are supported, switch to the extended
 * control field; otherwise clamp tx_win to the default.  ack_win mirrors
 * the final tx_win.
 */
3024 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3026 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3027 __l2cap_ews_supported(chan)) {
3028 /* use extended control field */
3029 set_bit(FLAG_EXT_CTRL, &chan->flags);
3030 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3032 chan->tx_win = min_t(u16, chan->tx_win,
3033 L2CAP_DEFAULT_TX_WINDOW);
3034 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3036 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for @chan into @data.
 * Emits MTU, RFC, and (when enabled) EFS/EWS/FCS options according to the
 * channel mode.  Returns the length of the built request (return statement
 * is outside this elided excerpt).
 */
3039 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3041 struct l2cap_conf_req *req = data;
3042 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3043 void *ptr = req->data;
3046 BT_DBG("chan %p", chan);
/* On the first request, possibly downgrade the mode to one the remote
 * supports (unless this device mandates its mode via CONF_STATE2_DEVICE). */
3048 if (chan->num_conf_req || chan->num_conf_rsp)
3051 switch (chan->mode) {
3052 case L2CAP_MODE_STREAMING:
3053 case L2CAP_MODE_ERTM:
3054 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3057 if (__l2cap_efs_supported(chan))
3058 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3062 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise the MTU when it differs from the default. */
3067 if (chan->imtu != L2CAP_DEFAULT_MTU)
3068 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3070 switch (chan->mode) {
3071 case L2CAP_MODE_BASIC:
/* Basic mode: send an all-zero RFC option only if the remote knows
 * about RFC at all (supports ERTM or streaming). */
3072 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3073 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3076 rfc.mode = L2CAP_MODE_BASIC;
3078 rfc.max_transmit = 0;
3079 rfc.retrans_timeout = 0;
3080 rfc.monitor_timeout = 0;
3081 rfc.max_pdu_size = 0;
3083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3084 (unsigned long) &rfc);
3087 case L2CAP_MODE_ERTM:
3088 rfc.mode = L2CAP_MODE_ERTM;
3089 rfc.max_transmit = chan->max_tx;
3091 __l2cap_set_ertm_timeouts(chan, &rfc);
/* Bound the PDU size by the connection MTU minus header overhead. */
3093 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3094 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3096 rfc.max_pdu_size = cpu_to_le16(size);
3098 l2cap_txwin_setup(chan);
3100 rfc.txwin_size = min_t(u16, chan->tx_win,
3101 L2CAP_DEFAULT_TX_WINDOW);
3103 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3104 (unsigned long) &rfc);
3106 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3107 l2cap_add_opt_efs(&ptr, chan);
3109 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3110 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to drop the FCS if the remote supports FCS negotiation. */
3113 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3114 if (chan->fcs == L2CAP_FCS_NONE ||
3115 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3116 chan->fcs = L2CAP_FCS_NONE;
3117 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3122 case L2CAP_MODE_STREAMING:
3123 l2cap_txwin_setup(chan);
3124 rfc.mode = L2CAP_MODE_STREAMING;
3126 rfc.max_transmit = 0;
3127 rfc.retrans_timeout = 0;
3128 rfc.monitor_timeout = 0;
3130 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3131 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3133 rfc.max_pdu_size = cpu_to_le16(size);
3135 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3136 (unsigned long) &rfc);
3138 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3139 l2cap_add_opt_efs(&ptr, chan);
3141 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3142 if (chan->fcs == L2CAP_FCS_NONE ||
3143 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3144 chan->fcs = L2CAP_FCS_NONE;
3145 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3151 req->dcid = cpu_to_le16(chan->dcid);
3152 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated Configuration Request stored in chan->conf_req and
 * build the Configuration Response into @data.  Walks the request options
 * (MTU, flush timeout, RFC, FCS, EFS, EWS), validates them against the
 * channel's mode and local settings, and echoes accepted/adjusted values.
 * Returns the response length (outside this elided excerpt) or
 * -ECONNREFUSED when the configuration is unacceptable.
 */
3157 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3159 struct l2cap_conf_rsp *rsp = data;
3160 void *ptr = rsp->data;
3161 void *req = chan->conf_req;
3162 int len = chan->conf_len;
3163 int type, hint, olen;
3165 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3166 struct l2cap_conf_efs efs;
3168 u16 mtu = L2CAP_DEFAULT_MTU;
3169 u16 result = L2CAP_CONF_SUCCESS;
3172 BT_DBG("chan %p", chan);
/* First pass: consume each option from the request buffer. */
3174 while (len >= L2CAP_CONF_OPT_SIZE) {
3175 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3177 hint = type & L2CAP_CONF_HINT;
3178 type &= L2CAP_CONF_MASK;
3181 case L2CAP_CONF_MTU:
3185 case L2CAP_CONF_FLUSH_TO:
3186 chan->flush_to = val;
3189 case L2CAP_CONF_QOS:
3192 case L2CAP_CONF_RFC:
3193 if (olen == sizeof(rfc))
3194 memcpy(&rfc, (void *) val, olen);
3197 case L2CAP_CONF_FCS:
3198 if (val == L2CAP_FCS_NONE)
3199 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3202 case L2CAP_CONF_EFS:
3204 if (olen == sizeof(efs))
3205 memcpy(&efs, (void *) val, olen);
3208 case L2CAP_CONF_EWS:
3210 return -ECONNREFUSED;
3212 set_bit(FLAG_EXT_CTRL, &chan->flags);
3213 set_bit(CONF_EWS_RECV, &chan->conf_state);
3214 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3215 chan->remote_tx_win = val;
/* Unknown non-hint option: reject it and list its type in the rsp. */
3222 result = L2CAP_CONF_UNKNOWN;
3223 *((u8 *) ptr++) = type;
3228 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* Reconcile the requested RFC mode with our own mode. */
3231 switch (chan->mode) {
3232 case L2CAP_MODE_STREAMING:
3233 case L2CAP_MODE_ERTM:
3234 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3235 chan->mode = l2cap_select_mode(rfc.mode,
3236 chan->conn->feat_mask);
3241 if (__l2cap_efs_supported(chan))
3242 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3244 return -ECONNREFUSED;
3247 if (chan->mode != rfc.mode)
3248 return -ECONNREFUSED;
/* Mode mismatch: answer UNACCEPT with our mode; give up after one
 * unsuccessful round. */
3254 if (chan->mode != rfc.mode) {
3255 result = L2CAP_CONF_UNACCEPT;
3256 rfc.mode = chan->mode;
3258 if (chan->num_conf_rsp == 1)
3259 return -ECONNREFUSED;
3261 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3262 (unsigned long) &rfc);
3265 if (result == L2CAP_CONF_SUCCESS) {
3266 /* Configure output options and let the other side know
3267 * which ones we don't like. */
3269 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3270 result = L2CAP_CONF_UNACCEPT;
3273 set_bit(CONF_MTU_DONE, &chan->conf_state);
3275 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type must match ours unless either side is NO TRAFFIC. */
3278 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3279 efs.stype != L2CAP_SERV_NOTRAFIC &&
3280 efs.stype != chan->local_stype) {
3282 result = L2CAP_CONF_UNACCEPT;
3284 if (chan->num_conf_req >= 1)
3285 return -ECONNREFUSED;
3287 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3289 (unsigned long) &efs);
3291 /* Send PENDING Conf Rsp */
3292 result = L2CAP_CONF_PENDING;
3293 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3298 case L2CAP_MODE_BASIC:
3299 chan->fcs = L2CAP_FCS_NONE;
3300 set_bit(CONF_MODE_DONE, &chan->conf_state);
3303 case L2CAP_MODE_ERTM:
3304 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3305 chan->remote_tx_win = rfc.txwin_size;
3307 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3309 chan->remote_max_tx = rfc.max_transmit;
3311 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3312 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3313 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3314 rfc.max_pdu_size = cpu_to_le16(size);
3315 chan->remote_mps = size;
3317 __l2cap_set_ertm_timeouts(chan, &rfc);
3319 set_bit(CONF_MODE_DONE, &chan->conf_state);
3321 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3322 sizeof(rfc), (unsigned long) &rfc);
/* Record the remote's flow spec if EFS is in use. */
3324 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3325 chan->remote_id = efs.id;
3326 chan->remote_stype = efs.stype;
3327 chan->remote_msdu = le16_to_cpu(efs.msdu);
3328 chan->remote_flush_to =
3329 le32_to_cpu(efs.flush_to);
3330 chan->remote_acc_lat =
3331 le32_to_cpu(efs.acc_lat);
3332 chan->remote_sdu_itime =
3333 le32_to_cpu(efs.sdu_itime);
3334 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3336 (unsigned long) &efs);
3340 case L2CAP_MODE_STREAMING:
3341 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3342 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3343 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3344 rfc.max_pdu_size = cpu_to_le16(size);
3345 chan->remote_mps = size;
3347 set_bit(CONF_MODE_DONE, &chan->conf_state);
3349 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3350 (unsigned long) &rfc);
3355 result = L2CAP_CONF_UNACCEPT;
3357 memset(&rfc, 0, sizeof(rfc));
3358 rfc.mode = chan->mode;
3361 if (result == L2CAP_CONF_SUCCESS)
3362 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3364 rsp->scid = cpu_to_le16(chan->dcid);
3365 rsp->result = cpu_to_le16(result);
3366 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configuration Response (@rsp, @len) and build the follow-up
 * Configuration Request into @data, adjusting channel settings (MTU,
 * flush timeout, RFC, EWS, EFS, FCS) to what the remote accepted.
 * *@result may be updated (e.g. to UNACCEPT on a too-small MTU).
 * Returns the new request length (outside this elided excerpt) or
 * -ECONNREFUSED on an irreconcilable response.
 */
3371 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3372 void *data, u16 *result)
3374 struct l2cap_conf_req *req = data;
3375 void *ptr = req->data;
3378 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3379 struct l2cap_conf_efs efs;
3381 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3383 while (len >= L2CAP_CONF_OPT_SIZE) {
3384 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3387 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the minimum: mark unacceptable and
 * fall back to the minimum. */
3388 if (val < L2CAP_DEFAULT_MIN_MTU) {
3389 *result = L2CAP_CONF_UNACCEPT;
3390 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3393 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3396 case L2CAP_CONF_FLUSH_TO:
3397 chan->flush_to = val;
3398 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3402 case L2CAP_CONF_RFC:
3403 if (olen == sizeof(rfc))
3404 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not be forced into a different mode. */
3406 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3407 rfc.mode != chan->mode)
3408 return -ECONNREFUSED;
3412 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3413 sizeof(rfc), (unsigned long) &rfc);
3416 case L2CAP_CONF_EWS:
3417 chan->ack_win = min_t(u16, val, chan->ack_win);
3418 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3422 case L2CAP_CONF_EFS:
3423 if (olen == sizeof(efs))
3424 memcpy(&efs, (void *)val, olen);
3426 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3427 efs.stype != L2CAP_SERV_NOTRAFIC &&
3428 efs.stype != chan->local_stype)
3429 return -ECONNREFUSED;
3431 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3432 (unsigned long) &efs);
3435 case L2CAP_CONF_FCS:
3436 if (*result == L2CAP_CONF_PENDING)
3437 if (val == L2CAP_FCS_NONE)
3438 set_bit(CONF_RECV_NO_FCS,
/* Only basic mode may not be renegotiated away. */
3444 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3445 return -ECONNREFUSED;
3447 chan->mode = rfc.mode;
3449 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3451 case L2CAP_MODE_ERTM:
3452 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3453 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3454 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3455 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3456 chan->ack_win = min_t(u16, chan->ack_win,
3459 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3460 chan->local_msdu = le16_to_cpu(efs.msdu);
3461 chan->local_sdu_itime =
3462 le32_to_cpu(efs.sdu_itime);
3463 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3464 chan->local_flush_to =
3465 le32_to_cpu(efs.flush_to);
3469 case L2CAP_MODE_STREAMING:
3470 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3474 req->dcid = cpu_to_le16(chan->dcid);
3475 req->flags = __constant_cpu_to_le16(0);
/* Build a minimal Configuration Response header (scid/result/flags) into
 * @data.  Returns the response length (return statement is outside this
 * elided excerpt).
 */
3480 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3481 u16 result, u16 flags)
3483 struct l2cap_conf_rsp *rsp = data;
3484 void *ptr = rsp->data;
3486 BT_DBG("chan %p", chan);
3488 rsp->scid = cpu_to_le16(chan->dcid);
3489 rsp->result = cpu_to_le16(result);
3490 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect (or Create Channel) Response with SUCCESS for
 * a channel whose setup was previously deferred, then kick off local
 * configuration by sending the first Configuration Request (unless one was
 * already sent).
 */
3495 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3497 struct l2cap_conn_rsp rsp;
3498 struct l2cap_conn *conn = chan->conn;
3502 rsp.scid = cpu_to_le16(chan->dcid);
3503 rsp.dcid = cpu_to_le16(chan->scid);
3504 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3505 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP, BR/EDR with CONN_RSP. */
3508 rsp_code = L2CAP_CREATE_CHAN_RSP;
3510 rsp_code = L2CAP_CONN_RSP;
3512 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3514 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3516 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3519 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3520 l2cap_build_conf_req(chan, buf), buf);
3521 chan->num_conf_req++;
/* Extract RFC (and extended window) parameters from a successful
 * Configuration Response and apply them to the channel.  Sane defaults are
 * preset in case the remote omitted the options.  No-op for basic mode.
 */
3524 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3528 /* Use sane default values in case a misbehaving remote device
3529 * did not send an RFC or extended window size option.
3531 u16 txwin_ext = chan->ack_win;
3532 struct l2cap_conf_rfc rfc = {
3534 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3535 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3536 .max_pdu_size = cpu_to_le16(chan->imtu),
3537 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3540 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3542 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3545 while (len >= L2CAP_CONF_OPT_SIZE) {
3546 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3549 case L2CAP_CONF_RFC:
3550 if (olen == sizeof(rfc))
3551 memcpy(&rfc, (void *)val, olen);
3553 case L2CAP_CONF_EWS:
3560 case L2CAP_MODE_ERTM:
3561 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3562 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3563 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* ack_win is bounded by the extended window when extended control
 * is in use, else by the RFC txwin (continuation lines elided). */
3564 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3565 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3567 chan->ack_win = min_t(u16, chan->ack_win,
3570 case L2CAP_MODE_STREAMING:
3571 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our pending
 * Information Request (matching ident), stop the info timer, mark feature
 * discovery as done, and continue connection setup.
 */
3575 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3576 struct l2cap_cmd_hdr *cmd, u8 *data)
3578 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3580 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3583 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3584 cmd->ident == conn->info_ident) {
3585 cancel_delayed_work(&conn->info_timer);
3587 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3588 conn->info_ident = 0;
3590 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (BR/EDR or AMP create-channel).
 * Looks up a listening channel for the PSM, checks link security, creates
 * the new channel, decides the response (success / pending / reject) based
 * on security and feature-discovery state, sends the response, and may
 * trigger feature discovery or the first Configuration Request.
 * Returns the new channel (or NULL on rejection paths; error-label lines
 * are elided from this excerpt).
 */
3596 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3597 struct l2cap_cmd_hdr *cmd,
3598 u8 *data, u8 rsp_code, u8 amp_id)
3600 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3601 struct l2cap_conn_rsp rsp;
3602 struct l2cap_chan *chan = NULL, *pchan;
3603 struct sock *parent, *sk = NULL;
3604 int result, status = L2CAP_CS_NO_INFO;
3606 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3607 __le16 psm = req->psm;
3609 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3611 /* Check if we have socket listening on psm */
3612 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3614 result = L2CAP_CR_BAD_PSM;
3620 mutex_lock(&conn->chan_lock);
3623 /* Check if the ACL is secure enough (if not SDP) */
3624 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3625 !hci_conn_check_link_mode(conn->hcon)) {
3626 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3627 result = L2CAP_CR_SEC_BLOCK;
3631 result = L2CAP_CR_NO_MEM;
3633 /* Check if we already have channel with that dcid */
3634 if (__l2cap_get_chan_by_dcid(conn, scid))
3637 chan = pchan->ops->new_connection(pchan);
3643 hci_conn_hold(conn->hcon);
3645 bacpy(&bt_sk(sk)->src, conn->src);
3646 bacpy(&bt_sk(sk)->dst, conn->dst);
3649 chan->local_amp_id = amp_id;
3651 __l2cap_chan_add(conn, chan);
3655 __set_chan_timer(chan, sk->sk_sndtimeo);
3657 chan->ident = cmd->ident;
/* Decide the response: only proceed once remote features are known. */
3659 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3660 if (l2cap_chan_check_security(chan)) {
3661 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3662 __l2cap_state_change(chan, BT_CONNECT2);
3663 result = L2CAP_CR_PEND;
3664 status = L2CAP_CS_AUTHOR_PEND;
3665 chan->ops->defer(chan);
3667 /* Force pending result for AMP controllers.
3668 * The connection will succeed after the
3669 * physical link is up.
3672 __l2cap_state_change(chan, BT_CONNECT2);
3673 result = L2CAP_CR_PEND;
3675 __l2cap_state_change(chan, BT_CONFIG);
3676 result = L2CAP_CR_SUCCESS;
3678 status = L2CAP_CS_NO_INFO;
/* Security not yet satisfied: pending, authentication in progress. */
3681 __l2cap_state_change(chan, BT_CONNECT2);
3682 result = L2CAP_CR_PEND;
3683 status = L2CAP_CS_AUTHEN_PEND;
/* Features not yet discovered: pending with no info. */
3686 __l2cap_state_change(chan, BT_CONNECT2);
3687 result = L2CAP_CR_PEND;
3688 status = L2CAP_CS_NO_INFO;
3692 release_sock(parent);
3693 mutex_unlock(&conn->chan_lock);
3696 rsp.scid = cpu_to_le16(scid);
3697 rsp.dcid = cpu_to_le16(dcid);
3698 rsp.result = cpu_to_le16(result);
3699 rsp.status = cpu_to_le16(status);
3700 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off feature-mask discovery if it has not been started yet. */
3702 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3703 struct l2cap_info_req info;
3704 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3706 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3707 conn->info_ident = l2cap_get_ident(conn);
3709 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3711 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3712 sizeof(info), &info);
3715 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3716 result == L2CAP_CR_SUCCESS) {
3718 set_bit(CONF_REQ_SENT, &chan->conf_state);
3719 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3720 l2cap_build_conf_req(chan, buf), buf);
3721 chan->num_conf_req++;
/* Signalling-channel handler for a BR/EDR Connection Request: delegate to
 * l2cap_connect() with CONN_RSP as the response code and no AMP id.
 */
3727 static int l2cap_connect_req(struct l2cap_conn *conn,
3728 struct l2cap_cmd_hdr *cmd, u8 *data)
3730 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection (or Create Channel) Response.  Locate the channel by
 * scid or, if not yet assigned, by the command ident.  On SUCCESS move to
 * BT_CONFIG and send the first Configuration Request; on PEND mark the
 * connect as pending; otherwise tear the channel down with ECONNREFUSED.
 */
3734 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3735 struct l2cap_cmd_hdr *cmd, u8 *data)
3737 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3738 u16 scid, dcid, result, status;
3739 struct l2cap_chan *chan;
3743 scid = __le16_to_cpu(rsp->scid);
3744 dcid = __le16_to_cpu(rsp->dcid);
3745 result = __le16_to_cpu(rsp->result);
3746 status = __le16_to_cpu(rsp->status);
3748 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3749 dcid, scid, result, status);
3751 mutex_lock(&conn->chan_lock);
3754 chan = __l2cap_get_chan_by_scid(conn, scid);
/* Fall back to matching the response by signalling ident. */
3760 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3769 l2cap_chan_lock(chan);
3772 case L2CAP_CR_SUCCESS:
3773 l2cap_state_change(chan, BT_CONFIG);
3776 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3778 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3781 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3782 l2cap_build_conf_req(chan, req), req);
3783 chan->num_conf_req++;
3787 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3791 l2cap_chan_del(chan, ECONNREFUSED);
3795 l2cap_chan_unlock(chan);
3798 mutex_unlock(&conn->chan_lock);
/* Apply the negotiated FCS setting: FCS only applies in ERTM/streaming
 * mode, and defaults to CRC16 unless the remote asked for none.
 */
3803 static inline void set_default_fcs(struct l2cap_chan *chan)
3805 /* FCS is enabled only in ERTM or streaming mode, if one or both
3808 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3809 chan->fcs = L2CAP_FCS_NONE;
3810 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3811 chan->fcs = L2CAP_FCS_CRC16;
/* Send a SUCCESS Configuration Response for an EFS-pending channel:
 * clears the local-config-pending flag, marks output config done, then
 * transmits the response built into @data.
 */
3814 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3815 u8 ident, u16 flags)
3817 struct l2cap_conn *conn = chan->conn;
3819 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3822 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3823 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3825 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3826 l2cap_build_conf_rsp(chan, data,
3827 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configuration Request.  Validates the channel state,
 * accumulates (possibly fragmented) config data into chan->conf_req, and
 * once complete parses it, sends the response, and — when both directions
 * are configured — initializes ERTM/streaming and marks the channel ready.
 */
3830 static inline int l2cap_config_req(struct l2cap_conn *conn,
3831 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3834 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3837 struct l2cap_chan *chan;
3840 dcid = __le16_to_cpu(req->dcid);
3841 flags = __le16_to_cpu(req->flags);
3843 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3845 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with an invalid-CID command reject. */
3849 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3850 struct l2cap_cmd_rej_cid rej;
3852 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3853 rej.scid = cpu_to_le16(chan->scid);
3854 rej.dcid = cpu_to_le16(chan->dcid);
3856 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3861 /* Reject if config buffer is too small. */
3862 len = cmd_len - sizeof(*req);
3863 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3864 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3865 l2cap_build_conf_rsp(chan, rsp,
3866 L2CAP_CONF_REJECT, flags), rsp);
3871 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3872 chan->conf_len += len;
3874 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3875 /* Incomplete config. Send empty response. */
3876 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3877 l2cap_build_conf_rsp(chan, rsp,
3878 L2CAP_CONF_SUCCESS, flags), rsp);
3882 /* Complete config. */
3883 len = l2cap_parse_conf_req(chan, rsp);
3885 l2cap_send_disconn_req(chan, ECONNRESET);
3889 chan->ident = cmd->ident;
3890 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3891 chan->num_conf_rsp++;
3893 /* Reset config buffer. */
3896 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3899 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3900 set_default_fcs(chan);
3902 if (chan->mode == L2CAP_MODE_ERTM ||
3903 chan->mode == L2CAP_MODE_STREAMING)
3904 err = l2cap_ertm_init(chan);
3907 l2cap_send_disconn_req(chan, -err);
3909 l2cap_chan_ready(chan);
3914 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3916 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3917 l2cap_build_conf_req(chan, buf), buf);
3918 chan->num_conf_req++;
3921 /* Got Conf Rsp PENDING from remote side and asume we sent
3922 Conf Rsp PENDING in the code above */
3923 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3924 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3926 /* check compatibility */
3928 /* Send rsp for BR/EDR channel */
3930 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3932 chan->ident = cmd->ident;
3936 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response.  Dispatches on the result:
 * SUCCESS applies the RFC values; PENDING may parse and answer EFS config
 * (or create an AMP logical link); UNACCEPT renegotiates up to
 * L2CAP_CONF_MAX_CONF_RSP times; anything else disconnects.  When both
 * sides are configured, ERTM/streaming is initialized and the channel
 * goes ready.
 */
3940 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3941 struct l2cap_cmd_hdr *cmd, u8 *data)
3943 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3944 u16 scid, flags, result;
3945 struct l2cap_chan *chan;
3946 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3949 scid = __le16_to_cpu(rsp->scid);
3950 flags = __le16_to_cpu(rsp->flags);
3951 result = __le16_to_cpu(rsp->result);
3953 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3956 chan = l2cap_get_chan_by_scid(conn, scid);
3961 case L2CAP_CONF_SUCCESS:
3962 l2cap_conf_rfc_get(chan, rsp->data, len);
3963 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3966 case L2CAP_CONF_PENDING:
3967 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3969 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3972 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3975 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR: answer directly; AMP: wait for the logical link. */
3979 if (!chan->hs_hcon) {
3980 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
3983 if (l2cap_check_efs(chan)) {
3984 amp_create_logical_link(chan);
3985 chan->ident = cmd->ident;
3991 case L2CAP_CONF_UNACCEPT:
3992 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Bail out if the rejected options won't fit our req buffer. */
3995 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3996 l2cap_send_disconn_req(chan, ECONNRESET);
4000 /* throw out any old stored conf requests */
4001 result = L2CAP_CONF_SUCCESS;
4002 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4005 l2cap_send_disconn_req(chan, ECONNRESET);
4009 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4010 L2CAP_CONF_REQ, len, req);
4011 chan->num_conf_req++;
4012 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up and disconnect the channel. */
4018 l2cap_chan_set_err(chan, ECONNRESET);
4020 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4021 l2cap_send_disconn_req(chan, ECONNRESET);
4025 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4028 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4030 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4031 set_default_fcs(chan);
4033 if (chan->mode == L2CAP_MODE_ERTM ||
4034 chan->mode == L2CAP_MODE_STREAMING)
4035 err = l2cap_ertm_init(chan);
4038 l2cap_send_disconn_req(chan, -err);
4040 l2cap_chan_ready(chan);
4044 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: find the channel by our
 * local cid (the request's dcid), send the Disconnection Response, shut
 * down the socket, and delete the channel with ECONNRESET.  The hold/put
 * pair keeps the channel alive across ops->close().
 */
4048 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4049 struct l2cap_cmd_hdr *cmd, u8 *data)
4051 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4052 struct l2cap_disconn_rsp rsp;
4054 struct l2cap_chan *chan;
4057 scid = __le16_to_cpu(req->scid);
4058 dcid = __le16_to_cpu(req->dcid);
4060 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4062 mutex_lock(&conn->chan_lock);
4064 chan = __l2cap_get_chan_by_scid(conn, dcid);
4066 mutex_unlock(&conn->chan_lock);
4070 l2cap_chan_lock(chan);
4074 rsp.dcid = cpu_to_le16(chan->scid);
4075 rsp.scid = cpu_to_le16(chan->dcid);
4076 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4079 sk->sk_shutdown = SHUTDOWN_MASK;
4082 l2cap_chan_hold(chan);
4083 l2cap_chan_del(chan, ECONNRESET);
4085 l2cap_chan_unlock(chan);
4087 chan->ops->close(chan);
4088 l2cap_chan_put(chan);
4090 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response: find the channel by scid and
 * delete it (error 0 — this is the clean, expected completion of a
 * disconnect we initiated).
 */
4095 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4096 struct l2cap_cmd_hdr *cmd, u8 *data)
4098 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4100 struct l2cap_chan *chan;
4102 scid = __le16_to_cpu(rsp->scid);
4103 dcid = __le16_to_cpu(rsp->dcid);
4105 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4107 mutex_lock(&conn->chan_lock);
4109 chan = __l2cap_get_chan_by_scid(conn, scid);
4111 mutex_unlock(&conn->chan_lock);
4115 l2cap_chan_lock(chan);
4117 l2cap_chan_hold(chan);
4118 l2cap_chan_del(chan, 0);
4120 l2cap_chan_unlock(chan);
4122 chan->ops->close(chan);
4123 l2cap_chan_put(chan);
4125 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request.  Answers FEAT_MASK with our
 * feature mask (extended with ERTM/streaming and, for high speed, extended
 * flow/window bits), FIXED_CHAN with the fixed-channel bitmap (A2MP bit
 * set only when high speed is enabled), and anything else with NOTSUPP.
 */
4130 static inline int l2cap_information_req(struct l2cap_conn *conn,
4131 struct l2cap_cmd_hdr *cmd, u8 *data)
4133 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4136 type = __le16_to_cpu(req->type);
4138 BT_DBG("type 0x%4.4x", type);
4140 if (type == L2CAP_IT_FEAT_MASK) {
4142 u32 feat_mask = l2cap_feat_mask;
4143 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4144 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4145 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4147 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4150 feat_mask |= L2CAP_FEAT_EXT_FLOW
4151 | L2CAP_FEAT_EXT_WINDOW;
4153 put_unaligned_le32(feat_mask, rsp->data);
4154 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4156 } else if (type == L2CAP_IT_FIXED_CHAN) {
4158 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when high speed is on. */
4161 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4163 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4165 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4166 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4167 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4168 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4171 struct l2cap_info_rsp rsp;
4172 rsp.type = cpu_to_le16(type);
4173 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4174 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response.  Validates the ident and
 * discovery state, records the remote feature mask or fixed-channel mask,
 * chains a FIXED_CHAN request after FEAT_MASK when supported, and finally
 * marks discovery done and starts pending connections.
 */
4181 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4182 struct l2cap_cmd_hdr *cmd, u8 *data)
4184 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4187 type = __le16_to_cpu(rsp->type);
4188 result = __le16_to_cpu(rsp->result);
4190 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4192 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4193 if (cmd->ident != conn->info_ident ||
4194 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4197 cancel_delayed_work(&conn->info_timer);
4199 if (result != L2CAP_IR_SUCCESS) {
4200 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4201 conn->info_ident = 0;
4203 l2cap_conn_start(conn);
4209 case L2CAP_IT_FEAT_MASK:
4210 conn->feat_mask = get_unaligned_le32(rsp->data);
4212 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4213 struct l2cap_info_req req;
4214 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4216 conn->info_ident = l2cap_get_ident(conn);
4218 l2cap_send_cmd(conn, conn->info_ident,
4219 L2CAP_INFO_REQ, sizeof(req), &req);
4221 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4222 conn->info_ident = 0;
4224 l2cap_conn_start(conn);
4228 case L2CAP_IT_FIXED_CHAN:
4229 conn->fixed_chan_mask = rsp->data[0];
4230 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4231 conn->info_ident = 0;
4233 l2cap_conn_start(conn);
/* Handle an incoming Create Channel Request (AMP).  amp_id 0 (HCI_BREDR_ID)
 * is treated as an ordinary BR/EDR connect; otherwise the AMP controller id
 * is validated (device exists, is an AMP, is up), the BR/EDR channel is
 * created, and the AMP manager/logical-link state is wired up.  Invalid
 * controllers are answered with L2CAP_CR_BAD_AMP.
 */
4240 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4241 struct l2cap_cmd_hdr *cmd,
4242 u16 cmd_len, void *data)
4244 struct l2cap_create_chan_req *req = data;
4245 struct l2cap_create_chan_rsp rsp;
4246 struct l2cap_chan *chan;
4247 struct hci_dev *hdev;
4250 if (cmd_len != sizeof(*req))
4256 psm = le16_to_cpu(req->psm);
4257 scid = le16_to_cpu(req->scid);
4259 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4261 /* For controller id 0 make BR/EDR connection */
4262 if (req->amp_id == HCI_BREDR_ID) {
4263 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4268 /* Validate AMP controller id */
4269 hdev = hci_dev_get(req->amp_id);
4273 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4278 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4281 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4282 struct hci_conn *hs_hcon;
4284 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4290 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4292 mgr->bredr_chan = chan;
4293 chan->hs_hcon = hs_hcon;
/* AMP channels carry no FCS; MTU follows the AMP block MTU. */
4294 chan->fcs = L2CAP_FCS_NONE;
4295 conn->mtu = hdev->block_mtu;
/* Error path: reject the request with BAD_AMP. */
4304 rsp.scid = cpu_to_le16(scid);
4305 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4306 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4308 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request towards @dest_amp_id for @chan, remember the
 * signalling ident, and arm the move timeout.
 */
4314 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4316 struct l2cap_move_chan_req req;
4319 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4321 ident = l2cap_get_ident(chan->conn);
4322 chan->ident = ident;
4324 req.icid = cpu_to_le16(chan->scid);
4325 req.dest_amp_id = dest_amp_id;
4327 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4330 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with @result, echoing the ident of the
 * request we are answering.
 */
4333 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4335 struct l2cap_move_chan_rsp rsp;
4337 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4339 rsp.icid = cpu_to_le16(chan->dcid);
4340 rsp.result = cpu_to_le16(result);
4342 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm with @result under a fresh ident and arm the
 * move timeout while waiting for the confirm response.
 */
4346 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4348 struct l2cap_move_chan_cfm cfm;
4350 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4352 chan->ident = l2cap_get_ident(chan->conn);
4354 cfm.icid = cpu_to_le16(chan->scid);
4355 cfm.result = cpu_to_le16(result);
4357 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4360 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare @icid — used when
 * there is no channel object to attach the confirm to.
 */
4363 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4365 struct l2cap_move_chan_cfm cfm;
4367 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4369 cfm.icid = cpu_to_le16(icid);
4370 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4372 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirm Response for @icid, reusing the ident of the
 * confirm being acknowledged.
 */
4376 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4379 struct l2cap_move_chan_cfm_rsp rsp;
4381 BT_DBG("icid 0x%4.4x", icid);
4383 rsp.icid = cpu_to_le16(icid);
4384 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the high-speed logical link references from the channel.  Actual
 * link release is not implemented yet (see placeholder comment).
 */
4387 static void __release_logical_link(struct l2cap_chan *chan)
4389 chan->hs_hchan = NULL;
4390 chan->hs_hcon = NULL;
4392 /* Placeholder - release the logical link */
/* Clean up after a failed logical-link setup.  If the channel never got
 * connected the whole channel is torn down; otherwise the in-progress
 * channel move is unwound according to our move role and state.
 */
4395 static void l2cap_logical_fail(struct l2cap_chan *chan)
4397 /* Logical link setup failed */
4398 if (chan->state != BT_CONNECTED) {
4399 /* Create channel failure, disconnect */
4400 l2cap_send_disconn_req(chan, ECONNRESET);
4404 switch (chan->move_role) {
4405 case L2CAP_MOVE_ROLE_RESPONDER:
4406 l2cap_move_done(chan);
4407 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4409 case L2CAP_MOVE_ROLE_INITIATOR:
4410 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4411 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4412 /* Remote has only sent pending or
4413 * success responses, clean up
4415 l2cap_move_done(chan);
4418 /* Other amp move states imply that the move
4419 * has already aborted
4421 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4426 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4427 struct hci_chan *hchan)
4429 struct l2cap_conf_rsp rsp;
4431 chan->hs_hchan = hchan;
4432 chan->hs_hcon->l2cap_data = chan->conn;
4434 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4436 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4439 set_default_fcs(chan);
4441 err = l2cap_ertm_init(chan);
4443 l2cap_send_disconn_req(chan, -err);
4445 l2cap_chan_ready(chan);
4449 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4450 struct hci_chan *hchan)
4452 chan->hs_hcon = hchan->conn;
4453 chan->hs_hcon->l2cap_data = chan->conn;
4455 BT_DBG("move_state %d", chan->move_state);
4457 switch (chan->move_state) {
4458 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4459 /* Move confirm will be sent after a success
4460 * response is received
4462 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4464 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4465 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4466 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4467 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4468 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4469 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4470 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4471 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4472 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4476 /* Move was not in expected state, free the channel */
4477 __release_logical_link(chan);
4479 chan->move_state = L2CAP_MOVE_STABLE;
4483 /* Call with chan locked */
4484 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4487 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4490 l2cap_logical_fail(chan);
4491 __release_logical_link(chan);
4495 if (chan->state != BT_CONNECTED) {
4496 /* Ignore logical link if channel is on BR/EDR */
4497 if (chan->local_amp_id)
4498 l2cap_logical_finish_create(chan, hchan);
4500 l2cap_logical_finish_move(chan, hchan);
4504 void l2cap_move_start(struct l2cap_chan *chan)
4506 BT_DBG("chan %p", chan);
4508 if (chan->local_amp_id == HCI_BREDR_ID) {
4509 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4511 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4512 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4513 /* Placeholder - start physical link setup */
4515 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4516 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4518 l2cap_move_setup(chan);
4519 l2cap_send_move_chan_req(chan, 0);
/* Finish channel creation after AMP physical link setup completes.
 * For an outgoing channel (BT_CONNECT) either continue on the AMP
 * controller or revert to a plain BR/EDR connect request; for an
 * incoming channel send the Create Channel response and, on success,
 * enter configuration.
 * NOTE(review): this chunk is a truncated paste (missing braces and
 * elided lines, e.g. the `buf` declaration); code left byte-identical.
 */
4523 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4524 u8 local_amp_id, u8 remote_amp_id)
4526 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4527 local_amp_id, remote_amp_id);
/* AMP channels do not use an FCS by default */
4529 chan->fcs = L2CAP_FCS_NONE;
4531 /* Outgoing channel on AMP */
4532 if (chan->state == BT_CONNECT) {
4533 if (result == L2CAP_CR_SUCCESS) {
4534 chan->local_amp_id = local_amp_id;
4535 l2cap_send_create_chan_req(chan, remote_amp_id);
4537 /* Revert to BR/EDR connect */
4538 l2cap_send_conn_req(chan);
4544 /* Incoming channel on AMP */
4545 if (__l2cap_no_conn_pending(chan)) {
4546 struct l2cap_conn_rsp rsp;
/* scid/dcid in the response are from the remote's perspective */
4548 rsp.scid = cpu_to_le16(chan->dcid);
4549 rsp.dcid = cpu_to_le16(chan->scid);
4551 if (result == L2CAP_CR_SUCCESS) {
4552 /* Send successful response */
4553 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4554 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4556 /* Send negative response */
4557 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4558 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4561 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* Accepted: move to BT_CONFIG and fire the first config request */
4564 if (result == L2CAP_CR_SUCCESS) {
4565 __l2cap_state_change(chan, BT_CONFIG);
4566 set_bit(CONF_REQ_SENT, &chan->conf_state);
4567 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4569 l2cap_build_conf_req(chan, buf), buf);
4570 chan->num_conf_req++;
4575 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4578 l2cap_move_setup(chan);
4579 chan->move_id = local_amp_id;
4580 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4582 l2cap_send_move_chan_req(chan, remote_amp_id);
/* As move responder, answer a Move Channel Request based on the state
 * of the (placeholder) logical link: success if already connected,
 * wait for it if still coming up, or reject if unavailable.
 * NOTE(review): truncated paste — the branch around a NULL hchan is
 * elided; hchan is always NULL until the placeholder is implemented.
 */
4585 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4587 struct hci_chan *hchan = NULL;
4589 /* Placeholder - get hci_chan for logical link */
4592 if (hchan->state == BT_CONNECTED) {
4593 /* Logical link is ready to go */
4594 chan->hs_hcon = hchan->conn;
4595 chan->hs_hcon->l2cap_data = chan->conn;
4596 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4597 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4599 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4601 /* Wait for logical link to be ready */
4602 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4605 /* Logical link not available */
4606 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move: as responder, send a failure response
 * (BAD_ID for -EINVAL, otherwise NOT_ALLOWED); then reset the move
 * state machine and resume ERTM transmission.
 * NOTE(review): truncated paste — the rsp_result declaration and
 * braces are elided; code left byte-identical.
 */
4610 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4612 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4614 if (result == -EINVAL)
4615 rsp_result = L2CAP_MR_BAD_ID;
4617 rsp_result = L2CAP_MR_NOT_ALLOWED;
4619 l2cap_send_move_chan_rsp(chan, rsp_result);
4622 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4623 chan->move_state = L2CAP_MOVE_STABLE;
4625 /* Restart data transmission */
4626 l2cap_ertm_send(chan);
4629 /* Invoke with locked chan */
/* Physical (AMP) link setup completion.  Dispatch on channel state:
 * pre-connected channels continue creation, failed results cancel a
 * move, otherwise proceed per this side's move role.
 * NOTE(review): truncated paste — early-return, break statements and
 * braces are elided; code left byte-identical.
 */
4630 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4632 u8 local_amp_id = chan->local_amp_id;
4633 u8 remote_amp_id = chan->remote_amp_id;
4635 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4636 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away; nothing to do */
4638 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4639 l2cap_chan_unlock(chan);
4643 if (chan->state != BT_CONNECTED) {
4644 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4645 } else if (result != L2CAP_MR_SUCCESS) {
4646 l2cap_do_move_cancel(chan, result);
4648 switch (chan->move_role) {
4649 case L2CAP_MOVE_ROLE_INITIATOR:
4650 l2cap_do_move_initiate(chan, local_amp_id,
4653 case L2CAP_MOVE_ROLE_RESPONDER:
4654 l2cap_do_move_respond(chan, result);
4657 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request: validate the target
 * channel and destination controller, detect move collisions, then
 * either accept (SUCCESS/PEND) or reject the move as responder.
 * NOTE(review): truncated paste — the icid declaration, hci_dev_put,
 * goto labels and braces are elided; code left byte-identical.
 */
4663 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4664 struct l2cap_cmd_hdr *cmd,
4665 u16 cmd_len, void *data)
4667 struct l2cap_move_chan_req *req = data;
4668 struct l2cap_move_chan_rsp rsp;
4669 struct l2cap_chan *chan;
4671 u16 result = L2CAP_MR_NOT_ALLOWED;
4673 if (cmd_len != sizeof(*req))
4676 icid = le16_to_cpu(req->icid);
4678 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* No channel for this icid: answer NOT_ALLOWED directly */
4683 chan = l2cap_get_chan_by_dcid(conn, icid);
4685 rsp.icid = cpu_to_le16(icid);
4686 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4687 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4692 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels not pinned to BR/EDR move */
4694 if (chan->scid < L2CAP_CID_DYN_START ||
4695 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4696 (chan->mode != L2CAP_MODE_ERTM &&
4697 chan->mode != L2CAP_MODE_STREAMING)) {
4698 result = L2CAP_MR_NOT_ALLOWED;
4699 goto send_move_response;
4702 if (chan->local_amp_id == req->dest_amp_id) {
4703 result = L2CAP_MR_SAME_ID;
4704 goto send_move_response;
/* Destination must be a powered-up AMP controller */
4707 if (req->dest_amp_id) {
4708 struct hci_dev *hdev;
4709 hdev = hci_dev_get(req->dest_amp_id);
4710 if (!hdev || hdev->dev_type != HCI_AMP ||
4711 !test_bit(HCI_UP, &hdev->flags)) {
4715 result = L2CAP_MR_BAD_ID;
4716 goto send_move_response;
4721 /* Detect a move collision. Only send a collision response
4722 * if this side has "lost", otherwise proceed with the move.
4723 * The winner has the larger bd_addr.
4725 if ((__chan_is_moving(chan) ||
4726 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4727 bacmp(conn->src, conn->dst) > 0) {
4728 result = L2CAP_MR_COLLISION;
4729 goto send_move_response;
4732 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4733 l2cap_move_setup(chan);
4734 chan->move_id = req->dest_amp_id;
4737 if (!req->dest_amp_id) {
4738 /* Moving to BR/EDR */
4739 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4740 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4741 result = L2CAP_MR_PEND;
4743 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4744 result = L2CAP_MR_SUCCESS;
4747 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4748 /* Placeholder - uncomment when amp functions are available */
4749 /*amp_accept_physical(chan, req->dest_amp_id);*/
4750 result = L2CAP_MR_PEND;
4754 l2cap_send_move_chan_rsp(chan, result);
4756 l2cap_chan_unlock(chan);
/* Process a SUCCESS or PEND Move Channel Response as initiator and
 * advance the move state machine; unknown icids and unexpected states
 * terminate the move with an "unconfirmed" confirmation.
 * NOTE(review): truncated paste — break statements, goto labels and
 * braces are elided; code left byte-identical.
 */
4761 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4763 struct l2cap_chan *chan;
4764 struct hci_chan *hchan = NULL;
4766 chan = l2cap_get_chan_by_scid(conn, icid);
/* No such channel: spec still requires a confirmation */
4768 l2cap_send_move_chan_cfm_icid(conn, icid);
4772 __clear_chan_timer(chan);
4773 if (result == L2CAP_MR_PEND)
4774 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4776 switch (chan->move_state) {
4777 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4778 /* Move confirm will be sent when logical link
4781 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4783 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4784 if (result == L2CAP_MR_PEND) {
4786 } else if (test_bit(CONN_LOCAL_BUSY,
4787 &chan->conn_state)) {
4788 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4790 /* Logical link is up or moving to BR/EDR,
4793 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4794 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4797 case L2CAP_MOVE_WAIT_RSP:
4799 if (result == L2CAP_MR_SUCCESS) {
4800 /* Remote is ready, send confirm immediately
4801 * after logical link is ready
4803 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4805 /* Both logical link and move success
4806 * are required to confirm
4808 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4811 /* Placeholder - get hci_chan for logical link */
/* hchan stays NULL until the placeholder is implemented */
4813 /* Logical link not available */
4814 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4818 /* If the logical link is not yet connected, do not
4819 * send confirmation.
4821 if (hchan->state != BT_CONNECTED)
4824 /* Logical link is already ready to go */
4826 chan->hs_hcon = hchan->conn;
4827 chan->hs_hcon->l2cap_data = chan->conn;
4829 if (result == L2CAP_MR_SUCCESS) {
4830 /* Can confirm now */
4831 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4833 /* Now only need move success
4836 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4839 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4842 /* Any other amp move state means the move failed. */
4843 chan->move_id = chan->local_amp_id;
4844 l2cap_move_done(chan);
4845 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4848 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response.  On a collision loss, switch
 * to responder and wait for the peer's request; otherwise cancel the
 * move.  Either way an "unconfirmed" confirmation is sent.
 * NOTE(review): truncated paste — the result parameter declaration
 * line and braces are elided; code left byte-identical.
 */
4851 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4854 struct l2cap_chan *chan;
4856 chan = l2cap_get_chan_by_ident(conn, ident);
4858 /* Could not locate channel, icid is best guess */
4859 l2cap_send_move_chan_cfm_icid(conn, icid);
4863 __clear_chan_timer(chan);
4865 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4866 if (result == L2CAP_MR_COLLISION) {
4867 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4869 /* Cleanup - cancel move */
4870 chan->move_id = chan->local_amp_id;
4871 l2cap_move_done(chan);
4875 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4877 l2cap_chan_unlock(chan);
/* Dispatch an incoming Move Channel Response: SUCCESS/PEND continue
 * the move, any other result aborts it.
 * NOTE(review): truncated paste — icid/result declarations, the early
 * length-check return and braces are elided; code left byte-identical.
 */
4880 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4881 struct l2cap_cmd_hdr *cmd,
4882 u16 cmd_len, void *data)
4884 struct l2cap_move_chan_rsp *rsp = data;
4887 if (cmd_len != sizeof(*rsp))
4890 icid = le16_to_cpu(rsp->icid);
4891 result = le16_to_cpu(rsp->result);
4893 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4895 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4896 l2cap_move_continue(conn, icid, result);
4898 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirmation as responder: on CONFIRMED,
 * commit to the new controller (dropping the logical link when the
 * destination is BR/EDR); otherwise revert.  A confirmation response
 * is always sent, even for unknown icids.
 * NOTE(review): truncated paste — declarations, else-branch and
 * braces are elided; code left byte-identical.
 */
4903 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4904 struct l2cap_cmd_hdr *cmd,
4905 u16 cmd_len, void *data)
4907 struct l2cap_move_chan_cfm *cfm = data;
4908 struct l2cap_chan *chan;
4911 if (cmd_len != sizeof(*cfm))
4914 icid = le16_to_cpu(cfm->icid);
4915 result = le16_to_cpu(cfm->result);
4917 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4919 chan = l2cap_get_chan_by_dcid(conn, icid);
4921 /* Spec requires a response even if the icid was not found */
4922 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4926 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
4927 if (result == L2CAP_MC_CONFIRMED) {
4928 chan->local_amp_id = chan->move_id;
/* local_amp_id 0 == BR/EDR: the AMP link is no longer needed */
4929 if (!chan->local_amp_id)
4930 __release_logical_link(chan);
4932 chan->move_id = chan->local_amp_id;
4935 l2cap_move_done(chan);
4938 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4940 l2cap_chan_unlock(chan);
/* Final step of a move as initiator: the peer acknowledged our
 * confirmation, so commit to the new controller and finish the move.
 * NOTE(review): truncated paste — icid declaration, returns and
 * braces are elided; code left byte-identical.
 */
4945 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4946 struct l2cap_cmd_hdr *cmd,
4947 u16 cmd_len, void *data)
4949 struct l2cap_move_chan_cfm_rsp *rsp = data;
4950 struct l2cap_chan *chan;
4953 if (cmd_len != sizeof(*rsp))
4956 icid = le16_to_cpu(rsp->icid);
4958 BT_DBG("icid 0x%4.4x", icid);
4960 chan = l2cap_get_chan_by_scid(conn, icid);
4964 __clear_chan_timer(chan);
4966 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
4967 chan->local_amp_id = chan->move_id;
/* Moved back to BR/EDR: drop the now-unused AMP logical link */
4969 if (!chan->local_amp_id && chan->hs_hchan)
4970 __release_logical_link(chan);
4972 l2cap_move_done(chan);
4975 l2cap_chan_unlock(chan);
4980 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4985 if (min > max || min < 6 || max > 3200)
4988 if (to_multiplier < 10 || to_multiplier > 3200)
4991 if (max >= to_multiplier * 8)
4994 max_latency = (to_multiplier * 8 / max) - 1;
4995 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role
 * only): validate the proposed parameters, answer accept/reject, and
 * apply accepted parameters to the HCI connection.
 * NOTE(review): truncated paste — error returns, the sizeof(rsp)
 * continuation of l2cap_send_cmd, the `if (!err)` guard and braces
 * are elided; code left byte-identical.
 */
5001 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5002 struct l2cap_cmd_hdr *cmd,
5005 struct hci_conn *hcon = conn->hcon;
5006 struct l2cap_conn_param_update_req *req;
5007 struct l2cap_conn_param_update_rsp rsp;
5008 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may be asked to update parameters */
5011 if (!(hcon->link_mode & HCI_LM_MASTER))
5014 cmd_len = __le16_to_cpu(cmd->len);
5015 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5018 req = (struct l2cap_conn_param_update_req *) data;
5019 min = __le16_to_cpu(req->min);
5020 max = __le16_to_cpu(req->max);
5021 latency = __le16_to_cpu(req->latency);
5022 to_multiplier = __le16_to_cpu(req->to_multiplier);
5024 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5025 min, max, latency, to_multiplier);
5027 memset(&rsp, 0, sizeof(rsp));
5029 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5031 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5033 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5035 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the controller */
5039 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signalling command to its handler.
 * Unknown opcodes are logged and rejected by the caller.
 * NOTE(review): truncated paste — the `int err = 0;` declaration,
 * break statements and braces are elided; code left byte-identical.
 */
5044 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5045 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5050 switch (cmd->code) {
5051 case L2CAP_COMMAND_REJ:
5052 l2cap_command_rej(conn, cmd, data);
5055 case L2CAP_CONN_REQ:
5056 err = l2cap_connect_req(conn, cmd, data);
/* Connect and Create Channel responses share one handler */
5059 case L2CAP_CONN_RSP:
5060 case L2CAP_CREATE_CHAN_RSP:
5061 err = l2cap_connect_create_rsp(conn, cmd, data);
5064 case L2CAP_CONF_REQ:
5065 err = l2cap_config_req(conn, cmd, cmd_len, data);
5068 case L2CAP_CONF_RSP:
5069 err = l2cap_config_rsp(conn, cmd, data);
5072 case L2CAP_DISCONN_REQ:
5073 err = l2cap_disconnect_req(conn, cmd, data);
5076 case L2CAP_DISCONN_RSP:
5077 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo request: reflect the payload straight back */
5080 case L2CAP_ECHO_REQ:
5081 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5084 case L2CAP_ECHO_RSP:
5087 case L2CAP_INFO_REQ:
5088 err = l2cap_information_req(conn, cmd, data);
5091 case L2CAP_INFO_RSP:
5092 err = l2cap_information_rsp(conn, cmd, data);
5095 case L2CAP_CREATE_CHAN_REQ:
5096 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5099 case L2CAP_MOVE_CHAN_REQ:
5100 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5103 case L2CAP_MOVE_CHAN_RSP:
5104 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5107 case L2CAP_MOVE_CHAN_CFM:
5108 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5111 case L2CAP_MOVE_CHAN_CFM_RSP:
5112 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5116 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signalling command.  Only connection parameter
 * update requests are handled; other opcodes are ignored or rejected.
 * NOTE(review): truncated paste — return statements and braces are
 * elided; code left byte-identical.
 */
5124 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5125 struct l2cap_cmd_hdr *cmd, u8 *data)
5127 switch (cmd->code) {
5128 case L2CAP_COMMAND_REJ:
5131 case L2CAP_CONN_PARAM_UPDATE_REQ:
5132 return l2cap_conn_param_update_req(conn, cmd, data);
5134 case L2CAP_CONN_PARAM_UPDATE_RSP:
5138 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signalling channel payload: iterate over the packed
 * command headers in @skb, dispatch each to the LE or BR/EDR handler,
 * and send a Command Reject when a handler fails.
 * NOTE(review): truncated paste — len/err declarations, the
 * sizeof(rej) continuation and braces are elided; code left
 * byte-identical.
 */
5143 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5144 struct sk_buff *skb)
5146 u8 *data = skb->data;
5148 struct l2cap_cmd_hdr cmd;
/* Mirror raw signalling traffic to raw sockets first */
5151 l2cap_raw_recv(conn, skb);
5153 while (len >= L2CAP_CMD_HDR_SIZE) {
5155 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5156 data += L2CAP_CMD_HDR_SIZE;
5157 len -= L2CAP_CMD_HDR_SIZE;
5159 cmd_len = le16_to_cpu(cmd.len);
5161 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* A command must fit in the remaining data and carry an ident */
5164 if (cmd_len > len || !cmd.ident) {
5165 BT_DBG("corrupted command");
5169 if (conn->hcon->type == LE_LINK)
5170 err = l2cap_le_sig_cmd(conn, &cmd, data);
5172 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5175 struct l2cap_cmd_rej_unk rej;
5177 BT_ERR("Wrong link type (%d)", err);
5179 /* FIXME: Map err to a valid reason */
5180 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5181 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify and strip the CRC16 FCS trailer of a received ERTM frame.
 * The CRC covers the L2CAP header (enhanced or extended, depending on
 * FLAG_EXT_CTRL) plus the payload.  Returns 0 on match.
 * NOTE(review): truncated paste — hdr_size declaration, returns and
 * braces are elided; code left byte-identical.
 */
5192 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5194 u16 our_fcs, rcv_fcs;
5197 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5198 hdr_size = L2CAP_EXT_HDR_SIZE;
5200 hdr_size = L2CAP_ENH_HDR_SIZE;
5202 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS first; it still sits just past the new tail */
5203 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5204 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5205 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5207 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer with an F-bit set frame: RNR when
 * locally busy, otherwise pending I-frames if any, falling back to an
 * RR S-frame when no I-frame carried the F bit.
 * NOTE(review): truncated paste — control.final assignment and braces
 * are elided; code left byte-identical.
 */
5213 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5215 struct l2cap_ctrl control;
5217 BT_DBG("chan %p", chan);
5219 memset(&control, 0, sizeof(control));
5222 control.reqseq = chan->buffer_seq;
5223 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5225 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5226 control.super = L2CAP_SUPER_RNR;
5227 l2cap_send_sframe(chan, &control);
/* Peer is no longer busy but frames are outstanding: re-arm */
5230 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5231 chan->unacked_frames > 0)
5232 __set_retrans_timer(chan);
5234 /* Send pending iframes */
5235 l2cap_ertm_send(chan);
5237 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5238 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5239 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5242 control.super = L2CAP_SUPER_RR;
5243 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list (tracked through @last_frag to
 * avoid walking the list) and update the aggregate length/accounting
 * fields on the head skb.
 * NOTE(review): truncated paste — the else-branch of the frag_list
 * check and braces are elided; code left byte-identical.
 */
5247 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5248 struct sk_buff **last_frag)
5250 /* skb->len reflects data in skb as well as all fragments
5251 * skb->data_len reflects only data in fragments
5253 if (!skb_has_frag_list(skb))
5254 skb_shinfo(skb)->frag_list = new_frag;
5256 new_frag->next = NULL;
5258 (*last_frag)->next = new_frag;
5259 *last_frag = new_frag;
5261 skb->len += new_frag->len;
5262 skb->data_len += new_frag->len;
5263 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM I-frames according to the SAR bits:
 * unsegmented frames go straight up, START frames open a new SDU
 * (pulling the 2-byte SDU length), CONTINUE/END frames are chained
 * via append_skb_frag until the announced length is reached.
 * NOTE(review): truncated paste — error paths, state checks and
 * braces are elided; code left byte-identical and claims hedged.
 */
5266 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5267 struct l2cap_ctrl *control)
5271 switch (control->sar) {
5272 case L2CAP_SAR_UNSEGMENTED:
5276 err = chan->ops->recv(chan, skb);
5279 case L2CAP_SAR_START:
/* First two payload bytes announce the total SDU length */
5283 chan->sdu_len = get_unaligned_le16(skb->data);
5284 skb_pull(skb, L2CAP_SDULEN_SIZE);
5286 if (chan->sdu_len > chan->imtu) {
5291 if (skb->len >= chan->sdu_len)
5295 chan->sdu_last_frag = skb;
5301 case L2CAP_SAR_CONTINUE:
5305 append_skb_frag(chan->sdu, skb,
5306 &chan->sdu_last_frag);
/* Accumulated more than announced: protocol error path */
5309 if (chan->sdu->len >= chan->sdu_len)
5319 append_skb_frag(chan->sdu, skb,
5320 &chan->sdu_last_frag);
5323 if (chan->sdu->len != chan->sdu_len)
5326 err = chan->ops->recv(chan, chan->sdu);
5329 /* Reassembly complete */
5331 chan->sdu_last_frag = NULL;
/* Error path: discard the partly assembled SDU */
5339 kfree_skb(chan->sdu);
5341 chan->sdu_last_frag = NULL;
/* Re-segment queued outgoing data after a channel move changes the
 * usable MPS.  NOTE(review): the body is elided in this paste —
 * presumably a placeholder returning 0; confirm against upstream.
 */
5348 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition (socket receive buffer full/cleared)
 * into the ERTM transmit state machine; no-op for non-ERTM modes.
 */
5354 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5358 if (chan->mode != L2CAP_MODE_ERTM)
5361 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5362 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue in sequence order: deliver each consecutive
 * frame starting at buffer_seq to reassembly until a gap (or local
 * busy) stops us; once empty, leave SREJ_SENT state and ack.
 * NOTE(review): truncated paste — err declaration, loop exit and
 * braces are elided; code left byte-identical.
 */
5365 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5368 /* Pass sequential frames to l2cap_reassemble_sdu()
5369 * until a gap is encountered.
5372 BT_DBG("chan %p", chan);
5374 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5375 struct sk_buff *skb;
5376 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5377 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5379 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5384 skb_unlink(skb, &chan->srej_q);
5385 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5386 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All gaps filled: resume normal reception and acknowledge */
5391 if (skb_queue_empty(&chan->srej_q)) {
5392 chan->rx_state = L2CAP_RX_STATE_RECV;
5393 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, enforce the retry
 * limit, then retransmit the requested I-frame, observing the P/F
 * bits and the SREJ_ACT bookkeeping that suppresses duplicate
 * retransmissions after a poll.
 * NOTE(review): truncated paste — early returns, else-branches and
 * braces are elided; code left byte-identical.
 */
5399 static void l2cap_handle_srej(struct l2cap_chan *chan,
5400 struct l2cap_ctrl *control)
5402 struct sk_buff *skb;
5404 BT_DBG("chan %p, control %p", chan, control);
/* An SREJ for the next unsent sequence number is nonsense */
5406 if (control->reqseq == chan->next_tx_seq) {
5407 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5408 l2cap_send_disconn_req(chan, ECONNRESET);
5412 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5415 BT_DBG("Seq %d not available for retransmission",
5420 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5421 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5422 l2cap_send_disconn_req(chan, ECONNRESET);
5426 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5428 if (control->poll) {
5429 l2cap_pass_to_tx(chan, control);
5431 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5432 l2cap_retransmit(chan, control);
5433 l2cap_ertm_send(chan);
5435 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5436 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5437 chan->srej_save_reqseq = control->reqseq;
5440 l2cap_pass_to_tx_fbit(chan, control);
5442 if (control->final) {
/* Skip the retransmit if this SREJ was already answered by
 * the F-bit response saved for this reqseq.
 */
5443 if (chan->srej_save_reqseq != control->reqseq ||
5444 !test_and_clear_bit(CONN_SREJ_ACT,
5446 l2cap_retransmit(chan, control);
5448 l2cap_retransmit(chan, control);
5449 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5450 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5451 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq and retry budget,
 * then retransmit everything from reqseq onward, using REJ_ACT to
 * avoid double retransmission around a poll/final exchange.
 * NOTE(review): truncated paste — early returns, else-branch and
 * braces are elided; code left byte-identical.
 */
5457 static void l2cap_handle_rej(struct l2cap_chan *chan,
5458 struct l2cap_ctrl *control)
5460 struct sk_buff *skb;
5462 BT_DBG("chan %p, control %p", chan, control);
5464 if (control->reqseq == chan->next_tx_seq) {
5465 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5466 l2cap_send_disconn_req(chan, ECONNRESET);
5470 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5472 if (chan->max_tx && skb &&
5473 bt_cb(skb)->control.retries >= chan->max_tx) {
5474 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5475 l2cap_send_disconn_req(chan, ECONNRESET);
5479 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5481 l2cap_pass_to_tx(chan, control);
5483 if (control->final) {
/* Only retransmit if this final frame doesn't complete an
 * already-acted-on REJ.
 */
5484 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5485 l2cap_retransmit_all(chan, control);
5487 l2cap_retransmit_all(chan, control);
5488 l2cap_ertm_send(chan);
5489 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5490 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's txseq relative to the expected
 * sequence, the tx window, and any outstanding SREJ state.  Returns
 * one of the L2CAP_TXSEQ_* classifications that drive the RX state
 * machines (expected, duplicate, unexpected, invalid, or their SREJ
 * variants).
 * NOTE(review): truncated paste — some returns and closing braces
 * are elided; code left byte-identical.
 */
5494 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5496 BT_DBG("chan %p, txseq %d", chan, txseq);
5498 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5499 chan->expected_tx_seq);
5501 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5502 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5504 /* See notes below regarding "double poll" and
5507 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5508 BT_DBG("Invalid/Ignore - after SREJ");
5509 return L2CAP_TXSEQ_INVALID_IGNORE;
5511 BT_DBG("Invalid - in window after SREJ sent");
5512 return L2CAP_TXSEQ_INVALID;
5516 if (chan->srej_list.head == txseq) {
5517 BT_DBG("Expected SREJ");
5518 return L2CAP_TXSEQ_EXPECTED_SREJ;
5521 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5522 BT_DBG("Duplicate SREJ - txseq already stored");
5523 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5526 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5527 BT_DBG("Unexpected SREJ - not requested");
5528 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5532 if (chan->expected_tx_seq == txseq) {
/* Expected but outside the window still counts as invalid */
5533 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5535 BT_DBG("Invalid - txseq outside tx window");
5536 return L2CAP_TXSEQ_INVALID;
5539 return L2CAP_TXSEQ_EXPECTED;
5543 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5544 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5545 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5546 return L2CAP_TXSEQ_DUPLICATE;
5549 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5550 /* A source of invalid packets is a "double poll" condition,
5551 * where delays cause us to send multiple poll packets. If
5552 * the remote stack receives and processes both polls,
5553 * sequence numbers can wrap around in such a way that a
5554 * resent frame has a sequence number that looks like new data
5555 * with a sequence gap. This would trigger an erroneous SREJ
5558 * Fortunately, this is impossible with a tx window that's
5559 * less than half of the maximum sequence number, which allows
5560 * invalid frames to be safely ignored.
5562 * With tx window sizes greater than half of the tx window
5563 * maximum, the frame is invalid and cannot be ignored. This
5564 * causes a disconnect.
5567 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5568 BT_DBG("Invalid/Ignore - txseq outside tx window");
5569 return L2CAP_TXSEQ_INVALID_IGNORE;
5571 BT_DBG("Invalid - txseq outside tx window");
5572 return L2CAP_TXSEQ_INVALID;
5575 BT_DBG("Unexpected - txseq indicates missing frames");
5576 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, normal RECV state.  Deliver expected
 * I-frames to reassembly, start SREJ recovery on sequence gaps, and
 * react to RR/RNR/REJ/SREJ supervisory events.  Frames not consumed
 * (skb_in_use stays false) are freed at the end.
 * NOTE(review): truncated paste — err declaration, skb_in_use
 * assignments, break statements, kfree_skb and braces are elided;
 * code left byte-identical.
 */
5580 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5581 struct l2cap_ctrl *control,
5582 struct sk_buff *skb, u8 event)
5585 bool skb_in_use = 0;
5587 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5591 case L2CAP_EV_RECV_IFRAME:
5592 switch (l2cap_classify_txseq(chan, control->txseq)) {
5593 case L2CAP_TXSEQ_EXPECTED:
5594 l2cap_pass_to_tx(chan, control);
/* Under local busy the frame is dropped; it will appear
 * missing once busy clears.
 */
5596 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5597 BT_DBG("Busy, discarding expected seq %d",
5602 chan->expected_tx_seq = __next_seq(chan,
5605 chan->buffer_seq = chan->expected_tx_seq;
5608 err = l2cap_reassemble_sdu(chan, skb, control);
5612 if (control->final) {
5613 if (!test_and_clear_bit(CONN_REJ_ACT,
5614 &chan->conn_state)) {
5616 l2cap_retransmit_all(chan, control);
5617 l2cap_ertm_send(chan);
5621 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5622 l2cap_send_ack(chan);
5624 case L2CAP_TXSEQ_UNEXPECTED:
5625 l2cap_pass_to_tx(chan, control);
5627 /* Can't issue SREJ frames in the local busy state.
5628 * Drop this frame, it will be seen as missing
5629 * when local busy is exited.
5631 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5632 BT_DBG("Busy, discarding unexpected seq %d",
5637 /* There was a gap in the sequence, so an SREJ
5638 * must be sent for each missing frame. The
5639 * current frame is stored for later use.
5641 skb_queue_tail(&chan->srej_q, skb);
5643 BT_DBG("Queued %p (queue len %d)", skb,
5644 skb_queue_len(&chan->srej_q));
5646 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5647 l2cap_seq_list_clear(&chan->srej_list);
5648 l2cap_send_srej(chan, control->txseq);
5650 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5652 case L2CAP_TXSEQ_DUPLICATE:
5653 l2cap_pass_to_tx(chan, control);
5655 case L2CAP_TXSEQ_INVALID_IGNORE:
5657 case L2CAP_TXSEQ_INVALID:
5659 l2cap_send_disconn_req(chan, ECONNRESET);
5663 case L2CAP_EV_RECV_RR:
5664 l2cap_pass_to_tx(chan, control);
5665 if (control->final) {
5666 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Don't resend during a channel move */
5668 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5669 !__chan_is_moving(chan)) {
5671 l2cap_retransmit_all(chan, control);
5674 l2cap_ertm_send(chan);
5675 } else if (control->poll) {
5676 l2cap_send_i_or_rr_or_rnr(chan);
5678 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5679 &chan->conn_state) &&
5680 chan->unacked_frames)
5681 __set_retrans_timer(chan);
5683 l2cap_ertm_send(chan);
5686 case L2CAP_EV_RECV_RNR:
5687 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5688 l2cap_pass_to_tx(chan, control);
5689 if (control && control->poll) {
5690 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5691 l2cap_send_rr_or_rnr(chan, 0);
5693 __clear_retrans_timer(chan);
5694 l2cap_seq_list_clear(&chan->retrans_list);
5696 case L2CAP_EV_RECV_REJ:
5697 l2cap_handle_rej(chan, control);
5699 case L2CAP_EV_RECV_SREJ:
5700 l2cap_handle_srej(chan, control);
5706 if (skb && !skb_in_use) {
5707 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state (gap recovery in progress).
 * Incoming I-frames are queued in srej_q; frames arriving in SREJ
 * order drain the queue via l2cap_rx_queued_iframes(), new gaps
 * trigger further SREJs, and supervisory frames are handled much like
 * in RECV state but answer polls with the SREJ tail.
 * NOTE(review): truncated paste — skb_in_use assignments, break
 * statements, kfree_skb and braces are elided; code left
 * byte-identical.
 */
5714 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5715 struct l2cap_ctrl *control,
5716 struct sk_buff *skb, u8 event)
5719 u16 txseq = control->txseq;
5720 bool skb_in_use = 0;
5722 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5726 case L2CAP_EV_RECV_IFRAME:
5727 switch (l2cap_classify_txseq(chan, txseq)) {
5728 case L2CAP_TXSEQ_EXPECTED:
5729 /* Keep frame for reassembly later */
5730 l2cap_pass_to_tx(chan, control);
5731 skb_queue_tail(&chan->srej_q, skb);
5733 BT_DBG("Queued %p (queue len %d)", skb,
5734 skb_queue_len(&chan->srej_q));
5736 chan->expected_tx_seq = __next_seq(chan, txseq);
5738 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This frame answers the oldest outstanding SREJ */
5739 l2cap_seq_list_pop(&chan->srej_list);
5741 l2cap_pass_to_tx(chan, control);
5742 skb_queue_tail(&chan->srej_q, skb);
5744 BT_DBG("Queued %p (queue len %d)", skb,
5745 skb_queue_len(&chan->srej_q));
5747 err = l2cap_rx_queued_iframes(chan);
5752 case L2CAP_TXSEQ_UNEXPECTED:
5753 /* Got a frame that can't be reassembled yet.
5754 * Save it for later, and send SREJs to cover
5755 * the missing frames.
5757 skb_queue_tail(&chan->srej_q, skb);
5759 BT_DBG("Queued %p (queue len %d)", skb,
5760 skb_queue_len(&chan->srej_q));
5762 l2cap_pass_to_tx(chan, control);
5763 l2cap_send_srej(chan, control->txseq);
5765 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5766 /* This frame was requested with an SREJ, but
5767 * some expected retransmitted frames are
5768 * missing. Request retransmission of missing
5771 skb_queue_tail(&chan->srej_q, skb);
5773 BT_DBG("Queued %p (queue len %d)", skb,
5774 skb_queue_len(&chan->srej_q));
5776 l2cap_pass_to_tx(chan, control);
5777 l2cap_send_srej_list(chan, control->txseq);
5779 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5780 /* We've already queued this frame. Drop this copy. */
5781 l2cap_pass_to_tx(chan, control);
5783 case L2CAP_TXSEQ_DUPLICATE:
5784 /* Expecting a later sequence number, so this frame
5785 * was already received. Ignore it completely.
5788 case L2CAP_TXSEQ_INVALID_IGNORE:
5790 case L2CAP_TXSEQ_INVALID:
5792 l2cap_send_disconn_req(chan, ECONNRESET);
5796 case L2CAP_EV_RECV_RR:
5797 l2cap_pass_to_tx(chan, control);
5798 if (control->final) {
5799 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5801 if (!test_and_clear_bit(CONN_REJ_ACT,
5802 &chan->conn_state)) {
5804 l2cap_retransmit_all(chan, control);
5807 l2cap_ertm_send(chan);
5808 } else if (control->poll) {
5809 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5810 &chan->conn_state) &&
5811 chan->unacked_frames) {
5812 __set_retrans_timer(chan);
/* Answer the poll with the most recent SREJ */
5815 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5816 l2cap_send_srej_tail(chan);
5818 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5819 &chan->conn_state) &&
5820 chan->unacked_frames)
5821 __set_retrans_timer(chan);
5823 l2cap_send_ack(chan);
5826 case L2CAP_EV_RECV_RNR:
5827 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5828 l2cap_pass_to_tx(chan, control);
5829 if (control->poll) {
5830 l2cap_send_srej_tail(chan);
5832 struct l2cap_ctrl rr_control;
5833 memset(&rr_control, 0, sizeof(rr_control));
5834 rr_control.sframe = 1;
5835 rr_control.super = L2CAP_SUPER_RR;
5836 rr_control.reqseq = chan->buffer_seq;
5837 l2cap_send_sframe(chan, &rr_control);
5841 case L2CAP_EV_RECV_REJ:
5842 l2cap_handle_rej(chan, control);
5844 case L2CAP_EV_RECV_SREJ:
5845 l2cap_handle_srej(chan, control);
5849 if (skb && !skb_in_use) {
5850 BT_DBG("Freeing %p", skb);
5857 static int l2cap_finish_move(struct l2cap_chan *chan)
5859 BT_DBG("chan %p", chan);
5861 chan->rx_state = L2CAP_RX_STATE_RECV;
5864 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5866 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5868 return l2cap_resegment(chan);
/* ERTM RX state handler for WAIT_P: the local side has moved the
 * channel and is waiting for a frame with the P (poll) bit set before
 * resuming. On receipt it acks outstanding frames, rewinds the TX
 * window to the peer's reqseq, finishes the move, answers with an
 * F-bit response, and re-dispatches the triggering event to the normal
 * RECV handler.
 *
 * NOTE(review): this extract is missing lines (braces, the poll-bit
 * guard, err declaration/return) — compare against upstream
 * net/bluetooth/l2cap_core.c before relying on the exact flow.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
struct l2cap_ctrl *control,
struct sk_buff *skb, u8 event)
BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
/* Treat reqseq as an ack for everything up to (not including) it */
l2cap_process_reqseq(chan, control->reqseq);
if (!skb_queue_empty(&chan->tx_q))
chan->tx_send_head = skb_peek(&chan->tx_q);
chan->tx_send_head = NULL;
/* Rewind next_tx_seq to the point expected
chan->next_tx_seq = control->reqseq;
chan->unacked_frames = 0;
/* Switch MTU / resegment for the post-move link */
err = l2cap_finish_move(chan);
/* Respond to the poll with the F bit set */
set_bit(CONN_SEND_FBIT, &chan->conn_state);
l2cap_send_i_or_rr_or_rnr(chan);
if (event == L2CAP_EV_RECV_IFRAME)
/* Re-run the event through the normal RECV state machine */
return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM RX state handler for WAIT_F: after sending a poll the channel
 * waits for a frame carrying the F (final) bit. Non-final frames are
 * ignored here; a final frame clears remote-busy, returns the state
 * machine to RECV, rewinds the TX window to the peer's reqseq, adjusts
 * the MTU for the (possibly moved) link, resegments, and then lets the
 * normal RECV handler process the frame.
 *
 * NOTE(review): lines are missing from this extract (braces, the
 * hs_hcon if/else around the MTU assignments, error-path returns) —
 * compare against upstream l2cap_core.c.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
struct l2cap_ctrl *control,
struct sk_buff *skb, u8 event)
/* Only a frame with F=1 ends the WAIT_F state */
if (!control->final)
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
chan->rx_state = L2CAP_RX_STATE_RECV;
/* reqseq acknowledges all frames sent before it */
l2cap_process_reqseq(chan, control->reqseq);
if (!skb_queue_empty(&chan->tx_q))
chan->tx_send_head = skb_peek(&chan->tx_q);
chan->tx_send_head = NULL;
/* Rewind next_tx_seq to the point expected
chan->next_tx_seq = control->reqseq;
chan->unacked_frames = 0;
/* presumably hs_hcon selects block vs ACL MTU here — TODO confirm */
chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
err = l2cap_resegment(chan);
/* Hand the frame itself to the normal RECV handler */
err = l2cap_rx_state_recv(chan, control, skb, event);
5947 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5949 /* Make sure reqseq is for a packet that has been sent but not acked */
5952 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5953 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: after validating the frame's
 * reqseq against the TX window, routes the (control, skb, event)
 * triple to the handler for the channel's current RX state. A frame
 * with an out-of-window reqseq is a protocol violation and tears the
 * channel down with ECONNRESET.
 *
 * NOTE(review): the extract is missing `break`s, the default case,
 * the else branch, and the err declaration/return — compare upstream.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff *skb, u8 event)
BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
control, skb, event, chan->rx_state);
if (__valid_reqseq(chan, control->reqseq)) {
/* Dispatch on the channel's RX state machine state */
switch (chan->rx_state) {
case L2CAP_RX_STATE_RECV:
err = l2cap_rx_state_recv(chan, control, skb, event);
case L2CAP_RX_STATE_SREJ_SENT:
err = l2cap_rx_state_srej_sent(chan, control, skb,
case L2CAP_RX_STATE_WAIT_P:
err = l2cap_rx_state_wait_p(chan, control, skb, event);
case L2CAP_RX_STATE_WAIT_F:
err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* reqseq outside the TX window: fatal protocol error */
BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
control->reqseq, chan->next_tx_seq,
chan->expected_ack_seq);
l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive path: no retransmission, so only in-sequence
 * frames are reassembled; anything out of sequence abandons the
 * partially assembled SDU and resynchronizes expected_tx_seq to the
 * frame just seen. Duplicate/old frames are freed.
 *
 * NOTE(review): extract is missing the else branch structure and the
 * sdu/sdu_len reset lines around kfree_skb — compare upstream.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff *skb)
BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
if (l2cap_classify_txseq(chan, control->txseq) ==
L2CAP_TXSEQ_EXPECTED) {
l2cap_pass_to_tx(chan, control);
BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
__next_seq(chan, chan->buffer_seq));
chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
/* In-sequence frame: fold it into the SDU under assembly */
l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence: drop any partial SDU and this frame */
kfree_skb(chan->sdu);
chan->sdu_last_frag = NULL;
BT_DBG("Freeing %p", skb);
/* Resync the receive window to the frame just observed */
chan->last_acked_seq = control->txseq;
chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Validate and dispatch a single ERTM/streaming data frame: checks the
 * FCS, strips SAR/FCS overhead from the payload length, enforces the
 * negotiated MPS, validates F/P bit combinations against the TX state,
 * then routes I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames
 * (mapped through rx_func_to_event) to l2cap_rx(). Invalid frames tear
 * the channel down with ECONNRESET.
 *
 * NOTE(review): the extract is missing drop labels, len initialization
 * from skb->len, and several braces — compare upstream l2cap_core.c.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
struct l2cap_ctrl *control = &bt_cb(skb)->control;
__unpack_control(chan, skb);
* We can just drop the corrupted I-frame here.
* Receiver will miss it and start proper recovery
* procedures and ask for retransmission.
if (l2cap_check_fcs(chan, skb))
/* Payload length: subtract the SDU-length field on SAR-start frames */
if (!control->sframe && control->sar == L2CAP_SAR_START)
len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
len -= L2CAP_FCS_SIZE;
/* Frame larger than the negotiated MPS: protocol error */
if (len > chan->mps) {
l2cap_send_disconn_req(chan, ECONNRESET);
if (!control->sframe) {
BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
control->sar, control->reqseq, control->final,
/* Validate F-bit - F=0 always valid, F=1 only
* valid in TX WAIT_F
if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
if (chan->mode != L2CAP_MODE_STREAMING) {
event = L2CAP_EV_RECV_IFRAME;
err = l2cap_rx(chan, control, skb, event);
err = l2cap_stream_rx(chan, control, skb);
l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame "super" field to an RX event */
const u8 rx_func_to_event[4] = {
L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
/* Only I-frames are expected in streaming mode */
if (chan->mode == L2CAP_MODE_STREAMING)
BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are an error */
BT_ERR("Trailing bytes: %d in sframe", len);
l2cap_send_disconn_req(chan, ECONNRESET);
/* Validate F and P bits */
if (control->final && (control->poll ||
chan->tx_state != L2CAP_TX_STATE_WAIT_F))
event = rx_func_to_event[control->super];
if (l2cap_rx(chan, control, skb, event))
l2cap_send_disconn_req(chan, ECONNRESET);
/* Deliver a frame on a connection-oriented data channel identified by
 * its source CID. Unknown CIDs are dropped (with an A2MP special case
 * that may create the channel on demand). Basic mode hands the skb
 * straight to the socket layer (dropping on imtu overflow, since basic
 * L2CAP has no flow control); ERTM/streaming go through
 * l2cap_data_rcv().
 *
 * NOTE(review): drop labels, `goto`s and the channel-unlock pairing are
 * missing from this extract — locking cannot be fully audited here.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
struct sk_buff *skb)
struct l2cap_chan *chan;
chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP fixed channel may be created lazily on first frame */
if (cid == L2CAP_CID_A2MP) {
chan = a2mp_channel_create(conn, skb);
l2cap_chan_lock(chan);
BT_DBG("unknown cid 0x%4.4x", cid);
/* Drop packet and return */
BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->state != BT_CONNECTED)
switch (chan->mode) {
case L2CAP_MODE_BASIC:
/* If socket recv buffers overflows we drop data here
* which is *bad* because L2CAP has to be reliable.
* But we don't have any other choice. L2CAP doesn't
* provide flow control mechanism. */
if (chan->imtu < skb->len)
if (!chan->ops->recv(chan, skb))
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
l2cap_data_rcv(chan, skb);
BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame: look up a global channel
 * bound to the PSM for this address pair and pass the skb to it if the
 * channel is bound/connected and the payload fits its imtu.
 *
 * NOTE(review): the NULL-chan check, drop path and kfree_skb are
 * missing from this extract — compare upstream.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
struct sk_buff *skb)
struct l2cap_chan *chan;
chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
if (chan->imtu < skb->len)
if (!chan->ops->recv(chan, skb))
/* Deliver an ATT (LE data, fixed CID) frame: same shape as
 * l2cap_conless_channel() but the lookup is by source CID rather than
 * by PSM.
 *
 * NOTE(review): the NULL-chan check, drop path and kfree_skb are
 * missing from this extract — compare upstream.
 */
static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
struct sk_buff *skb)
struct l2cap_chan *chan;
chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
if (chan->imtu < skb->len)
if (!chan->ops->recv(chan, skb))
/* Demultiplex a fully reassembled L2CAP frame by CID: signalling goes
 * to l2cap_sig_channel(), connectionless frames (after pulling the
 * PSM) to l2cap_conless_channel(), LE data to l2cap_att_channel(), SMP
 * frames to smp_sig_channel() (tearing the connection down on error),
 * and everything else to the per-channel data path. Frames whose
 * header length disagrees with the skb length are dropped.
 *
 * NOTE(review): the switch cases lack their `break`s, and the
 * bad-length drop path / SMP case label are missing in this extract.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_hdr *lh = (void *) skb->data;
skb_pull(skb, L2CAP_HDR_SIZE);
cid = __le16_to_cpu(lh->cid);
len = __le16_to_cpu(lh->len);
/* Header length must match the payload actually received */
if (len != skb->len) {
BT_DBG("len %d, cid 0x%4.4x", len, cid);
case L2CAP_CID_LE_SIGNALING:
case L2CAP_CID_SIGNALING:
l2cap_sig_channel(conn, skb);
case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a 2-byte PSM before the payload */
psm = get_unaligned((__le16 *) skb->data);
skb_pull(skb, L2CAP_PSMLEN_SIZE);
l2cap_conless_channel(conn, psm, skb);
case L2CAP_CID_LE_DATA:
l2cap_att_channel(conn, cid, skb);
/* SMP failure is fatal for the whole connection */
if (smp_sig_channel(conn, skb))
l2cap_conn_del(conn->hcon, EACCES);
l2cap_data_channel(conn, cid, skb);
6270 /* ---- L2CAP interface with lower layer (HCI) ---- */
6272 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6274 int exact = 0, lm1 = 0, lm2 = 0;
6275 struct l2cap_chan *c;
6277 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6279 /* Find listening sockets and check their link_mode */
6280 read_lock(&chan_list_lock);
6281 list_for_each_entry(c, &chan_list, global_l) {
6282 struct sock *sk = c->sk;
6284 if (c->state != BT_LISTEN)
6287 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6288 lm1 |= HCI_LM_ACCEPT;
6289 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6290 lm1 |= HCI_LM_MASTER;
6292 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6293 lm2 |= HCI_LM_ACCEPT;
6294 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6295 lm2 |= HCI_LM_MASTER;
6298 read_unlock(&chan_list_lock);
6300 return exact ? lm1 : lm2;
6303 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6305 struct l2cap_conn *conn;
6307 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6310 conn = l2cap_conn_add(hcon, status);
6312 l2cap_conn_ready(conn);
6314 l2cap_conn_del(hcon, bt_to_errno(status));
6318 int l2cap_disconn_ind(struct hci_conn *hcon)
6320 struct l2cap_conn *conn = hcon->l2cap_data;
6322 BT_DBG("hcon %p", hcon);
6325 return HCI_ERROR_REMOTE_USER_TERM;
6326 return conn->disc_reason;
6329 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6331 BT_DBG("hcon %p reason %d", hcon, reason);
6333 l2cap_conn_del(hcon, bt_to_errno(reason));
6336 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6338 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6341 if (encrypt == 0x00) {
6342 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6343 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6344 } else if (chan->sec_level == BT_SECURITY_HIGH)
6345 l2cap_chan_close(chan, ECONNREFUSED);
6347 if (chan->sec_level == BT_SECURITY_MEDIUM)
6348 __clear_chan_timer(chan);
/* HCI callback after an authentication/encryption change completes.
 * For LE links, distribute SMP keys on success and cancel the security
 * timer. For BR/EDR, walk every channel on the connection and advance
 * its state machine: ready LE-data channels, wake suspended sockets,
 * continue or abort a pending connect (BT_CONNECT), and answer a held
 * incoming connect request (BT_CONNECT2) with success, pending
 * (deferred setup) or security-block, following up with a config
 * request when appropriate.
 *
 * NOTE(review): this extract is missing many structural lines
 * (if/else around status, `continue`s, local declarations, buf for the
 * conf request) — behavior here should be checked against upstream
 * rather than inferred from the fragment.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_chan *chan;
BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
/* LE links: keys + security timer only, no channel walk */
if (hcon->type == LE_LINK) {
if (!status && encrypt)
smp_distribute_keys(conn, 0);
cancel_delayed_work(&conn->security_timer);
mutex_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
state_to_string(chan->state));
/* A2MP fixed channels are not affected by link security */
if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
l2cap_chan_unlock(chan);
if (chan->scid == L2CAP_CID_LE_DATA) {
if (!status && encrypt) {
chan->sec_level = hcon->sec_level;
l2cap_chan_ready(chan);
l2cap_chan_unlock(chan);
/* Skip channels still waiting on an outstanding connect */
if (!__l2cap_no_conn_pending(chan)) {
l2cap_chan_unlock(chan);
if (!status && (chan->state == BT_CONNECTED ||
chan->state == BT_CONFIG)) {
struct sock *sk = chan->sk;
/* Security done: unblock the socket */
clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
sk->sk_state_change(sk);
l2cap_check_encryption(chan, encrypt);
l2cap_chan_unlock(chan);
if (chan->state == BT_CONNECT) {
l2cap_start_connection(chan);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
} else if (chan->state == BT_CONNECT2) {
struct sock *sk = chan->sk;
struct l2cap_conn_rsp rsp;
if (test_bit(BT_SK_DEFER_SETUP,
&bt_sk(sk)->flags)) {
res = L2CAP_CR_PEND;
stat = L2CAP_CS_AUTHOR_PEND;
chan->ops->defer(chan);
__l2cap_state_change(chan, BT_CONFIG);
res = L2CAP_CR_SUCCESS;
stat = L2CAP_CS_NO_INFO;
/* Security failed: reject and disconnect */
__l2cap_state_change(chan, BT_DISCONN);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
res = L2CAP_CR_SEC_BLOCK;
stat = L2CAP_CS_NO_INFO;
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(res);
rsp.status = cpu_to_le16(stat);
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Kick off configuration after a successful response */
if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
res == L2CAP_CR_SUCCESS) {
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
l2cap_build_conf_req(chan, buf),
chan->num_conf_req++;
l2cap_chan_unlock(chan);
mutex_unlock(&conn->chan_lock);
/* HCI entry point for incoming ACL data. Reassembles L2CAP frames from
 * ACL fragments: an ACL start fragment must carry at least the basic
 * L2CAP header, which gives the total frame length; a complete frame
 * is dispatched immediately, otherwise an rx_skb is allocated and
 * continuation fragments are appended until rx_len reaches zero.
 * Length inconsistencies mark the connection unreliable (ECOMM) and
 * drop the partial frame.
 *
 * NOTE(review): this extract is missing case labels (ACL_CONT), drop
 * labels, allocation-failure handling and several braces — the
 * reassembly flow should be compared against upstream l2cap_core.c.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_hdr *hdr;
/* For AMP controller do not create l2cap conn */
if (!conn && hcon->hdev->dev_type != HCI_BREDR)
conn = l2cap_conn_add(hcon, 0);
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
case ACL_START_NO_FLUSH:
/* A start fragment while one is pending: discard the old one */
BT_ERR("Unexpected start frame (len %d)", skb->len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
l2cap_conn_unreliable(conn, ECOMM);
/* Start fragment always begin with Basic L2CAP header */
if (skb->len < L2CAP_HDR_SIZE) {
BT_ERR("Frame is too short (len %d)", skb->len);
l2cap_conn_unreliable(conn, ECOMM);
hdr = (struct l2cap_hdr *) skb->data;
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
if (len == skb->len) {
/* Complete frame received */
l2cap_recv_frame(conn, skb);
BT_DBG("Start: total len %d, frag len %d", len, skb->len);
if (skb->len > len) {
BT_ERR("Frame is too long (len %d, expected len %d)",
l2cap_conn_unreliable(conn, ECOMM);
/* Allocate skb for the complete frame (with header) */
conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
conn->rx_len = len - skb->len;
BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with nothing pending: protocol violation */
if (!conn->rx_len) {
BT_ERR("Unexpected continuation frame (len %d)", skb->len);
l2cap_conn_unreliable(conn, ECOMM);
if (skb->len > conn->rx_len) {
BT_ERR("Fragment is too long (len %d, expected %d)",
skb->len, conn->rx_len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
l2cap_conn_unreliable(conn, ECOMM);
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
conn->rx_len -= skb->len;
if (!conn->rx_len) {
/* Complete frame received */
l2cap_recv_frame(conn, conn->rx_skb);
conn->rx_skb = NULL;
6569 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6571 struct l2cap_chan *c;
6573 read_lock(&chan_list_lock);
6575 list_for_each_entry(c, &chan_list, global_l) {
6576 struct sock *sk = c->sk;
6578 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6579 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6580 c->state, __le16_to_cpu(c->psm),
6581 c->scid, c->dcid, c->imtu, c->omtu,
6582 c->sec_level, c->mode);
6585 read_unlock(&chan_list_lock);
/* debugfs ->open: wire the file to the single-shot seq_file shower. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, l2cap_debugfs_show, inode->i_private);
6595 static const struct file_operations l2cap_debugfs_fops = {
6596 .open = l2cap_debugfs_open,
6598 .llseek = seq_lseek,
6599 .release = single_release,
/* dentry of the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
6604 int __init l2cap_init(void)
6608 err = l2cap_init_sockets();
6613 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6614 NULL, &l2cap_debugfs_fops);
6616 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs entry, then unregister the
 * L2CAP socket layer (reverse of l2cap_init()). */
void l2cap_exit(void)
debugfs_remove(l2cap_debugfs);
l2cap_cleanup_sockets();
/* Runtime-tunable knob (0644): setting disable_ertm forces channels to
 * avoid Enhanced Retransmission Mode. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");