2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/* Local feature mask advertised in Information Responses; fixed-channel
 * support is always claimed. */
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of all L2CAP channels (across connections), guarded by
 * chan_list_lock. */
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
62 /* ---- L2CAP channels ---- */
/* Look up a channel on @conn by destination CID (per the name); walks
 * conn->chan_l. Unlocked variant: caller must hold conn->chan_lock. */
64 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
69 	list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID (per the name); walks
 * conn->chan_l. Unlocked variant: caller must hold conn->chan_lock. */
76 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
81 	list_for_each_entry(c, &conn->chan_l, list) {
88 /* Find channel with given SCID.
89  * Returns locked channel. */
90 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
/* Serialize the list walk against concurrent channel add/remove. */
95 	mutex_lock(&conn->chan_lock);
96 	c = __l2cap_get_chan_by_scid(conn, cid);
99 	mutex_unlock(&conn->chan_lock);
104 /* Find channel with given DCID.
105  * Returns locked channel.
107 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
110 	struct l2cap_chan *c;
/* Serialize the list walk against concurrent channel add/remove. */
112 	mutex_lock(&conn->chan_lock);
113 	c = __l2cap_get_chan_by_dcid(conn, cid);
116 	mutex_unlock(&conn->chan_lock);
/* Find the channel on @conn whose pending signalling identifier matches
 * @ident. Unlocked variant: caller must hold conn->chan_lock. */
121 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
124 	struct l2cap_chan *c;
126 	list_for_each_entry(c, &conn->chan_l, list) {
127 		if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
133 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 	struct l2cap_chan *c;
138 	mutex_lock(&conn->chan_lock);
139 	c = __l2cap_get_chan_by_ident(conn, ident);
142 	mutex_unlock(&conn->chan_lock);
/* Find a channel in the global list bound to source PSM @psm on local
 * address @src. Caller must hold chan_list_lock. */
147 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
149 	struct l2cap_chan *c;
151 	list_for_each_entry(c, &chan_list, global_l) {
152 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to PSM @psm on local address @src. A non-zero @psm must not
 * already be bound on that address; @psm == 0 requests automatic
 * allocation of a free dynamic PSM (odd values 0x1001..0x10ff, stepping
 * by 2 keeps the least significant octet odd as L2CAP requires). */
158 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
162 	write_lock(&chan_list_lock);
164 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
177 	for (p = 0x1001; p < 0x1100; p += 2)
178 		if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
179 			chan->psm   = cpu_to_le16(p);
180 			chan->sport = cpu_to_le16(p);
187 	write_unlock(&chan_list_lock);
/* Assign a fixed source CID to @chan under chan_list_lock (assignment
 * itself is on lines not shown here — the lock pair brackets it). */
191 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
193 	write_lock(&chan_list_lock);
197 	write_unlock(&chan_list_lock);
/* Allocate an unused dynamic source CID on @conn by linear scan of the
 * dynamic range. Caller must hold conn->chan_lock (uses the unlocked
 * lookup). */
202 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
204 	u16 cid = L2CAP_CID_DYN_START;
206 	for (; cid < L2CAP_CID_DYN_END; cid++) {
207 		if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition @chan to @state, logging old -> new, and notify the channel
 * owner via the state_change callback. Unlocked variant. */
214 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
216 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
217 	       state_to_string(state));
220 	chan->ops->state_change(chan, state);
/* Locked wrapper around __l2cap_state_change(); sk is the channel's
 * backing socket (the lock/unlock calls around the change use it). */
223 static void l2cap_state_change(struct l2cap_chan *chan, int state)
225 	struct sock *sk = chan->sk;
228 	__l2cap_state_change(chan, state);
/* Record error @err on the channel's socket. Unlocked variant. */
232 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
234 	struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(). */
239 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
241 	struct sock *sk = chan->sk;
244 	__l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only if the monitor timer is
 * not already pending and a retransmission timeout is configured. */
248 static void __set_retrans_timer(struct l2cap_chan *chan)
250 	if (!delayed_work_pending(&chan->monitor_timer) &&
251 	    chan->retrans_timeout) {
252 		l2cap_set_timer(chan, &chan->retrans_timer,
253 				msecs_to_jiffies(chan->retrans_timeout));
/* Switch from the retransmission timer to the ERTM monitor timer: the
 * retransmission timer is always cleared, and the monitor timer is armed
 * only when a monitor timeout is configured. */
257 static void __set_monitor_timer(struct l2cap_chan *chan)
259 	__clear_retrans_timer(chan);
260 	if (chan->monitor_timeout) {
261 		l2cap_set_timer(chan, &chan->monitor_timer,
262 				msecs_to_jiffies(chan->monitor_timeout));
/* Linear-search @head for the skb whose ERTM TxSeq equals @seq. */
266 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
271 	skb_queue_walk(head, skb) {
272 		if (bt_cb(skb)->control.txseq == seq)
279 /* ---- L2CAP sequence number lists ---- */
281 /* For ERTM, ordered lists of sequence numbers must be tracked for
282 * SREJ requests that are received and for frames that are to be
283 * retransmitted. These seq_list functions implement a singly-linked
284 * list in an array, where membership in the list can also be checked
285 * in constant time. Items can also be added to the tail of the list
286 and removed from the head in constant time, without further memory allocation.
/* Initialize a sequence list sized for @size entries. The backing array
 * is rounded up to a power of two so (seq & mask) maps any 14-bit
 * sequence number to a slot; head/tail and all slots start CLEAR
 * (empty). Returns 0 on success (error path for a failed kmalloc is on
 * lines not shown here). */
290 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
292 	size_t alloc_size, i;
294 	/* Allocated size is a power of 2 to map sequence numbers
295 	 * (which may be up to 14 bits) in to a smaller array that is
296 	 * sized for the negotiated ERTM transmit windows.
298 	alloc_size = roundup_pow_of_two(size);
300 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
304 	seq_list->mask = alloc_size - 1;
305 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
306 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
307 	for (i = 0; i < alloc_size; i++)
308 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array; kfree(NULL) is a safe no-op. */
313 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
315 	kfree(seq_list->list);
/* O(1) membership test: a slot is occupied iff it is not CLEAR. */
318 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
321 	/* Constant-time check for list membership */
322 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list and return it. Removing the head is O(1);
 * removing an interior element walks the singly-linked chain to find the
 * predecessor. Returns L2CAP_SEQ_LIST_CLEAR if the list is empty or @seq
 * is not present. */
325 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
327 	u16 mask = seq_list->mask;
329 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
330 		/* In case someone tries to pop the head of an empty list */
331 		return L2CAP_SEQ_LIST_CLEAR;
332 	} else if (seq_list->head == seq) {
333 		/* Head can be removed in constant time */
334 		seq_list->head = seq_list->list[seq & mask];
335 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* The tail slot holds the TAIL sentinel; if it propagated to head the
 * list just became empty. */
337 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
338 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
339 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
342 	/* Walk the list to find the sequence number */
343 	u16 prev = seq_list->head;
344 	while (seq_list->list[prev & mask] != seq) {
345 		prev = seq_list->list[prev & mask];
346 		if (prev == L2CAP_SEQ_LIST_TAIL)
347 			return L2CAP_SEQ_LIST_CLEAR;
350 	/* Unlink the number from the list and clear it */
351 	seq_list->list[prev & mask] = seq_list->list[seq & mask];
352 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
353 	if (seq_list->tail == seq)
354 		seq_list->tail = prev;
/* Pop and return the head of the list (CLEAR if empty). */
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
361 	/* Remove the head in constant time */
362 	return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: reset every slot and head/tail to CLEAR. Already-empty
 * lists (head == CLEAR) are returned from early to skip the O(n) sweep. */
365 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
369 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
372 	for (i = 0; i <= seq_list->mask; i++)
373 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
375 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
376 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq to the tail in O(1). Duplicates are ignored (slot already
 * occupied). An empty list gets @seq as head; otherwise the old tail's
 * slot is linked to @seq. The new tail slot is marked with the TAIL
 * sentinel. */
379 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
381 	u16 mask = seq_list->mask;
383 	/* All appends happen in constant time */
385 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
388 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
389 		seq_list->head = seq;
391 		seq_list->list[seq_list->tail & mask] = seq;
393 	seq_list->tail = seq;
394 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its state (ECONNREFUSED for a connection that was
 * up or being configured/secured), then drop the reference taken when the
 * timer was armed. Runs with conn->chan_lock and the channel lock held
 * around the close. */
397 static void l2cap_chan_timeout(struct work_struct *work)
399 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
401 	struct l2cap_conn *conn = chan->conn;
404 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
406 	mutex_lock(&conn->chan_lock);
407 	l2cap_chan_lock(chan);
409 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
410 		reason = ECONNREFUSED;
411 	else if (chan->state == BT_CONNECT &&
412 		 chan->sec_level != BT_SECURITY_SDP)
413 		reason = ECONNREFUSED;
417 	l2cap_chan_close(chan, reason);
419 	l2cap_chan_unlock(chan);
/* ops->close is called outside the channel lock but inside chan_lock. */
421 	chan->ops->close(chan);
422 	mutex_unlock(&conn->chan_lock);
/* Balances the hold taken when the timer was scheduled. */
424 	l2cap_chan_put(chan);
/* Allocate and initialize a new channel: zeroed state, BT_OPEN, refcount
 * of one, timer work item armed with l2cap_chan_timeout, and linked into
 * the global channel list. Returns NULL on allocation failure (checked on
 * lines not shown here). */
427 struct l2cap_chan *l2cap_chan_create(void)
429 	struct l2cap_chan *chan;
431 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
435 	mutex_init(&chan->lock);
437 	write_lock(&chan_list_lock);
438 	list_add(&chan->global_l, &chan_list);
439 	write_unlock(&chan_list_lock);
441 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
443 	chan->state = BT_OPEN;
445 	kref_init(&chan->kref);
447 	/* This flag is cleared in l2cap_chan_ready() */
448 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
450 	BT_DBG("chan %p", chan);
/* kref release callback: unlink the channel from the global list. Called
 * only when the last reference is dropped via l2cap_chan_put(). */
455 static void l2cap_chan_destroy(struct kref *kref)
457 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
459 	BT_DBG("chan %p", chan);
461 	write_lock(&chan_list_lock);
462 	list_del(&chan->global_l);
463 	write_unlock(&chan_list_lock);
/* Take a reference on the channel. */
468 void l2cap_chan_hold(struct l2cap_chan *c)
470 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference; the last put frees via l2cap_chan_destroy(). */
475 void l2cap_chan_put(struct l2cap_chan *c)
477 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
479 	kref_put(&c->kref, l2cap_chan_destroy);
/* Reset negotiable channel parameters to their protocol defaults:
 * CRC16 FCS, default max-transmit and TX window sizes, low security,
 * and force-active power mode. */
482 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
484 	chan->fcs  = L2CAP_FCS_CRC16;
485 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
486 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
487 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
488 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
489 	chan->sec_level = BT_SECURITY_LOW;
491 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: pick source/destination CIDs and MTUs based on
 * the channel type, seed the best-effort flow spec, take channel and
 * hci_conn references, and link the channel into conn->chan_l. Unlocked
 * variant: caller must hold conn->chan_lock. */
494 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
496 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
497 	       __le16_to_cpu(chan->psm), chan->dcid);
499 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
503 	switch (chan->chan_type) {
504 	case L2CAP_CHAN_CONN_ORIENTED:
/* On LE the ATT fixed CID keeps scid == dcid; other LE channels get a
 * dynamically allocated scid. */
505 		if (conn->hcon->type == LE_LINK) {
507 			chan->omtu = L2CAP_DEFAULT_MTU;
508 			if (chan->dcid == L2CAP_CID_ATT)
509 				chan->scid = L2CAP_CID_ATT;
511 				chan->scid = l2cap_alloc_cid(conn);
513 			/* Alloc CID for connection-oriented socket */
514 			chan->scid = l2cap_alloc_cid(conn);
515 			chan->omtu = L2CAP_DEFAULT_MTU;
519 	case L2CAP_CHAN_CONN_LESS:
520 		/* Connectionless socket */
521 		chan->scid = L2CAP_CID_CONN_LESS;
522 		chan->dcid = L2CAP_CID_CONN_LESS;
523 		chan->omtu = L2CAP_DEFAULT_MTU;
526 	case L2CAP_CHAN_CONN_FIX_A2MP:
527 		chan->scid = L2CAP_CID_A2MP;
528 		chan->dcid = L2CAP_CID_A2MP;
529 		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
530 		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
534 		/* Raw socket can send/recv signalling messages only */
535 		chan->scid = L2CAP_CID_SIGNALING;
536 		chan->dcid = L2CAP_CID_SIGNALING;
537 		chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort extended flow specification. */
540 	chan->local_id		= L2CAP_BESTEFFORT_ID;
541 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
542 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
543 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
544 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
545 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
/* Channel and ACL link stay referenced while on conn->chan_l; dropped
 * again in l2cap_chan_del(). */
547 	l2cap_chan_hold(chan);
549 	hci_conn_hold(conn->hcon);
551 	list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
554 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
556 	mutex_lock(&conn->chan_lock);
557 	__l2cap_chan_add(conn, chan);
558 	mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: stop the channel timer, unlink from
 * conn->chan_l, drop the references taken in __l2cap_chan_add(), tear
 * down any AMP logical link, notify the owner via ops->teardown, and for
 * ERTM/streaming modes stop all timers and purge the queued frames and
 * sequence lists. @err is propagated to the owner. */
561 void l2cap_chan_del(struct l2cap_chan *chan, int err)
563 	struct l2cap_conn *conn = chan->conn;
565 	__clear_chan_timer(chan);
567 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
570 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
571 		/* Delete from channel list */
572 		list_del(&chan->list);
574 		l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an ACL reference of their own. */
578 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
579 			hci_conn_drop(conn->hcon);
581 		if (mgr && mgr->bredr_chan == chan)
582 			mgr->bredr_chan = NULL;
585 	if (chan->hs_hchan) {
586 		struct hci_chan *hs_hchan = chan->hs_hchan;
588 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
589 		amp_disconnect_logical_link(hs_hchan);
592 	chan->ops->teardown(chan, err);
594 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
598 	case L2CAP_MODE_BASIC:
601 	case L2CAP_MODE_ERTM:
602 		__clear_retrans_timer(chan);
603 		__clear_monitor_timer(chan);
604 		__clear_ack_timer(chan);
606 		skb_queue_purge(&chan->srej_q);
608 		l2cap_seq_list_free(&chan->srej_list);
609 		l2cap_seq_list_free(&chan->retrans_list);
/* fall through: streaming mode shares the tx queue purge below */
613 	case L2CAP_MODE_STREAMING:
614 		skb_queue_purge(&chan->tx_q);
/* Close @chan with @reason, acting on its current state: an established
 * connection-oriented ACL channel gets a Disconnect Request (with the
 * channel timer armed from the socket send timeout); a half-open incoming
 * channel (BT_CONNECT2) is refused with a Connection Response whose
 * result depends on whether setup was deferred; anything else is torn
 * down directly. */
621 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
623 	struct l2cap_conn *conn = chan->conn;
624 	struct sock *sk = chan->sk;
626 	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
629 	switch (chan->state) {
631 		chan->ops->teardown(chan, 0);
636 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
637 		    conn->hcon->type == ACL_LINK) {
638 			__set_chan_timer(chan, sk->sk_sndtimeo);
639 			l2cap_send_disconn_req(chan, reason);
641 			l2cap_chan_del(chan, reason);
645 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
646 		    conn->hcon->type == ACL_LINK) {
647 			struct l2cap_conn_rsp rsp;
/* Deferred setup means the remote was authorized but the owner never
 * accepted: report a security block rather than a bad PSM. */
650 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
651 				result = L2CAP_CR_SEC_BLOCK;
653 				result = L2CAP_CR_BAD_PSM;
654 			l2cap_state_change(chan, BT_DISCONN);
656 			rsp.scid   = cpu_to_le16(chan->dcid);
657 			rsp.dcid   = cpu_to_le16(chan->scid);
658 			rsp.result = cpu_to_le16(result);
659 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
660 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
664 		l2cap_chan_del(chan, reason);
669 		l2cap_chan_del(chan, reason);
673 		chan->ops->teardown(chan, 0);
/* Map the channel type and requested security level to an HCI
 * authentication requirement. SDP connections are special-cased: their
 * security level is capped (HIGH degrades to MITM-less no-bonding, LOW is
 * promoted to the dedicated SDP level) since SDP must stay reachable
 * without bonding. */
678 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
680 	switch (chan->chan_type) {
682 		switch (chan->sec_level) {
683 		case BT_SECURITY_HIGH:
684 			return HCI_AT_DEDICATED_BONDING_MITM;
685 		case BT_SECURITY_MEDIUM:
686 			return HCI_AT_DEDICATED_BONDING;
688 			return HCI_AT_NO_BONDING;
691 	case L2CAP_CHAN_CONN_ORIENTED:
692 		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
693 			if (chan->sec_level == BT_SECURITY_LOW)
694 				chan->sec_level = BT_SECURITY_SDP;
696 			if (chan->sec_level == BT_SECURITY_HIGH)
697 				return HCI_AT_NO_BONDING_MITM;
699 				return HCI_AT_NO_BONDING;
703 		switch (chan->sec_level) {
704 		case BT_SECURITY_HIGH:
705 			return HCI_AT_GENERAL_BONDING_MITM;
706 		case BT_SECURITY_MEDIUM:
707 			return HCI_AT_GENERAL_BONDING;
709 			return HCI_AT_NO_BONDING;
715 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level using the
 * auth type derived above; returns the hci_conn_security() result. */
716 int l2cap_chan_check_security(struct l2cap_chan *chan)
718 	struct l2cap_conn *conn = chan->conn;
721 	auth_type = l2cap_get_auth_type(chan);
723 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier for @conn, wrapping within the
 * kernel-reserved 1..128 range under conn->lock. */
726 static u8 l2cap_get_ident(struct l2cap_conn *conn)
730 	/* Get next available identificator.
731 	 *    1 - 128 are used by kernel.
732 	 *  129 - 199 are reserved.
733 	 *  200 - 254 are used by utilities like l2ping, etc.
736 	spin_lock(&conn->lock);
738 	if (++conn->tx_ident > 128)
743 	spin_unlock(&conn->lock);
/* Build a signalling command skb and transmit it on the connection's HCI
 * channel at maximum priority, marked force-active. Uses a non-flushable
 * ACL start when the controller supports it. */
748 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
751 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
754 	BT_DBG("code 0x%2.2x", code);
759 	if (lmp_no_flush_capable(conn->hcon->hdev))
760 		flags = ACL_START_NO_FLUSH;
764 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
765 	skb->priority = HCI_PRIO_MAX;
767 	hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move state other
 * than stable or wait-prepare). */
770 static bool __chan_is_moving(struct l2cap_chan *chan)
772 	return chan->move_state != L2CAP_MOVE_STABLE &&
773 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for @chan. If the channel lives on a high-speed
 * (AMP) link and is not mid-move, send complete frames on the HS channel;
 * otherwise send on the BR/EDR ACL, non-flushable when the channel is not
 * flagged flushable and the controller supports it. */
776 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
778 	struct hci_conn *hcon = chan->conn->hcon;
781 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
784 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
786 		hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
793 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
794 	    lmp_no_flush_capable(hcon->hdev))
795 		flags = ACL_START_NO_FLUSH;
799 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
800 	hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into @control. The frame-type
 * bit selects S-frame fields (poll, supervise) vs I-frame fields (SAR,
 * TxSeq); ReqSeq and Final are common to both. */
803 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
805 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
806 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
808 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
811 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
812 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
819 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
820 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field into @control; same field split
 * as the enhanced form but with the wider extended-window layout. */
827 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
829 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
830 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
832 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
835 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
836 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
843 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
844 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Strip and decode the control field at the front of @skb into its
 * bt_cb, choosing the 32-bit extended or 16-bit enhanced layout based on
 * the channel's FLAG_EXT_CTRL. */
851 static inline void __unpack_control(struct l2cap_chan *chan,
854 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
855 		__unpack_extended_control(get_unaligned_le32(skb->data),
856 					  &bt_cb(skb)->control);
857 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
859 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
860 					  &bt_cb(skb)->control);
861 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode @control into a 32-bit extended control field (inverse of
 * __unpack_extended_control). */
865 static u32 __pack_extended_control(struct l2cap_ctrl *control)
869 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
870 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
872 	if (control->sframe) {
873 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
874 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
875 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
877 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
878 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode @control into a 16-bit enhanced control field (inverse of
 * __unpack_enhanced_control). */
884 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
888 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
889 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
891 	if (control->sframe) {
892 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
893 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
894 		packed |= L2CAP_CTRL_FRAME_TYPE;
896 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
897 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into @skb directly after the basic
 * L2CAP header, in the layout selected by FLAG_EXT_CTRL. */
903 static inline void __pack_control(struct l2cap_chan *chan,
904 				  struct l2cap_ctrl *control,
907 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
908 		put_unaligned_le32(__pack_extended_control(control),
909 				   skb->data + L2CAP_HDR_SIZE);
911 		put_unaligned_le16(__pack_enhanced_control(control),
912 				   skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM header (basic header + control field), which depends
 * on whether extended control fields are in use. */
916 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
918 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
919 		return L2CAP_EXT_HDR_SIZE;
921 	return L2CAP_ENH_HDR_SIZE;
/* Build an ERTM S-frame PDU carrying the already-packed @control value:
 * basic header (len covers everything after it), control field in the
 * configured width, and a trailing CRC16 FCS when enabled. Returns the
 * skb at max priority, or ERR_PTR(-ENOMEM). */
924 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
928 	struct l2cap_hdr *lh;
929 	int hlen = __ertm_hdr_size(chan);
931 	if (chan->fcs == L2CAP_FCS_CRC16)
932 		hlen += L2CAP_FCS_SIZE;
934 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
937 		return ERR_PTR(-ENOMEM);
939 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
940 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
941 	lh->cid = cpu_to_le16(chan->dcid);
943 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
944 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
946 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers the header and control field accumulated so far. */
948 	if (chan->fcs == L2CAP_FCS_CRC16) {
949 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
950 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
953 	skb->priority = HCI_PRIO_MAX;
/* Send an S-frame described by @control. Non-S-frames and channels in the
 * middle of an AMP move are ignored. Bookkeeping before transmit: a
 * pending F-bit is consumed, RR/RNR update the remote-busy-notification
 * state, and any non-SREJ frame acknowledges up to reqseq (so the ack
 * timer can be cleared). */
957 static void l2cap_send_sframe(struct l2cap_chan *chan,
958 			      struct l2cap_ctrl *control)
963 	BT_DBG("chan %p, control %p", chan, control);
965 	if (!control->sframe)
968 	if (__chan_is_moving(chan))
971 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
975 	if (control->super == L2CAP_SUPER_RR)
976 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
977 	else if (control->super == L2CAP_SUPER_RNR)
978 		set_bit(CONN_RNR_SENT, &chan->conn_state);
980 	if (control->super != L2CAP_SUPER_SREJ) {
981 		chan->last_acked_seq = control->reqseq;
982 		__clear_ack_timer(chan);
985 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
986 	       control->final, control->poll, control->super);
988 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
989 		control_field = __pack_extended_control(control);
991 		control_field = __pack_enhanced_control(control);
993 	skb = l2cap_create_sframe_pdu(chan, control_field);
995 	l2cap_do_send(chan, skb);
/* Send a Receiver Ready or Receiver Not Ready S-frame acknowledging up to
 * buffer_seq; RNR is chosen when the local side is busy, and @poll sets
 * the P-bit. */
998 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1000 	struct l2cap_ctrl control;
1002 	BT_DBG("chan %p, poll %d", chan, poll);
1004 	memset(&control, 0, sizeof(control));
1006 	control.poll = poll;
1008 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1009 		control.super = L2CAP_SUPER_RNR;
1011 		control.super = L2CAP_SUPER_RR;
1013 	control.reqseq = chan->buffer_seq;
1014 	l2cap_send_sframe(chan, &control);
/* True when no Connection Request is outstanding for this channel. */
1017 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1019 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: high-speed must
 * be enabled on the connection, the remote must advertise the A2MP fixed
 * channel, at least one non-BR/EDR AMP controller must be up locally, and
 * the channel policy must prefer AMP. */
1022 static bool __amp_capable(struct l2cap_chan *chan)
1024 	struct l2cap_conn *conn = chan->conn;
1025 	struct hci_dev *hdev;
1026 	bool amp_available = false;
1028 	if (!conn->hs_enabled)
1031 	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1034 	read_lock(&hci_dev_list_lock);
1035 	list_for_each_entry(hdev, &hci_dev_list, list) {
1036 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1037 		    test_bit(HCI_UP, &hdev->flags)) {
1038 			amp_available = true;
1042 	read_unlock(&hci_dev_list_lock);
1044 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1045 		return amp_available;
/* Validate the channel's Extended Flow Specification parameters.
 * NOTE(review): body not visible here — confirm return semantics at the
 * definition before relying on them. */
1050 static bool l2cap_check_efs(struct l2cap_chan *chan)
1052 	/* Check EFS parameters */
/* Send an L2CAP Connection Request for @chan (scid + psm), remembering
 * the signalling identifier and marking the connect as pending. */
1056 void l2cap_send_conn_req(struct l2cap_chan *chan)
1058 	struct l2cap_conn *conn = chan->conn;
1059 	struct l2cap_conn_req req;
1061 	req.scid = cpu_to_le16(chan->scid);
1062 	req.psm  = chan->psm;
1064 	chan->ident = l2cap_get_ident(conn);
1066 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1068 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send an AMP Create Channel Request for @chan on controller @amp_id. */
1071 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1073 	struct l2cap_create_chan_req req;
1074 	req.scid = cpu_to_le16(chan->scid);
1075 	req.psm  = chan->psm;
1076 	req.amp_id = amp_id;
1078 	chan->ident = l2cap_get_ident(chan->conn);
1080 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, mark every
 * previously-transmitted frame for (re)transmission after the move,
 * reset sequence/SREJ tracking, and put TX/RX state machines into their
 * move states. Assumes the peer is busy until the move completes.
 * No-op for non-ERTM channels. */
1084 static void l2cap_move_setup(struct l2cap_chan *chan)
1086 	struct sk_buff *skb;
1088 	BT_DBG("chan %p", chan);
1090 	if (chan->mode != L2CAP_MODE_ERTM)
1093 	__clear_retrans_timer(chan);
1094 	__clear_monitor_timer(chan);
1095 	__clear_ack_timer(chan);
1097 	chan->retry_count = 0;
1098 	skb_queue_walk(&chan->tx_q, skb) {
1099 		if (bt_cb(skb)->control.retries)
1100 			bt_cb(skb)->control.retries = 1;
1105 	chan->expected_tx_seq = chan->buffer_seq;
1107 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1108 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1109 	l2cap_seq_list_clear(&chan->retrans_list);
1110 	l2cap_seq_list_clear(&chan->srej_list);
1111 	skb_queue_purge(&chan->srej_q);
1113 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1114 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1116 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return the channel to the stable/no-role move
 * state, then for ERTM channels resynchronize via the P/F-bit exchange —
 * the initiator polls the peer (explicit poll, wait for F), the responder
 * waits for the peer's poll. Non-ERTM channels need no resync. */
1119 static void l2cap_move_done(struct l2cap_chan *chan)
1121 	u8 move_role = chan->move_role;
1122 	BT_DBG("chan %p", chan);
1124 	chan->move_state = L2CAP_MOVE_STABLE;
1125 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1127 	if (chan->mode != L2CAP_MODE_ERTM)
1130 	switch (move_role) {
1131 	case L2CAP_MOVE_ROLE_INITIATOR:
1132 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1133 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1135 	case L2CAP_MOVE_ROLE_RESPONDER:
1136 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Promote the channel to BT_CONNECTED: clear all configuration flags
 * (including CONF_NOT_COMPLETE set at creation), stop the channel timer,
 * and notify the owner. */
1141 static void l2cap_chan_ready(struct l2cap_chan *chan)
1143 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1144 	chan->conf_state = 0;
1145 	__clear_chan_timer(chan);
1147 	chan->state = BT_CONNECTED;
1149 	chan->ops->ready(chan);
/* Start channel establishment: discover AMP controllers first when the
 * channel can use one, otherwise send a plain Connection Request. */
1152 static void l2cap_start_connection(struct l2cap_chan *chan)
1154 	if (__amp_capable(chan)) {
1155 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1156 		a2mp_discover_amp(chan);
1158 		l2cap_send_conn_req(chan);
/* Drive connection setup for @chan. LE links are ready immediately. On
 * BR/EDR: if the feature-mask exchange already completed and security
 * checks pass with no connect pending, start the connection; otherwise
 * kick off an Information Request (feature mask) with its timeout. */
1162 static void l2cap_do_start(struct l2cap_chan *chan)
1164 	struct l2cap_conn *conn = chan->conn;
1166 	if (conn->hcon->type == LE_LINK) {
1167 		l2cap_chan_ready(chan);
1171 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1172 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1175 		if (l2cap_chan_check_security(chan) &&
1176 		    __l2cap_no_conn_pending(chan)) {
1177 			l2cap_start_connection(chan);
1180 		struct l2cap_info_req req;
1181 		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1183 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1184 		conn->info_ident = l2cap_get_ident(conn);
1186 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1188 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Non-zero when @mode (ERTM or streaming) is supported by both the
 * remote feature mask @feat_mask and the local feature mask. */
1193 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1195 	u32 local_feat_mask = l2cap_feat_mask;
1197 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1200 	case L2CAP_MODE_ERTM:
1201 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1202 	case L2CAP_MODE_STREAMING:
1203 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for @chan and move it to BT_DISCONN with
 * error @err set on its socket. Connected ERTM channels have their timers
 * stopped first; A2MP fixed channels skip the request and just change
 * state. */
1209 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1211 	struct sock *sk = chan->sk;
1212 	struct l2cap_conn *conn = chan->conn;
1213 	struct l2cap_disconn_req req;
1218 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1219 		__clear_retrans_timer(chan);
1220 		__clear_monitor_timer(chan);
1221 		__clear_ack_timer(chan);
1224 	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1225 		l2cap_state_change(chan, BT_DISCONN);
1229 	req.dcid = cpu_to_le16(chan->dcid);
1230 	req.scid = cpu_to_le16(chan->scid);
1231 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1235 	__l2cap_state_change(chan, BT_DISCONN);
1236 	__l2cap_chan_set_err(chan, err);
1240 /* ---- L2CAP connections ---- */
/* Advance every connection-oriented channel on @conn after the feature
 * exchange/security work completes. Channels in BT_CONNECT either start
 * their connection or are closed when the requested mode is unsupported
 * and the device is state-2 configured. Channels in BT_CONNECT2 (incoming,
 * awaiting our response) get a Connection Response: success or
 * authorization-pending when security passed, authentication-pending
 * otherwise; on success the first Configure Request is also sent. */
1241 static void l2cap_conn_start(struct l2cap_conn *conn)
1243 	struct l2cap_chan *chan, *tmp;
1245 	BT_DBG("conn %p", conn);
1247 	mutex_lock(&conn->chan_lock);
1249 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1250 		struct sock *sk = chan->sk;
1252 		l2cap_chan_lock(chan);
1254 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1255 			l2cap_chan_unlock(chan);
1259 		if (chan->state == BT_CONNECT) {
1260 			if (!l2cap_chan_check_security(chan) ||
1261 			    !__l2cap_no_conn_pending(chan)) {
1262 				l2cap_chan_unlock(chan);
1266 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1267 			    && test_bit(CONF_STATE2_DEVICE,
1268 					&chan->conf_state)) {
1269 				l2cap_chan_close(chan, ECONNRESET);
1270 				l2cap_chan_unlock(chan);
1274 			l2cap_start_connection(chan);
1276 		} else if (chan->state == BT_CONNECT2) {
1277 			struct l2cap_conn_rsp rsp;
1279 			rsp.scid = cpu_to_le16(chan->dcid);
1280 			rsp.dcid = cpu_to_le16(chan->scid);
1282 			if (l2cap_chan_check_security(chan)) {
1284 				if (test_bit(BT_SK_DEFER_SETUP,
1285 					     &bt_sk(sk)->flags)) {
1286 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1287 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1288 					chan->ops->defer(chan);
1291 					__l2cap_state_change(chan, BT_CONFIG);
1292 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1293 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1297 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1298 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1301 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a successful response that hasn't already triggered configuration
 * proceeds to the first Configure Request. */
1304 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1305 			    rsp.result != L2CAP_CR_SUCCESS) {
1306 				l2cap_chan_unlock(chan);
1310 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1311 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1312 				       l2cap_build_conf_req(chan, buf), buf);
1313 			chan->num_conf_req++;
1316 		l2cap_chan_unlock(chan);
1319 	mutex_unlock(&conn->chan_lock);
1322 /* Find socket with cid and source/destination bdaddr.
1323  * Returns closest match, locked.
/* Exact src+dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) match seen so far is remembered in c1. @state of 0 matches
 * any state. */
1325 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1329 	struct l2cap_chan *c, *c1 = NULL;
1331 	read_lock(&chan_list_lock);
1333 	list_for_each_entry(c, &chan_list, global_l) {
1334 		struct sock *sk = c->sk;
1336 		if (state && c->state != state)
1339 		if (c->scid == cid) {
1340 			int src_match, dst_match;
1341 			int src_any, dst_any;
1344 			src_match = !bacmp(&bt_sk(sk)->src, src);
1345 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1346 			if (src_match && dst_match) {
1347 				read_unlock(&chan_list_lock);
1352 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1353 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1354 			if ((src_match && dst_any) || (src_any && dst_match) ||
1355 			    (src_any && dst_any))
1360 	read_unlock(&chan_list_lock);
/* Handle a newly-ready LE link: if a listening ATT channel exists for the
 * connection's addresses and no client ATT channel already claims the
 * CID, spawn a child channel from the listener, bind it to the link's
 * addresses, and attach it to the connection. */
1365 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1367 	struct sock *parent;
1368 	struct l2cap_chan *chan, *pchan;
1372 	/* Check if we have socket listening on cid */
1373 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1374 					  conn->src, conn->dst);
1378 	/* Client ATT sockets should override the server one */
1379 	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1386 	chan = pchan->ops->new_connection(pchan);
1390 	chan->dcid = L2CAP_CID_ATT;
1392 	bacpy(&bt_sk(chan->sk)->src, conn->src);
1393 	bacpy(&bt_sk(chan->sk)->dst, conn->dst);
1395 	__l2cap_chan_add(conn, chan);
1398 	release_sock(parent);
/* Called when the underlying link is fully up. Kicks off LE security for
 * outgoing pairing, runs LE-specific setup, then walks every channel:
 * LE channels become ready once SMP security succeeds, non-connection-
 * oriented channels go straight to BT_CONNECTED, and channels still in
 * BT_CONNECT proceed with l2cap_do_start(). A2MP fixed channels are
 * skipped. */
1401 static void l2cap_conn_ready(struct l2cap_conn *conn)
1403 	struct l2cap_chan *chan;
1404 	struct hci_conn *hcon = conn->hcon;
1406 	BT_DBG("conn %p", conn);
1408 	/* For outgoing pairing which doesn't necessarily have an
1409 	 * associated socket (e.g. mgmt_pair_device).
1411 	if (hcon->out && hcon->type == LE_LINK)
1412 		smp_conn_security(hcon, hcon->pending_sec_level);
1414 	mutex_lock(&conn->chan_lock);
1416 	if (hcon->type == LE_LINK)
1417 		l2cap_le_conn_ready(conn);
1419 	list_for_each_entry(chan, &conn->chan_l, list) {
1421 		l2cap_chan_lock(chan);
1423 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1424 			l2cap_chan_unlock(chan);
1428 		if (hcon->type == LE_LINK) {
1429 			if (smp_conn_security(hcon, chan->sec_level))
1430 				l2cap_chan_ready(chan);
1432 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1433 			struct sock *sk = chan->sk;
1434 			__clear_chan_timer(chan);
1436 			__l2cap_state_change(chan, BT_CONNECTED);
1437 			sk->sk_state_change(sk);
1440 		} else if (chan->state == BT_CONNECT) {
1441 			l2cap_do_start(chan);
1444 		l2cap_chan_unlock(chan);
1447 	mutex_unlock(&conn->chan_lock);
1450 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that demanded a reliable
 * link (FLAG_FORCE_RELIABLE). */
1451 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1453 	struct l2cap_chan *chan;
1455 	BT_DBG("conn %p", conn);
1457 	mutex_lock(&conn->chan_lock);
1459 	list_for_each_entry(chan, &conn->chan_l, list) {
1460 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1461 			l2cap_chan_set_err(chan, err);
1464 	mutex_unlock(&conn->chan_lock);
/* Information request timed out: mark the feature-mask exchange as done
 * (with whatever we learned) and resume starting pending channels.
 */
1467 static void l2cap_info_timeout(struct work_struct *work)
1469 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1472 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1473 conn->info_ident = 0;
1475 l2cap_conn_start(conn);
1480 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1481 * callback is called during registration. The ->remove callback is called
1482 * during unregistration.
1483 * An l2cap_user object is unregistered either explicitly, or implicitly when
1484 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1485 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1486 * External modules must own a reference to the l2cap_conn object if they intend
1487 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1488 * any time if they don't.
/* Register an external l2cap_user on a connection.  Fails if the user is
 * already registered (list pointers non-NULL), if the connection has been
 * torn down (conn->hchan == NULL), or if the user's ->probe() rejects it.
 * NOTE(review): the hci_dev_lock() call and early-exit paths are elided in
 * this extract.
 */
1491 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1493 struct hci_dev *hdev = conn->hcon->hdev;
1496 /* We need to check whether l2cap_conn is registered. If it is not, we
1497 * must not register the l2cap_user. l2cap_conn_del() unregisters
1498 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1499 * relies on the parent hci_conn object to be locked. This itself relies
1500 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean the user is already on some list */
1505 if (user->list.next || user->list.prev) {
1510 /* conn->hchan is NULL after l2cap_conn_del() was called */
1516 ret = user->probe(conn, user);
1520 list_add(&user->list, &conn->users);
1524 hci_dev_unlock(hdev);
1527 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister an l2cap_user.  The list pointers are NULLed so a
 * later double-unregister is detected, and ->remove() is invoked under the
 * hci_dev lock.
 */
1529 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1531 struct hci_dev *hdev = conn->hcon->hdev;
/* NULL list pointers mean the user was never (or already un-) registered */
1535 if (!user->list.next || !user->list.prev)
1538 list_del(&user->list);
1539 user->list.next = NULL;
1540 user->list.prev = NULL;
1541 user->remove(conn, user);
1544 hci_dev_unlock(hdev);
1546 EXPORT_SYMBOL(l2cap_unregister_user);
/* Drain conn->users on connection teardown, invoking each user's ->remove()
 * callback.  Pointers are NULLed so the users see themselves as unregistered.
 */
1548 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1550 struct l2cap_user *user;
1552 while (!list_empty(&conn->users)) {
1553 user = list_first_entry(&conn->users, struct l2cap_user, list);
1554 list_del(&user->list);
1555 user->list.next = NULL;
1556 user->list.prev = NULL;
1557 user->remove(conn, user);
/* Tear down an l2cap_conn when its HCI link goes away: notify users, close
 * every channel with @err, cancel pending timers, detach from the hci_conn
 * and drop the connection reference.  Channels are held across close() so
 * they survive removal from the list while being shut down.
 */
1561 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1563 struct l2cap_conn *conn = hcon->l2cap_data;
1564 struct l2cap_chan *chan, *l;
1569 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Discard any partially reassembled frame */
1571 kfree_skb(conn->rx_skb);
1573 l2cap_unregister_all_users(conn);
1575 mutex_lock(&conn->chan_lock);
1578 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1579 l2cap_chan_hold(chan);
1580 l2cap_chan_lock(chan);
1582 l2cap_chan_del(chan, err);
1584 l2cap_chan_unlock(chan);
1586 chan->ops->close(chan);
1587 l2cap_chan_put(chan);
1590 mutex_unlock(&conn->chan_lock);
1592 hci_chan_del(conn->hchan);
/* info_timer is only armed once the feature-mask request was sent */
1594 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1595 cancel_delayed_work_sync(&conn->info_timer)
1597 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1598 cancel_delayed_work_sync(&conn->security_timer);
1599 smp_chan_destroy(conn);
1602 hcon->l2cap_data = NULL;
1604 l2cap_conn_put(conn);
/* SMP pairing timed out on an LE link: destroy the SMP context and tear the
 * whole connection down with ETIMEDOUT.
 */
1607 static void security_timeout(struct work_struct *work)
1609 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1610 security_timer.work);
1612 BT_DBG("conn %p", conn);
1614 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1615 smp_chan_destroy(conn);
1616 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and attach an l2cap_conn to an hci_conn (or return the existing
 * one).  Sets the MTU from the LE or ACL controller limits, takes a
 * reference on the hci_conn, and arms the security (LE) or info (ACL)
 * delayed work.  Returns NULL on allocation failure.
 */
1620 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1622 struct l2cap_conn *conn = hcon->l2cap_data;
1623 struct hci_chan *hchan;
1628 hchan = hci_chan_create(hcon);
1632 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: release the hci_chan we just created */
1634 hci_chan_del(hchan);
1638 kref_init(&conn->ref);
1639 hcon->l2cap_data = conn;
/* conn holds a reference on its hci_conn until l2cap_conn_free() */
1641 hci_conn_get(conn->hcon);
1642 conn->hchan = hchan;
1644 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1646 switch (hcon->type) {
1648 if (hcon->hdev->le_mtu) {
1649 conn->mtu = hcon->hdev->le_mtu;
1654 conn->mtu = hcon->hdev->acl_mtu;
1658 conn->src = &hcon->hdev->bdaddr;
1659 conn->dst = &hcon->dst;
1661 conn->feat_mask = 0;
1663 if (hcon->type == ACL_LINK)
1664 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1665 &hcon->hdev->dev_flags);
1667 spin_lock_init(&conn->lock);
1668 mutex_init(&conn->chan_lock);
1670 INIT_LIST_HEAD(&conn->chan_l);
1671 INIT_LIST_HEAD(&conn->users);
1673 if (hcon->type == LE_LINK)
1674 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1676 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
/* Default disconnect reason until the peer tells us otherwise */
1678 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the connection object.
 */
1683 static void l2cap_conn_free(struct kref *ref)
1685 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1687 hci_conn_put(conn->hcon);
/* Take a reference on the connection object (paired with l2cap_conn_put) */
1691 void l2cap_conn_get(struct l2cap_conn *conn)
1693 kref_get(&conn->ref);
1695 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a connection reference; frees via l2cap_conn_free() on last put */
1697 void l2cap_conn_put(struct l2cap_conn *conn)
1699 kref_put(&conn->ref, l2cap_conn_free);
1701 EXPORT_SYMBOL(l2cap_conn_put);
1703 /* ---- Socket interface ---- */
1705 /* Find socket with psm and source / destination bdaddr.
1706 * Returns closest match: an exact src+dst match wins immediately;
1706 * otherwise the first channel where the non-matching side is a
1706 * BDADDR_ANY wildcard is remembered and returned.
1708 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1712 struct l2cap_chan *c, *c1 = NULL;
1714 read_lock(&chan_list_lock);
1716 list_for_each_entry(c, &chan_list, global_l) {
1717 struct sock *sk = c->sk;
1719 if (state && c->state != state)
1722 if (c->psm == psm) {
1723 int src_match, dst_match;
1724 int src_any, dst_any;
1727 src_match = !bacmp(&bt_sk(sk)->src, src);
1728 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
/* Exact match: stop searching */
1729 if (src_match && dst_match) {
1730 read_unlock(&chan_list_lock);
/* Wildcard match: keep as fallback candidate */
1735 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1736 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1737 if ((src_match && dst_any) || (src_any && dst_match) ||
1738 (src_any && dst_any))
1743 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type with the
 * given PSM or fixed CID.  Validates the PSM encoding and channel mode,
 * resolves a route to an hci_dev, creates (or reuses) the ACL/LE link, adds
 * the channel to the connection and starts the connect state machine.
 * Returns 0 on success or a negative errno.
 * NOTE(review): several error-path and unlock lines are elided in this
 * extract.
 */
1748 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1749 bdaddr_t *dst, u8 dst_type)
1751 struct sock *sk = chan->sk;
1752 bdaddr_t *src = &bt_sk(sk)->src;
1753 struct l2cap_conn *conn;
1754 struct hci_conn *hcon;
1755 struct hci_dev *hdev;
1759 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1760 dst_type, __le16_to_cpu(psm));
1762 hdev = hci_get_route(dst, src);
1764 return -EHOSTUNREACH;
1768 l2cap_chan_lock(chan);
1770 /* PSM must be odd and lsb of upper byte must be 0 */
1771 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1772 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID */
1777 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1782 switch (chan->mode) {
1783 case L2CAP_MODE_BASIC:
1785 case L2CAP_MODE_ERTM:
1786 case L2CAP_MODE_STREAMING:
1795 switch (chan->state) {
1799 /* Already connecting */
1804 /* Already connected */
1818 /* Set destination address and psm */
1820 bacpy(&bt_sk(sk)->dst, dst);
1826 auth_type = l2cap_get_auth_type(chan);
1828 if (bdaddr_type_is_le(dst_type))
1829 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1830 chan->sec_level, auth_type);
1832 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1833 chan->sec_level, auth_type);
1836 err = PTR_ERR(hcon);
1840 conn = l2cap_conn_add(hcon);
1842 hci_conn_drop(hcon);
/* Refuse a fixed CID that is already taken on this connection */
1847 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1848 hci_conn_drop(hcon);
1853 /* Update source addr of the socket */
1854 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, so drop the channel lock around it */
1856 l2cap_chan_unlock(chan);
1857 l2cap_chan_add(conn, chan);
1858 l2cap_chan_lock(chan);
1860 /* l2cap_chan_add takes its own ref so we can drop this one */
1861 hci_conn_drop(hcon);
1863 l2cap_state_change(chan, BT_CONNECT);
1864 __set_chan_timer(chan, sk->sk_sndtimeo);
1866 if (hcon->state == BT_CONNECTED) {
1867 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1868 __clear_chan_timer(chan);
1869 if (l2cap_chan_check_security(chan))
1870 l2cap_state_change(chan, BT_CONNECTED);
1872 l2cap_do_start(chan);
1878 l2cap_chan_unlock(chan);
1879 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the connection drops.  Returns 0, a socket error, or
 * the signal errno if interrupted.
 * NOTE(review): the timeout initialization and loop-exit lines are elided
 * in this extract.
 */
1884 int __l2cap_wait_ack(struct sock *sk)
1886 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1887 DECLARE_WAITQUEUE(wait, current);
1891 add_wait_queue(sk_sleep(sk), &wait);
1892 set_current_state(TASK_INTERRUPTIBLE);
1893 while (chan->unacked_frames > 0 && chan->conn) {
1897 if (signal_pending(current)) {
1898 err = sock_intr_errno(timeo);
1903 timeo = schedule_timeout(timeo);
1905 set_current_state(TASK_INTERRUPTIBLE);
1907 err = sock_error(sk);
1911 set_current_state(TASK_RUNNING);
1912 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer expired while waiting for the peer's F-bit: feed a
 * MONITOR_TO event into the tx state machine.  The early unlock/put path
 * (elided here) handles a channel that is already shutting down.
 */
1916 static void l2cap_monitor_timeout(struct work_struct *work)
1918 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1919 monitor_timer.work);
1921 BT_DBG("chan %p", chan);
1923 l2cap_chan_lock(chan);
1926 l2cap_chan_unlock(chan);
1927 l2cap_chan_put(chan);
1931 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1933 l2cap_chan_unlock(chan);
/* Drop the reference taken when the timer was armed */
1934 l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed a RETRANS_TO event into the tx
 * state machine.  Mirrors l2cap_monitor_timeout(); the early unlock/put
 * path (elided here) handles a channel that is already shutting down.
 */
1937 static void l2cap_retrans_timeout(struct work_struct *work)
1939 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1940 retrans_timer.work);
1942 BT_DBG("chan %p", chan);
1944 l2cap_chan_lock(chan);
1947 l2cap_chan_unlock(chan);
1948 l2cap_chan_put(chan);
1952 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1953 l2cap_chan_unlock(chan);
/* Drop the reference taken when the timer was armed */
1954 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to the tx queue and flush it,
 * stamping each frame with the next tx sequence number and an optional
 * CRC16 FCS.  Streaming mode keeps no copies — frames are never
 * retransmitted.
 */
1957 static void l2cap_streaming_send(struct l2cap_chan *chan,
1958 struct sk_buff_head *skbs)
1960 struct sk_buff *skb;
1961 struct l2cap_ctrl *control;
1963 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Hold off while an AMP channel move is in progress */
1965 if (__chan_is_moving(chan))
1968 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1970 while (!skb_queue_empty(&chan->tx_q)) {
1972 skb = skb_dequeue(&chan->tx_q);
1974 bt_cb(skb)->control.retries = 1;
1975 control = &bt_cb(skb)->control;
1977 control->reqseq = 0;
1978 control->txseq = chan->next_tx_seq;
1980 __pack_control(chan, control, skb);
1982 if (chan->fcs == L2CAP_FCS_CRC16) {
1983 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1984 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1987 l2cap_do_send(chan, skb);
1989 BT_DBG("Sent txseq %u", control->txseq);
1991 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1992 chan->frames_sent++;
/* ERTM transmit: send queued I-frames while the remote tx window has room
 * and the tx state machine is in XMIT.  Each frame is cloned before sending
 * so the original stays queued for possible retransmission.  Returns the
 * number of frames sent (return statements elided in this extract).
 */
1996 static int l2cap_ertm_send(struct l2cap_chan *chan)
1998 struct sk_buff *skb, *tx_skb;
1999 struct l2cap_ctrl *control;
2002 BT_DBG("chan %p", chan);
2004 if (chan->state != BT_CONNECTED)
2007 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2010 if (__chan_is_moving(chan))
2013 while (chan->tx_send_head &&
2014 chan->unacked_frames < chan->remote_tx_win &&
2015 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2017 skb = chan->tx_send_head;
2019 bt_cb(skb)->control.retries = 1;
2020 control = &bt_cb(skb)->control;
/* Piggyback a pending F-bit on this I-frame */
2022 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame also acknowledges frames up to buffer_seq */
2025 control->reqseq = chan->buffer_seq;
2026 chan->last_acked_seq = chan->buffer_seq;
2027 control->txseq = chan->next_tx_seq;
2029 __pack_control(chan, control, skb);
2031 if (chan->fcs == L2CAP_FCS_CRC16) {
2032 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2033 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2036 /* Clone after data has been modified. Data is assumed to be
2037 read-only (for locking purposes) on cloned sk_buffs.
2039 tx_skb = skb_clone(skb, GFP_KERNEL);
2044 __set_retrans_timer(chan);
2046 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2047 chan->unacked_frames++;
2048 chan->frames_sent++;
2051 if (skb_queue_is_last(&chan->tx_q, skb))
2052 chan->tx_send_head = NULL;
2054 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2056 l2cap_do_send(chan, tx_skb);
2057 BT_DBG("Sent txseq %u", control->txseq);
2060 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2061 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.  Each
 * frame's retry count is bumped (disconnecting if max_tx is exceeded), the
 * control field is refreshed with the current reqseq/F-bit, and the FCS is
 * recomputed before sending a fresh clone/copy.
 */
2066 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2068 struct l2cap_ctrl control;
2069 struct sk_buff *skb;
2070 struct sk_buff *tx_skb;
2073 BT_DBG("chan %p", chan);
2075 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2078 if (__chan_is_moving(chan))
2081 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2082 seq = l2cap_seq_list_pop(&chan->retrans_list);
2084 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2086 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2091 bt_cb(skb)->control.retries++;
2092 control = bt_cb(skb)->control;
/* max_tx == 0 means unlimited retransmissions */
2094 if (chan->max_tx != 0 &&
2095 bt_cb(skb)->control.retries > chan->max_tx) {
2096 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2097 l2cap_send_disconn_req(chan, ECONNRESET);
2098 l2cap_seq_list_clear(&chan->retrans_list);
2102 control.reqseq = chan->buffer_seq;
2103 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2108 if (skb_cloned(skb)) {
2109 /* Cloned sk_buffs are read-only, so we need a
2112 tx_skb = skb_copy(skb, GFP_KERNEL);
2114 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: give up on this resend pass */
2118 l2cap_seq_list_clear(&chan->retrans_list);
2122 /* Update skb contents */
2123 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2124 put_unaligned_le32(__pack_extended_control(&control),
2125 tx_skb->data + L2CAP_HDR_SIZE);
2127 put_unaligned_le16(__pack_enhanced_control(&control),
2128 tx_skb->data + L2CAP_HDR_SIZE);
2131 if (chan->fcs == L2CAP_FCS_CRC16) {
2132 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2133 put_unaligned_le16(fcs, skb_put(tx_skb,
2137 l2cap_do_send(chan, tx_skb);
2139 BT_DBG("Resent txseq %d", control.txseq);
2141 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq (SREJ case) */
2145 static void l2cap_retransmit(struct l2cap_chan *chan,
2146 struct l2cap_ctrl *control)
2148 BT_DBG("chan %p, control %p", chan, control);
2150 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2151 l2cap_ertm_resend(chan);
/* Retransmit all unacked frames starting from control->reqseq (REJ case):
 * find the first unacked frame at or after reqseq, queue every txseq from
 * there up to tx_send_head on the retrans list, and resend them.
 */
2154 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2155 struct l2cap_ctrl *control)
2157 struct sk_buff *skb;
2159 BT_DBG("chan %p, control %p", chan, control);
2162 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2164 l2cap_seq_list_clear(&chan->retrans_list);
2166 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2169 if (chan->unacked_frames) {
/* Skip frames the peer has already acknowledged */
2170 skb_queue_walk(&chan->tx_q, skb) {
2171 if (bt_cb(skb)->control.txseq == control->reqseq ||
2172 skb == chan->tx_send_head)
2176 skb_queue_walk_from(&chan->tx_q, skb) {
/* Frames from tx_send_head on were never sent */
2177 if (skb == chan->tx_send_head)
2180 l2cap_seq_list_append(&chan->retrans_list,
2181 bt_cb(skb)->control.txseq);
2184 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends RNR when locally busy, otherwise
 * tries to piggyback the ack on pending I-frames; failing that, sends an
 * explicit RR once ~3/4 of the ack window is outstanding, or (re)arms the
 * ack timer to batch future acks.
 */
2188 static void l2cap_send_ack(struct l2cap_chan *chan)
2190 struct l2cap_ctrl control;
2191 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2192 chan->last_acked_seq);
2195 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2196 chan, chan->last_acked_seq, chan->buffer_seq);
2198 memset(&control, 0, sizeof(control));
2201 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2202 chan->rx_state == L2CAP_RX_STATE_RECV) {
2203 __clear_ack_timer(chan);
2204 control.super = L2CAP_SUPER_RNR;
2205 control.reqseq = chan->buffer_seq;
2206 l2cap_send_sframe(chan, &control);
2208 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2209 l2cap_ertm_send(chan);
2210 /* If any i-frames were sent, they included an ack */
2211 if (chan->buffer_seq == chan->last_acked_seq)
2215 /* Ack now if the window is 3/4ths full.
2216 * Calculate without mul or div
2218 threshold = chan->ack_win;
2219 threshold += threshold << 1;
2222 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2225 if (frames_to_ack >= threshold) {
2226 __clear_ack_timer(chan);
2227 control.super = L2CAP_SUPER_RR;
2228 control.reqseq = chan->buffer_seq;
2229 l2cap_send_sframe(chan, &control);
/* Below threshold: defer the ack to the ack timer */
2234 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes go
 * into the skb head, the remainder is split into MTU-sized fragment skbs
 * chained on frag_list.  Returns 0 on success or a negative errno.
 */
2238 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2239 struct msghdr *msg, int len,
2240 int count, struct sk_buff *skb)
2242 struct l2cap_conn *conn = chan->conn;
2243 struct sk_buff **frag;
2246 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2252 /* Continuation fragments (no L2CAP header) */
2253 frag = &skb_shinfo(skb)->frag_list;
2255 struct sk_buff *tmp;
2257 count = min_t(unsigned int, conn->mtu, len);
2259 tmp = chan->ops->alloc_skb(chan, count,
2260 msg->msg_flags & MSG_DONTWAIT);
2262 return PTR_ERR(tmp);
2266 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2269 (*frag)->priority = skb->priority;
/* Parent skb accounts for all fragment data */
2274 skb->len += (*frag)->len;
2275 skb->data_len += (*frag)->len;
2277 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by the
 * 2-byte PSM, then the user payload copied from @msg.  Returns the skb or
 * an ERR_PTR.
 */
2283 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2284 struct msghdr *msg, size_t len,
2287 struct l2cap_conn *conn = chan->conn;
2288 struct sk_buff *skb;
2289 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2290 struct l2cap_hdr *lh;
2292 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2293 __le16_to_cpu(chan->psm), len, priority);
2295 count = min_t(unsigned int, (conn->mtu - hlen), len);
2297 skb = chan->ops->alloc_skb(chan, count + hlen,
2298 msg->msg_flags & MSG_DONTWAIT);
2302 skb->priority = priority;
2304 /* Create L2CAP header */
2305 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2306 lh->cid = cpu_to_le16(chan->dcid);
2307 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2308 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2310 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2311 if (unlikely(err < 0)) {
2313 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus the user
 * payload copied from @msg.  Returns the skb or an ERR_PTR.
 */
2318 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2319 struct msghdr *msg, size_t len,
2322 struct l2cap_conn *conn = chan->conn;
2323 struct sk_buff *skb;
2325 struct l2cap_hdr *lh;
2327 BT_DBG("chan %p len %zu", chan, len);
2329 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2331 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2332 msg->msg_flags & MSG_DONTWAIT);
2336 skb->priority = priority;
2338 /* Create L2CAP header */
2339 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2340 lh->cid = cpu_to_le16(chan->dcid);
2341 lh->len = cpu_to_le16(len);
2343 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2344 if (unlikely(err < 0)) {
2346 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control field
 * (filled in at transmit time), an optional SDU-length field for the first
 * segment, the payload, and room reserved for the FCS.  Returns the skb or
 * an ERR_PTR.
 */
2351 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2352 struct msghdr *msg, size_t len,
2355 struct l2cap_conn *conn = chan->conn;
2356 struct sk_buff *skb;
2357 int err, count, hlen;
2358 struct l2cap_hdr *lh;
2360 BT_DBG("chan %p len %zu", chan, len);
2363 return ERR_PTR(-ENOTCONN);
2365 hlen = __ertm_hdr_size(chan);
/* Only the first segment of an SDU carries the SDU length */
2368 hlen += L2CAP_SDULEN_SIZE;
2370 if (chan->fcs == L2CAP_FCS_CRC16)
2371 hlen += L2CAP_FCS_SIZE;
2373 count = min_t(unsigned int, (conn->mtu - hlen), len);
2375 skb = chan->ops->alloc_skb(chan, count + hlen,
2376 msg->msg_flags & MSG_DONTWAIT);
2380 /* Create L2CAP header */
2381 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2382 lh->cid = cpu_to_le16(chan->dcid);
2383 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2385 /* Control header is populated later */
2386 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2387 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2389 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2392 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2394 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2395 if (unlikely(err < 0)) {
2397 return ERR_PTR(err);
2400 bt_cb(skb)->control.fcs = chan->fcs;
2401 bt_cb(skb)->control.retries = 0;
/* Split an SDU from @msg into I-frame PDUs queued on @seg_queue, tagging
 * each with the proper SAR value (UNSEGMENTED, or START/CONTINUE/END).
 * PDU size is bounded by the HCI MTU, the remote MPS, and — for BR/EDR —
 * the single-fragment payload limit.  Returns 0 or a negative errno (the
 * queue is purged on failure).
 */
2405 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2406 struct sk_buff_head *seg_queue,
2407 struct msghdr *msg, size_t len)
2409 struct sk_buff *skb;
2414 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2416 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2417 * so fragmented skbs are not used. The HCI layer's handling
2418 * of fragmented skbs is not compatible with ERTM's queueing.
2421 /* PDU size is derived from the HCI MTU */
2422 pdu_len = chan->conn->mtu;
2424 /* Constrain PDU size for BR/EDR connections */
2426 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2428 /* Adjust for largest possible L2CAP overhead. */
2430 pdu_len -= L2CAP_FCS_SIZE;
2432 pdu_len -= __ertm_hdr_size(chan);
2434 /* Remote device may have requested smaller PDUs */
2435 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2437 if (len <= pdu_len) {
2438 sar = L2CAP_SAR_UNSEGMENTED;
2442 sar = L2CAP_SAR_START;
/* The START frame also carries the 2-byte SDU length */
2444 pdu_len -= L2CAP_SDULEN_SIZE;
2448 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2451 __skb_queue_purge(seg_queue);
2452 return PTR_ERR(skb);
2455 bt_cb(skb)->control.sar = sar;
2456 __skb_queue_tail(seg_queue, skb);
/* Frames after START regain the SDU-length bytes */
2461 pdu_len += L2CAP_SDULEN_SIZE;
2464 if (len <= pdu_len) {
2465 sar = L2CAP_SAR_END;
2468 sar = L2CAP_SAR_CONTINUE;
/* Top-level send path: dispatch @msg according to channel type and mode.
 * Connectionless channels get a G-frame; basic mode a single B-frame;
 * ERTM/streaming segments the SDU first, then hands the queue to the tx
 * state machine (ERTM) or sends it directly (streaming).  Returns bytes
 * sent or a negative errno.
 */
2475 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2478 struct sk_buff *skb;
2480 struct sk_buff_head seg_queue;
2482 /* Connectionless channel */
2483 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2484 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2486 return PTR_ERR(skb);
2488 l2cap_do_send(chan, skb);
2492 switch (chan->mode) {
2493 case L2CAP_MODE_BASIC:
2494 /* Check outgoing MTU */
2495 if (len > chan->omtu)
2498 /* Create a basic PDU */
2499 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2501 return PTR_ERR(skb);
2503 l2cap_do_send(chan, skb);
2507 case L2CAP_MODE_ERTM:
2508 case L2CAP_MODE_STREAMING:
2509 /* Check outgoing MTU */
2510 if (len > chan->omtu) {
2515 __skb_queue_head_init(&seg_queue);
2517 /* Do segmentation before calling in to the state machine,
2518 * since it's possible to block while waiting for memory
2521 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2523 /* The channel could have been closed while segmenting,
2524 * check that it is still connected.
2526 if (chan->state != BT_CONNECTED) {
2527 __skb_queue_purge(&seg_queue);
2534 if (chan->mode == L2CAP_MODE_ERTM)
2535 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2537 l2cap_streaming_send(chan, &seg_queue);
2541 /* If the skbs were not queued for sending, they'll still be in
2542 * seg_queue and need to be purged.
2544 __skb_queue_purge(&seg_queue);
2548 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number between the expected
 * txseq and the one actually received (i.e. each missing frame), skipping
 * frames already buffered in srej_q, and remember them on srej_list.
 */
2555 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2557 struct l2cap_ctrl control;
2560 BT_DBG("chan %p, txseq %u", chan, txseq);
2562 memset(&control, 0, sizeof(control));
2564 control.super = L2CAP_SUPER_SREJ;
2566 for (seq = chan->expected_tx_seq; seq != txseq;
2567 seq = __next_seq(chan, seq)) {
2568 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2569 control.reqseq = seq;
2570 l2cap_send_sframe(chan, &control);
2571 l2cap_seq_list_append(&chan->srej_list, seq);
2575 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested missing frame (the tail
 * of srej_list); no-op if the list is empty.
 */
2578 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2580 struct l2cap_ctrl control;
2582 BT_DBG("chan %p", chan);
2584 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2587 memset(&control, 0, sizeof(control));
2589 control.super = L2CAP_SUPER_SREJ;
2590 control.reqseq = chan->srej_list.tail;
2591 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding missing frame except @txseq, cycling
 * each entry to the back of srej_list so the list order is preserved.
 */
2594 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2596 struct l2cap_ctrl control;
2600 BT_DBG("chan %p, txseq %u", chan, txseq);
2602 memset(&control, 0, sizeof(control));
2604 control.super = L2CAP_SUPER_SREJ;
2606 /* Capture initial list head to allow only one pass through the list. */
2607 initial_head = chan->srej_list.head;
2610 seq = l2cap_seq_list_pop(&chan->srej_list);
2611 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2614 control.reqseq = seq;
2615 l2cap_send_sframe(chan, &control);
/* Re-append: entry stays pending, now at the tail */
2616 l2cap_seq_list_append(&chan->srej_list, seq);
2617 } while (chan->srej_list.head != initial_head);
/* Process an incoming ReqSeq acknowledgement: free every tx-queue frame
 * with a sequence number before @reqseq, advance expected_ack_seq, and
 * stop the retransmission timer once nothing is left unacked.
 */
2620 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2622 struct sk_buff *acked_skb;
2625 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or this ack is a duplicate */
2627 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2630 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2631 chan->expected_ack_seq, chan->unacked_frames);
2633 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2634 ackseq = __next_seq(chan, ackseq)) {
2636 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2638 skb_unlink(acked_skb, &chan->tx_q);
2639 kfree_skb(acked_skb);
2640 chan->unacked_frames--;
2644 chan->expected_ack_seq = reqseq;
2646 if (chan->unacked_frames == 0)
2647 __clear_retrans_timer(chan);
2649 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon SREJ recovery: reset the expected sequence to buffer_seq, drop
 * all pending SREJ state and buffered out-of-order frames, and return the
 * rx state machine to RECV.
 */
2652 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2654 BT_DBG("chan %p", chan);
2656 chan->expected_tx_seq = chan->buffer_seq;
2657 l2cap_seq_list_clear(&chan->srej_list);
2658 skb_queue_purge(&chan->srej_q);
2659 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit state machine, XMIT state: data requests are queued and
 * sent immediately; local-busy transitions emit RNR/RR; explicit polls and
 * retransmission timeouts send a P-bit frame and move to WAIT_F.
 * NOTE(review): the break statements between cases are elided in this
 * extract — each labelled case is a separate branch in the original.
 */
2662 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2663 struct l2cap_ctrl *control,
2664 struct sk_buff_head *skbs, u8 event)
2666 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2670 case L2CAP_EV_DATA_REQUEST:
2671 if (chan->tx_send_head == NULL)
2672 chan->tx_send_head = skb_peek(skbs);
2674 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2675 l2cap_ertm_send(chan);
2677 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2678 BT_DBG("Enter LOCAL_BUSY");
2679 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2681 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2682 /* The SREJ_SENT state must be aborted if we are to
2683 * enter the LOCAL_BUSY state.
2685 l2cap_abort_rx_srej_sent(chan);
2688 l2cap_send_ack(chan);
2691 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2692 BT_DBG("Exit LOCAL_BUSY");
2693 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2695 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2696 struct l2cap_ctrl local_control;
/* We told the peer we were busy: send RR with P=1 to resume */
2698 memset(&local_control, 0, sizeof(local_control));
2699 local_control.sframe = 1;
2700 local_control.super = L2CAP_SUPER_RR;
2701 local_control.poll = 1;
2702 local_control.reqseq = chan->buffer_seq;
2703 l2cap_send_sframe(chan, &local_control);
2705 chan->retry_count = 1;
2706 __set_monitor_timer(chan);
2707 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2710 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2711 l2cap_process_reqseq(chan, control->reqseq);
2713 case L2CAP_EV_EXPLICIT_POLL:
2714 l2cap_send_rr_or_rnr(chan, 1);
2715 chan->retry_count = 1;
2716 __set_monitor_timer(chan);
2717 __clear_ack_timer(chan);
2718 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2720 case L2CAP_EV_RETRANS_TO:
2721 l2cap_send_rr_or_rnr(chan, 1);
2722 chan->retry_count = 1;
2723 __set_monitor_timer(chan);
2724 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2726 case L2CAP_EV_RECV_FBIT:
2727 /* Nothing to process */
2734 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2735 struct l2cap_ctrl *control,
2736 struct sk_buff_head *skbs, u8 event)
2738 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2742 case L2CAP_EV_DATA_REQUEST:
2743 if (chan->tx_send_head == NULL)
2744 chan->tx_send_head = skb_peek(skbs);
2745 /* Queue data, but don't send. */
2746 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2748 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2749 BT_DBG("Enter LOCAL_BUSY");
2750 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2752 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2753 /* The SREJ_SENT state must be aborted if we are to
2754 * enter the LOCAL_BUSY state.
2756 l2cap_abort_rx_srej_sent(chan);
2759 l2cap_send_ack(chan);
2762 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2763 BT_DBG("Exit LOCAL_BUSY");
2764 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2766 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2767 struct l2cap_ctrl local_control;
2768 memset(&local_control, 0, sizeof(local_control));
2769 local_control.sframe = 1;
2770 local_control.super = L2CAP_SUPER_RR;
2771 local_control.poll = 1;
2772 local_control.reqseq = chan->buffer_seq;
2773 l2cap_send_sframe(chan, &local_control);
2775 chan->retry_count = 1;
2776 __set_monitor_timer(chan);
2777 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2780 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2781 l2cap_process_reqseq(chan, control->reqseq);
2785 case L2CAP_EV_RECV_FBIT:
2786 if (control && control->final) {
2787 __clear_monitor_timer(chan);
2788 if (chan->unacked_frames > 0)
2789 __set_retrans_timer(chan);
2790 chan->retry_count = 0;
2791 chan->tx_state = L2CAP_TX_STATE_XMIT;
2792 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2795 case L2CAP_EV_EXPLICIT_POLL:
2798 case L2CAP_EV_MONITOR_TO:
2799 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2800 l2cap_send_rr_or_rnr(chan, 1);
2801 __set_monitor_timer(chan);
2802 chan->retry_count++;
2804 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch an ERTM transmit-side event to the handler for the current tx
 * state (XMIT or WAIT_F); other states ignore events.
 */
2812 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2813 struct sk_buff_head *skbs, u8 event)
2815 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2816 chan, control, skbs, event, chan->tx_state);
2818 switch (chan->tx_state) {
2819 case L2CAP_TX_STATE_XMIT:
2820 l2cap_tx_state_xmit(chan, control, skbs, event);
2822 case L2CAP_TX_STATE_WAIT_F:
2823 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's ReqSeq and F-bit to the tx state machine */
2831 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2832 struct l2cap_ctrl *control)
2834 BT_DBG("chan %p, control %p", chan, control);
2835 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only a received frame's F-bit to the tx state machine */
2838 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2839 struct l2cap_ctrl *control)
2841 BT_DBG("chan %p, control %p", chan, control);
2842 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2845 /* Copy frame to all raw sockets on that connection */
2846 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2848 struct sk_buff *nskb;
2849 struct l2cap_chan *chan;
2851 BT_DBG("conn %p", conn);
2853 mutex_lock(&conn->chan_lock);
2855 list_for_each_entry(chan, &conn->chan_l, list) {
2856 struct sock *sk = chan->sk;
2857 if (chan->chan_type != L2CAP_CHAN_RAW)
2860 /* Don't send frame to the socket it came from */
/* Clone per receiver; original skb stays with the caller */
2863 nskb = skb_clone(skb, GFP_KERNEL);
/* If the channel refuses the clone, free it ourselves */
2867 if (chan->ops->recv(chan, nskb))
2871 mutex_unlock(&conn->chan_lock);
2874 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-channel PDU: L2CAP header (CID 0x0001 for
 * BR/EDR or 0x0005 for LE), command header (code/ident/len) and @dlen bytes
 * of @data, fragmented into frag_list skbs when larger than the MTU.
 * Returns the skb or NULL on failure.
 */
2875 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2876 u8 ident, u16 dlen, void *data)
2878 struct sk_buff *skb, **frag;
2879 struct l2cap_cmd_hdr *cmd;
2880 struct l2cap_hdr *lh;
2883 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2884 conn, code, ident, dlen);
2886 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2889 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2890 count = min_t(unsigned int, conn->mtu, len);
2892 skb = bt_skb_alloc(count, GFP_KERNEL);
2896 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2897 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2899 if (conn->hcon->type == LE_LINK)
2900 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2902 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2904 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2907 cmd->len = cpu_to_le16(dlen);
2910 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2911 memcpy(skb_put(skb, count), data, count);
2917 /* Continuation fragments (no L2CAP header) */
2918 frag = &skb_shinfo(skb)->frag_list;
2920 count = min_t(unsigned int, conn->mtu, len);
2922 *frag = bt_skb_alloc(count, GFP_KERNEL);
2926 memcpy(skb_put(*frag, count), data, count);
2931 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its type and length and
 * decode the value by size (1/2/4 bytes, or a pointer for anything else).
 * The (elided) caller-side advance uses the returned total length.
 */
2941 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2944 struct l2cap_conf_opt *opt = *ptr;
2947 len = L2CAP_CONF_OPT_SIZE + opt->len;
2955 *val = *((u8 *) opt->val);
2959 *val = get_unaligned_le16(opt->val);
2963 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes */
2967 *val = (unsigned long) opt->val;
2971 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr, encoding the
 * value by size (1/2/4 bytes inline, memcpy from pointer otherwise), and
 * advance *ptr past the option.
 */
2975 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2977 struct l2cap_conf_opt *opt = *ptr;
2979 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2986 *((u8 *) opt->val) = val;
2990 put_unaligned_le16(val, opt->val);
2994 put_unaligned_le32(val, opt->val);
/* Larger values: val is interpreted as a source pointer */
2998 memcpy(opt->val, (void *) val, len);
3002 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option for the channel.
 * ERTM mode advertises the channel's local service parameters with
 * default access latency and flush timeout; streaming mode advertises
 * best-effort service.
 * NOTE(review): the default branch and some streaming-mode field
 * assignments fall outside this extract.
 */
3005 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3007 struct l2cap_conf_efs efs;
3009 switch (chan->mode) {
3010 case L2CAP_MODE_ERTM:
3011 efs.id = chan->local_id;
3012 efs.stype = chan->local_stype;
3013 efs.msdu = cpu_to_le16(chan->local_msdu);
3014 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3015 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3016 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3019 case L2CAP_MODE_STREAMING:
3021 efs.stype = L2CAP_SERV_BESTEFFORT;
3022 efs.msdu = cpu_to_le16(chan->local_msdu);
3023 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3032 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3033 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged (buffer_seq has moved past last_acked_seq),
 * send an RR/RNR to ack them. Drops the channel reference taken when
 * the timer was armed.
 */
3036 static void l2cap_ack_timeout(struct work_struct *work)
3038 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3042 BT_DBG("chan %p", chan);
3044 l2cap_chan_lock(chan);
/* Number of frames received but not yet acked. */
3046 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3047 chan->last_acked_seq);
3050 l2cap_send_rr_or_rnr(chan, 0);
3052 l2cap_chan_unlock(chan);
3053 l2cap_chan_put(chan);
/* Initialise ERTM/streaming state for a channel: reset all sequence
 * counters, tx queue, and AMP move state. For ERTM proper, additionally
 * set up the rx/tx state machines, the retransmission/monitor/ack
 * timers, the SREJ queue and the srej/retrans sequence lists.
 * Returns 0 on success or a negative errno from sequence-list init
 * (the srej_list is freed if retrans_list allocation fails).
 */
3056 int l2cap_ertm_init(struct l2cap_chan *chan)
3060 chan->next_tx_seq = 0;
3061 chan->expected_tx_seq = 0;
3062 chan->expected_ack_seq = 0;
3063 chan->unacked_frames = 0;
3064 chan->buffer_seq = 0;
3065 chan->frames_sent = 0;
3066 chan->last_acked_seq = 0;
3068 chan->sdu_last_frag = NULL;
3071 skb_queue_head_init(&chan->tx_q);
/* Start on the BR/EDR controller; no AMP move in progress. */
3073 chan->local_amp_id = AMP_ID_BREDR;
3074 chan->move_id = AMP_ID_BREDR;
3075 chan->move_state = L2CAP_MOVE_STABLE;
3076 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM machinery below. */
3078 if (chan->mode != L2CAP_MODE_ERTM)
3081 chan->rx_state = L2CAP_RX_STATE_RECV;
3082 chan->tx_state = L2CAP_TX_STATE_XMIT;
3084 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3085 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3086 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3088 skb_queue_head_init(&chan->srej_q);
3090 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3094 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Undo srej_list if retrans_list allocation failed. */
3096 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep ERTM/streaming only if the remote
 * feature mask supports it, otherwise fall back to basic mode.
 */
3101 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3104 case L2CAP_MODE_STREAMING:
3105 case L2CAP_MODE_ERTM:
3106 if (l2cap_mode_supported(mode, remote_feat_mask))
3110 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with High Speed enabled and the
 * remote advertising the extended-window feature bit.
 */
3114 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3116 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with High Speed enabled and
 * the remote advertising the extended-flow feature bit.
 */
3119 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3121 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts. On an
 * AMP link the timeouts are derived from the controller's best-effort
 * flush timeout (converted to ms, scaled by the recommended 3x + 500ms
 * formula, clamped to 16 bits); on BR/EDR the spec defaults are used.
 */
3124 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3125 struct l2cap_conf_rfc *rfc)
3127 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3128 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3130 /* Class 1 devices have must have ERTM timeouts
3131 * exceeding the Link Supervision Timeout. The
3132 * default Link Supervision Timeout for AMP
3133 * controllers is 10 seconds.
3135 * Class 1 devices use 0xffffffff for their
3136 * best-effort flush timeout, so the clamping logic
3137 * will result in a timeout that meets the above
3138 * requirement. ERTM timeouts are 16-bit values, so
3139 * the maximum timeout is 65.535 seconds.
3142 /* Convert timeout to milliseconds and round */
3143 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3145 /* This is the recommended formula for class 2 devices
3146 * that start ERTM timers when packets are sent to the
3149 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (max 65.535 s). */
3151 if (ertm_to > 0xffff)
3154 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3155 rfc->monitor_timeout = rfc->retrans_timeout;
/* BR/EDR: use the spec default timeouts. */
3157 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3158 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Configure the transmit window: if the requested window exceeds the
 * default and extended window size is supported, switch to the extended
 * control field; otherwise clamp tx_win to the default. ack_win always
 * starts equal to tx_win.
 */
3162 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3164 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3165 __l2cap_ews_supported(chan->conn)) {
3166 /* use extended control field */
3167 set_bit(FLAG_EXT_CTRL, &chan->flags);
3168 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3170 chan->tx_win = min_t(u16, chan->tx_win,
3171 L2CAP_DEFAULT_TX_WINDOW);
3172 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3174 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for the channel into `data`.
 * Adds an MTU option when non-default, then mode-specific options:
 * basic (RFC only, when the remote supports ERTM/streaming), ERTM
 * (RFC with timeouts/window/PDU size, optional EFS/EWS/FCS), or
 * streaming (RFC, optional EFS/FCS). Returns the request length
 * (computed from ptr — the return statement is outside this extract).
 * NOTE(review): several break statements and the first-request mode
 * selection's surrounding control flow are not visible here.
 */
3177 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3179 struct l2cap_conf_req *req = data;
3180 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3181 void *ptr = req->data;
3184 BT_DBG("chan %p", chan);
/* Mode is only negotiated on the very first request/response. */
3186 if (chan->num_conf_req || chan->num_conf_rsp)
3189 switch (chan->mode) {
3190 case L2CAP_MODE_STREAMING:
3191 case L2CAP_MODE_ERTM:
3192 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3195 if (__l2cap_efs_supported(chan->conn))
3196 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote supports. */
3200 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3205 if (chan->imtu != L2CAP_DEFAULT_MTU)
3206 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3208 switch (chan->mode) {
3209 case L2CAP_MODE_BASIC:
/* Only send an explicit basic-mode RFC if the remote knows
 * about ERTM or streaming at all.
 */
3210 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3211 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3214 rfc.mode = L2CAP_MODE_BASIC;
3216 rfc.max_transmit = 0;
3217 rfc.retrans_timeout = 0;
3218 rfc.monitor_timeout = 0;
3219 rfc.max_pdu_size = 0;
3221 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3222 (unsigned long) &rfc);
3225 case L2CAP_MODE_ERTM:
3226 rfc.mode = L2CAP_MODE_ERTM;
3227 rfc.max_transmit = chan->max_tx;
3229 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size bounded by the ACL MTU minus extended header,
 * SDU-length and (per upstream) FCS overhead.
 */
3231 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3232 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3234 rfc.max_pdu_size = cpu_to_le16(size);
3236 l2cap_txwin_setup(chan);
3238 rfc.txwin_size = min_t(u16, chan->tx_win,
3239 L2CAP_DEFAULT_TX_WINDOW);
3241 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3242 (unsigned long) &rfc);
3244 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3245 l2cap_add_opt_efs(&ptr, chan);
3247 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3248 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer FCS-off when we don't need it and the remote supports
 * the FCS option.
 */
3251 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3252 if (chan->fcs == L2CAP_FCS_NONE ||
3253 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3254 chan->fcs = L2CAP_FCS_NONE;
3255 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3260 case L2CAP_MODE_STREAMING:
3261 l2cap_txwin_setup(chan);
3262 rfc.mode = L2CAP_MODE_STREAMING;
3264 rfc.max_transmit = 0;
3265 rfc.retrans_timeout = 0;
3266 rfc.monitor_timeout = 0;
3268 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3269 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3271 rfc.max_pdu_size = cpu_to_le16(size);
3273 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3274 (unsigned long) &rfc);
3276 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3277 l2cap_add_opt_efs(&ptr, chan);
3279 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3280 if (chan->fcs == L2CAP_FCS_NONE ||
3281 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3282 chan->fcs = L2CAP_FCS_NONE;
3283 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3289 req->dcid = cpu_to_le16(chan->dcid);
3290 req->flags = __constant_cpu_to_le16(0);
3295 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3297 struct l2cap_conf_rsp *rsp = data;
3298 void *ptr = rsp->data;
3299 void *req = chan->conf_req;
3300 int len = chan->conf_len;
3301 int type, hint, olen;
3303 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3304 struct l2cap_conf_efs efs;
3306 u16 mtu = L2CAP_DEFAULT_MTU;
3307 u16 result = L2CAP_CONF_SUCCESS;
3310 BT_DBG("chan %p", chan);
3312 while (len >= L2CAP_CONF_OPT_SIZE) {
3313 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3315 hint = type & L2CAP_CONF_HINT;
3316 type &= L2CAP_CONF_MASK;
3319 case L2CAP_CONF_MTU:
3323 case L2CAP_CONF_FLUSH_TO:
3324 chan->flush_to = val;
3327 case L2CAP_CONF_QOS:
3330 case L2CAP_CONF_RFC:
3331 if (olen == sizeof(rfc))
3332 memcpy(&rfc, (void *) val, olen);
3335 case L2CAP_CONF_FCS:
3336 if (val == L2CAP_FCS_NONE)
3337 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3340 case L2CAP_CONF_EFS:
3342 if (olen == sizeof(efs))
3343 memcpy(&efs, (void *) val, olen);
3346 case L2CAP_CONF_EWS:
3347 if (!chan->conn->hs_enabled)
3348 return -ECONNREFUSED;
3350 set_bit(FLAG_EXT_CTRL, &chan->flags);
3351 set_bit(CONF_EWS_RECV, &chan->conf_state);
3352 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3353 chan->remote_tx_win = val;
3360 result = L2CAP_CONF_UNKNOWN;
3361 *((u8 *) ptr++) = type;
3366 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3369 switch (chan->mode) {
3370 case L2CAP_MODE_STREAMING:
3371 case L2CAP_MODE_ERTM:
3372 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3373 chan->mode = l2cap_select_mode(rfc.mode,
3374 chan->conn->feat_mask);
3379 if (__l2cap_efs_supported(chan->conn))
3380 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3382 return -ECONNREFUSED;
3385 if (chan->mode != rfc.mode)
3386 return -ECONNREFUSED;
3392 if (chan->mode != rfc.mode) {
3393 result = L2CAP_CONF_UNACCEPT;
3394 rfc.mode = chan->mode;
3396 if (chan->num_conf_rsp == 1)
3397 return -ECONNREFUSED;
3399 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3400 (unsigned long) &rfc);
3403 if (result == L2CAP_CONF_SUCCESS) {
3404 /* Configure output options and let the other side know
3405 * which ones we don't like. */
3407 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3408 result = L2CAP_CONF_UNACCEPT;
3411 set_bit(CONF_MTU_DONE, &chan->conf_state);
3413 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3416 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3417 efs.stype != L2CAP_SERV_NOTRAFIC &&
3418 efs.stype != chan->local_stype) {
3420 result = L2CAP_CONF_UNACCEPT;
3422 if (chan->num_conf_req >= 1)
3423 return -ECONNREFUSED;
3425 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3427 (unsigned long) &efs);
3429 /* Send PENDING Conf Rsp */
3430 result = L2CAP_CONF_PENDING;
3431 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3436 case L2CAP_MODE_BASIC:
3437 chan->fcs = L2CAP_FCS_NONE;
3438 set_bit(CONF_MODE_DONE, &chan->conf_state);
3441 case L2CAP_MODE_ERTM:
3442 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3443 chan->remote_tx_win = rfc.txwin_size;
3445 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3447 chan->remote_max_tx = rfc.max_transmit;
3449 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3450 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3451 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3452 rfc.max_pdu_size = cpu_to_le16(size);
3453 chan->remote_mps = size;
3455 __l2cap_set_ertm_timeouts(chan, &rfc);
3457 set_bit(CONF_MODE_DONE, &chan->conf_state);
3459 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3460 sizeof(rfc), (unsigned long) &rfc);
3462 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3463 chan->remote_id = efs.id;
3464 chan->remote_stype = efs.stype;
3465 chan->remote_msdu = le16_to_cpu(efs.msdu);
3466 chan->remote_flush_to =
3467 le32_to_cpu(efs.flush_to);
3468 chan->remote_acc_lat =
3469 le32_to_cpu(efs.acc_lat);
3470 chan->remote_sdu_itime =
3471 le32_to_cpu(efs.sdu_itime);
3472 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3474 (unsigned long) &efs);
3478 case L2CAP_MODE_STREAMING:
3479 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3480 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3481 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3482 rfc.max_pdu_size = cpu_to_le16(size);
3483 chan->remote_mps = size;
3485 set_bit(CONF_MODE_DONE, &chan->conf_state);
3487 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3488 (unsigned long) &rfc);
3493 result = L2CAP_CONF_UNACCEPT;
3495 memset(&rfc, 0, sizeof(rfc));
3496 rfc.mode = chan->mode;
3499 if (result == L2CAP_CONF_SUCCESS)
3500 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3502 rsp->scid = cpu_to_le16(chan->dcid);
3503 rsp->result = cpu_to_le16(result);
3504 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configuration Response from the remote and build a follow-up
 * Configuration Request into `data`, echoing/adjusting the options we
 * accept (MTU, flush timeout, RFC, EWS, EFS, FCS). On SUCCESS/PENDING,
 * commit the negotiated ERTM or streaming parameters to the channel.
 * Returns the new request length via ptr arithmetic (return statement
 * outside this extract) or -ECONNREFUSED on an unacceptable RFC/EFS.
 * NOTE(review): several break statements and parts of the option
 * handling are missing from this extract.
 */
3509 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3510 void *data, u16 *result)
3512 struct l2cap_conf_req *req = data;
3513 void *ptr = req->data;
3516 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3517 struct l2cap_conf_efs efs;
3519 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3521 while (len >= L2CAP_CONF_OPT_SIZE) {
3522 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3525 case L2CAP_CONF_MTU:
/* Remote proposed a too-small MTU: refuse, fall back to min. */
3526 if (val < L2CAP_DEFAULT_MIN_MTU) {
3527 *result = L2CAP_CONF_UNACCEPT;
3528 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3531 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3534 case L2CAP_CONF_FLUSH_TO:
3535 chan->flush_to = val;
3536 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3540 case L2CAP_CONF_RFC:
3541 if (olen == sizeof(rfc))
3542 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not change mode mid-negotiation. */
3544 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3545 rfc.mode != chan->mode)
3546 return -ECONNREFUSED;
3550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3551 sizeof(rfc), (unsigned long) &rfc);
3554 case L2CAP_CONF_EWS:
3555 chan->ack_win = min_t(u16, val, chan->ack_win);
3556 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3560 case L2CAP_CONF_EFS:
3561 if (olen == sizeof(efs))
3562 memcpy(&efs, (void *)val, olen);
/* Incompatible service type cannot be negotiated further. */
3564 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3565 efs.stype != L2CAP_SERV_NOTRAFIC &&
3566 efs.stype != chan->local_stype)
3567 return -ECONNREFUSED;
3569 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3570 (unsigned long) &efs);
3573 case L2CAP_CONF_FCS:
3574 if (*result == L2CAP_CONF_PENDING)
3575 if (val == L2CAP_FCS_NONE)
3576 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be switched by the remote. */
3582 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3583 return -ECONNREFUSED;
3585 chan->mode = rfc.mode;
3587 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3589 case L2CAP_MODE_ERTM:
/* Commit the negotiated ERTM parameters. */
3590 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3591 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3592 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3593 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3594 chan->ack_win = min_t(u16, chan->ack_win,
3597 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3598 chan->local_msdu = le16_to_cpu(efs.msdu);
3599 chan->local_sdu_itime =
3600 le32_to_cpu(efs.sdu_itime);
3601 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3602 chan->local_flush_to =
3603 le32_to_cpu(efs.flush_to);
3607 case L2CAP_MODE_STREAMING:
3608 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3612 req->dcid = cpu_to_le16(chan->dcid);
3613 req->flags = __constant_cpu_to_le16(0);
/* Build a minimal Configuration Response (scid/result/flags only, no
 * options) into `data`. Returns the response length (return statement
 * outside this extract).
 */
3618 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3619 u16 result, u16 flags)
3621 struct l2cap_conf_rsp *rsp = data;
3622 void *ptr = rsp->data;
3624 BT_DBG("chan %p", chan);
3626 rsp->scid = cpu_to_le16(chan->dcid);
3627 rsp->result = cpu_to_le16(result);
3628 rsp->flags = cpu_to_le16(flags);
/* Send the deferred success response for a connection that was held in
 * BT_CONNECT2 (e.g. pending authorisation): emits a Connect Response or
 * Create-Channel Response (the AMP-vs-BR/EDR condition is outside this
 * extract), then fires off our first Configuration Request if one has
 * not been sent yet.
 */
3633 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3635 struct l2cap_conn_rsp rsp;
3636 struct l2cap_conn *conn = chan->conn;
3640 rsp.scid = cpu_to_le16(chan->dcid);
3641 rsp.dcid = cpu_to_le16(chan->scid);
3642 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3643 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3646 rsp_code = L2CAP_CREATE_CHAN_RSP;
3648 rsp_code = L2CAP_CONN_RSP;
3650 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3652 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the initial config request once. */
3654 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3657 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3658 l2cap_build_conf_req(chan, buf), buf);
3659 chan->num_conf_req++;
/* Extract the final RFC (and extended window) parameters from a
 * successful Configuration Response and commit them to the channel.
 * Defaults cover remotes that omitted the options. Only meaningful for
 * ERTM/streaming channels; basic mode returns early.
 */
3662 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3666 /* Use sane default values in case a misbehaving remote device
3667 * did not send an RFC or extended window size option.
3669 u16 txwin_ext = chan->ack_win;
3670 struct l2cap_conf_rfc rfc = {
3672 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3673 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3674 .max_pdu_size = cpu_to_le16(chan->imtu),
3675 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3678 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3680 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3683 while (len >= L2CAP_CONF_OPT_SIZE) {
3684 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3687 case L2CAP_CONF_RFC:
3688 if (olen == sizeof(rfc))
3689 memcpy(&rfc, (void *)val, olen);
3691 case L2CAP_CONF_EWS:
3698 case L2CAP_MODE_ERTM:
3699 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3700 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3701 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* ack_win source depends on whether extended control is on. */
3702 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3703 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3705 chan->ack_win = min_t(u16, chan->ack_win,
3708 case L2CAP_MODE_STREAMING:
3709 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject. If it rejects our outstanding Information
 * Request ("not understood"), stop the info timer, mark feature-mask
 * discovery done and kick off any channels waiting on it.
 */
3713 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3714 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3717 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3719 if (cmd_len < sizeof(*rej))
3722 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
/* Only react if this reject matches our pending info request. */
3725 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3726 cmd->ident == conn->info_ident) {
3727 cancel_delayed_work(&conn->info_timer);
3729 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3730 conn->info_ident = 0;
3732 l2cap_conn_start(conn);
/* Handle an incoming Connect Request (or AMP Create-Channel Request).
 * Finds a listening channel for the PSM, checks link security, rejects
 * duplicate source CIDs, creates the child channel and decides the
 * result/status (SUCCESS, PEND for security/authorisation, or an
 * error), then sends the response. May also start feature-mask
 * discovery and the first Configuration Request.
 * Returns the new channel (or NULL — the failure paths and several
 * labels/locking lines are outside this extract).
 */
3738 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3739 struct l2cap_cmd_hdr *cmd,
3740 u8 *data, u8 rsp_code, u8 amp_id)
3742 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3743 struct l2cap_conn_rsp rsp;
3744 struct l2cap_chan *chan = NULL, *pchan;
3745 struct sock *parent, *sk = NULL;
3746 int result, status = L2CAP_CS_NO_INFO;
3748 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3749 __le16 psm = req->psm;
3751 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3753 /* Check if we have socket listening on psm */
3754 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3756 result = L2CAP_CR_BAD_PSM;
3762 mutex_lock(&conn->chan_lock);
3765 /* Check if the ACL is secure enough (if not SDP) */
3766 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3767 !hci_conn_check_link_mode(conn->hcon)) {
3768 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3769 result = L2CAP_CR_SEC_BLOCK;
3773 result = L2CAP_CR_NO_MEM;
3775 /* Check if we already have channel with that dcid */
3776 if (__l2cap_get_chan_by_dcid(conn, scid))
3779 chan = pchan->ops->new_connection(pchan);
3785 /* For certain devices (ex: HID mouse), support for authentication,
3786 * pairing and bonding is optional. For such devices, inorder to avoid
3787 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3788 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3790 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3792 bacpy(&bt_sk(sk)->src, conn->src);
3793 bacpy(&bt_sk(sk)->dst, conn->dst);
3796 chan->local_amp_id = amp_id;
3798 __l2cap_chan_add(conn, chan);
3802 __set_chan_timer(chan, sk->sk_sndtimeo);
3804 chan->ident = cmd->ident;
3806 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3807 if (l2cap_chan_check_security(chan)) {
/* Security passed: either defer to userspace or accept. */
3808 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3809 __l2cap_state_change(chan, BT_CONNECT2);
3810 result = L2CAP_CR_PEND;
3811 status = L2CAP_CS_AUTHOR_PEND;
3812 chan->ops->defer(chan);
3814 /* Force pending result for AMP controllers.
3815 * The connection will succeed after the
3816 * physical link is up.
3818 if (amp_id == AMP_ID_BREDR) {
3819 __l2cap_state_change(chan, BT_CONFIG);
3820 result = L2CAP_CR_SUCCESS;
3822 __l2cap_state_change(chan, BT_CONNECT2);
3823 result = L2CAP_CR_PEND;
3825 status = L2CAP_CS_NO_INFO;
/* Security not yet satisfied: pend on authentication. */
3828 __l2cap_state_change(chan, BT_CONNECT2);
3829 result = L2CAP_CR_PEND;
3830 status = L2CAP_CS_AUTHEN_PEND;
/* Feature discovery still running: pend with no info. */
3833 __l2cap_state_change(chan, BT_CONNECT2);
3834 result = L2CAP_CR_PEND;
3835 status = L2CAP_CS_NO_INFO;
3839 release_sock(parent);
3840 mutex_unlock(&conn->chan_lock);
3843 rsp.scid = cpu_to_le16(scid);
3844 rsp.dcid = cpu_to_le16(dcid);
3845 rsp.result = cpu_to_le16(result);
3846 rsp.status = cpu_to_le16(status);
3847 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* If we pended with no info, start feature-mask discovery now. */
3849 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3850 struct l2cap_info_req info;
3851 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3853 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3854 conn->info_ident = l2cap_get_ident(conn);
3856 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3858 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3859 sizeof(info), &info);
/* Accepted immediately: send our first Configuration Request. */
3862 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3863 result == L2CAP_CR_SUCCESS) {
3865 set_bit(CONF_REQ_SENT, &chan->conf_state);
3866 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3867 l2cap_build_conf_req(chan, buf), buf);
3868 chan->num_conf_req++;
/* Signalling handler for a Connect Request on BR/EDR: notify the
 * management interface of the connection (once per hcon) and delegate
 * the actual channel setup to l2cap_connect().
 */
3874 static int l2cap_connect_req(struct l2cap_conn *conn,
3875 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3877 struct hci_dev *hdev = conn->hcon->hdev;
3878 struct hci_conn *hcon = conn->hcon;
3880 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Tell mgmt about the connection exactly once per hcon. */
3884 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3885 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3886 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3887 hcon->dst_type, 0, NULL, 0,
3889 hci_dev_unlock(hdev);
3891 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connect/Create-Channel Response: look up our channel by
 * source CID (or by ident when the remote reported failure), then act
 * on the result — SUCCESS moves to BT_CONFIG and sends a Configuration
 * Request, PEND keeps waiting, any other result tears the channel down.
 * NOTE(review): the PEND case label and some error-path lines fall
 * outside this extract.
 */
3895 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3896 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3899 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3900 u16 scid, dcid, result, status;
3901 struct l2cap_chan *chan;
3905 if (cmd_len < sizeof(*rsp))
3908 scid = __le16_to_cpu(rsp->scid);
3909 dcid = __le16_to_cpu(rsp->dcid);
3910 result = __le16_to_cpu(rsp->result);
3911 status = __le16_to_cpu(rsp->status);
3913 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3914 dcid, scid, result, status);
3916 mutex_lock(&conn->chan_lock);
3919 chan = __l2cap_get_chan_by_scid(conn, scid);
/* Fallback lookup by ident (scid is absent on failure replies). */
3925 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3934 l2cap_chan_lock(chan);
3937 case L2CAP_CR_SUCCESS:
3938 l2cap_state_change(chan, BT_CONFIG);
3941 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3943 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3946 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3947 l2cap_build_conf_req(chan, req), req);
3948 chan->num_conf_req++;
3952 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: remote refused — delete the channel. */
3956 l2cap_chan_del(chan, ECONNREFUSED);
3960 l2cap_chan_unlock(chan);
3963 mutex_unlock(&conn->chan_lock);
/* Apply the FCS default after configuration: no FCS outside
 * ERTM/streaming, CRC16 otherwise unless both sides agreed to drop it.
 */
3968 static inline void set_default_fcs(struct l2cap_chan *chan)
3970 /* FCS is enabled only in ERTM or streaming mode, if one or both
3973 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3974 chan->fcs = L2CAP_FCS_NONE;
3975 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3976 chan->fcs = L2CAP_FCS_CRC16;
/* Send the deferred SUCCESS Configuration Response that was held back
 * while an EFS negotiation was pending, and mark local config output
 * as complete.
 */
3979 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3980 u8 ident, u16 flags)
3982 struct l2cap_conn *conn = chan->conn;
3984 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3987 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3988 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3990 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3991 l2cap_build_conf_rsp(chan, data,
3992 L2CAP_CONF_SUCCESS, flags), data);
/* Handle a Configuration Request: validate channel state, buffer the
 * option data (config may arrive in several fragments flagged with
 * CONTINUATION), and once complete, parse it, send our response, and —
 * when both directions are configured — finish channel setup (FCS
 * default, ERTM init, ready). Also sends our own Configuration Request
 * if we have not yet, and handles the EFS pending-response handshake.
 * NOTE(review): gotos, some unlock/return lines and parts of the EFS
 * compatibility check are outside this extract.
 */
3995 static inline int l2cap_config_req(struct l2cap_conn *conn,
3996 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3999 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4002 struct l2cap_chan *chan;
4005 if (cmd_len < sizeof(*req))
4008 dcid = __le16_to_cpu(req->dcid);
4009 flags = __le16_to_cpu(req->flags);
4011 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4013 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only makes sense in BT_CONFIG/BT_CONNECT2; otherwise
 * reject with invalid CID.
 */
4017 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4018 struct l2cap_cmd_rej_cid rej;
4020 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4021 rej.scid = cpu_to_le16(chan->scid);
4022 rej.dcid = cpu_to_le16(chan->dcid);
4024 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
4029 /* Reject if config buffer is too small. */
4030 len = cmd_len - sizeof(*req);
4031 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4032 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4033 l2cap_build_conf_rsp(chan, rsp,
4034 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel buffer. */
4039 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4040 chan->conf_len += len;
4042 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4043 /* Incomplete config. Send empty response. */
4044 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4045 l2cap_build_conf_rsp(chan, rsp,
4046 L2CAP_CONF_SUCCESS, flags), rsp);
4050 /* Complete config. */
4051 len = l2cap_parse_conf_req(chan, rsp);
4053 l2cap_send_disconn_req(chan, ECONNRESET);
4057 chan->ident = cmd->ident;
4058 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4059 chan->num_conf_rsp++;
4061 /* Reset config buffer. */
4064 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both sides done: finalise FCS and ERTM state, go ready. */
4067 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4068 set_default_fcs(chan);
4070 if (chan->mode == L2CAP_MODE_ERTM ||
4071 chan->mode == L2CAP_MODE_STREAMING)
4072 err = l2cap_ertm_init(chan);
4075 l2cap_send_disconn_req(chan, -err);
4077 l2cap_chan_ready(chan);
4082 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4084 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4085 l2cap_build_conf_req(chan, buf), buf);
4086 chan->num_conf_req++;
4089 /* Got Conf Rsp PENDING from remote side and asume we sent
4090 Conf Rsp PENDING in the code above */
4091 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4092 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4094 /* check compatibility */
4096 /* Send rsp for BR/EDR channel */
4098 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4100 chan->ident = cmd->ident;
4104 l2cap_chan_unlock(chan);
/* Handle a Configuration Response. SUCCESS commits the negotiated RFC
 * parameters; PENDING may trigger the EFS handshake (parse the partial
 * response, answer, possibly create the AMP logical link); UNACCEPT
 * retries with adjusted options up to L2CAP_CONF_MAX_CONF_RSP times;
 * anything else disconnects. When both config directions are done, the
 * channel is finalised (FCS default, ERTM init, ready).
 * NOTE(review): gotos and several break/unlock lines are outside this
 * extract.
 */
4108 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4109 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4112 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4113 u16 scid, flags, result;
4114 struct l2cap_chan *chan;
4115 int len = cmd_len - sizeof(*rsp);
4118 if (cmd_len < sizeof(*rsp))
4121 scid = __le16_to_cpu(rsp->scid);
4122 flags = __le16_to_cpu(rsp->flags);
4123 result = __le16_to_cpu(rsp->result);
4125 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4128 chan = l2cap_get_chan_by_scid(conn, scid);
4133 case L2CAP_CONF_SUCCESS:
4134 l2cap_conf_rfc_get(chan, rsp->data, len);
4135 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4138 case L2CAP_CONF_PENDING:
4139 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4141 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4144 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4147 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR answers right away; AMP waits for the
 * logical link before responding.
 */
4151 if (!chan->hs_hcon) {
4152 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4155 if (l2cap_check_efs(chan)) {
4156 amp_create_logical_link(chan);
4157 chan->ident = cmd->ident;
4163 case L2CAP_CONF_UNACCEPT:
4164 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Give up if the adjusted request could overflow. */
4167 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4168 l2cap_send_disconn_req(chan, ECONNRESET);
4172 /* throw out any old stored conf requests */
4173 result = L2CAP_CONF_SUCCESS;
4174 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4177 l2cap_send_disconn_req(chan, ECONNRESET);
4181 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4182 L2CAP_CONF_REQ, len, req);
4183 chan->num_conf_req++;
4184 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up on the channel. */
4190 l2cap_chan_set_err(chan, ECONNRESET);
4192 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4193 l2cap_send_disconn_req(chan, ECONNRESET);
4197 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4200 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4202 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4203 set_default_fcs(chan);
4205 if (chan->mode == L2CAP_MODE_ERTM ||
4206 chan->mode == L2CAP_MODE_STREAMING)
4207 err = l2cap_ertm_init(chan);
4210 l2cap_send_disconn_req(chan, -err);
4212 l2cap_chan_ready(chan);
4216 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: find our channel by its destination
 * CID, echo a Disconnection Response, shut down the socket, and delete
 * the channel. A reference is held across chan_del so ops->close can
 * run safely after the channel lock is dropped.
 */
4220 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4221 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4224 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4225 struct l2cap_disconn_rsp rsp;
4227 struct l2cap_chan *chan;
4230 if (cmd_len != sizeof(*req))
4233 scid = __le16_to_cpu(req->scid);
4234 dcid = __le16_to_cpu(req->dcid);
4236 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4238 mutex_lock(&conn->chan_lock);
/* The remote's dcid names our local scid side. */
4240 chan = __l2cap_get_chan_by_scid(conn, dcid);
4242 mutex_unlock(&conn->chan_lock);
4246 l2cap_chan_lock(chan);
4250 rsp.dcid = cpu_to_le16(chan->scid);
4251 rsp.scid = cpu_to_le16(chan->dcid);
4252 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4255 sk->sk_shutdown = SHUTDOWN_MASK;
/* Hold a ref so the channel survives until ops->close is done. */
4258 l2cap_chan_hold(chan);
4259 l2cap_chan_del(chan, ECONNRESET);
4261 l2cap_chan_unlock(chan);
4263 chan->ops->close(chan);
4264 l2cap_chan_put(chan);
4266 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response to our own disconnect request: look
 * up the channel by source CID and delete it cleanly (err 0). Same
 * hold/close/put dance as the request path.
 */
4271 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4272 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4275 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4277 struct l2cap_chan *chan;
4279 if (cmd_len != sizeof(*rsp))
4282 scid = __le16_to_cpu(rsp->scid);
4283 dcid = __le16_to_cpu(rsp->dcid);
4285 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4287 mutex_lock(&conn->chan_lock);
4289 chan = __l2cap_get_chan_by_scid(conn, scid);
4291 mutex_unlock(&conn->chan_lock);
4295 l2cap_chan_lock(chan);
4297 l2cap_chan_hold(chan);
4298 l2cap_chan_del(chan, 0);
4300 l2cap_chan_unlock(chan);
4302 chan->ops->close(chan);
4303 l2cap_chan_put(chan);
4305 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request: answer FEAT_MASK with our feature
 * mask (ERTM/streaming always; extended flow/window when High Speed is
 * enabled), FIXED_CHAN with the fixed-channel bitmap (A2MP bit tracks
 * hs_enabled), and anything else with NOTSUPP.
 */
4310 static inline int l2cap_information_req(struct l2cap_conn *conn,
4311 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4314 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4317 if (cmd_len != sizeof(*req))
4320 type = __le16_to_cpu(req->type);
4322 BT_DBG("type 0x%4.4x", type);
4324 if (type == L2CAP_IT_FEAT_MASK) {
4326 u32 feat_mask = l2cap_feat_mask;
4327 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4328 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4329 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4331 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4333 if (conn->hs_enabled)
4334 feat_mask |= L2CAP_FEAT_EXT_FLOW
4335 | L2CAP_FEAT_EXT_WINDOW;
4337 put_unaligned_le32(feat_mask, rsp->data);
4338 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4340 } else if (type == L2CAP_IT_FIXED_CHAN) {
4342 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when HS is on. */
4344 if (conn->hs_enabled)
4345 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4347 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4349 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4350 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4351 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4352 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4355 struct l2cap_info_rsp rsp;
4356 rsp.type = cpu_to_le16(type);
4357 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4358 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an Information Response to our discovery sequence: ignore
 * stale/duplicate replies, then on FEAT_MASK either chain a FIXED_CHAN
 * request (when the remote supports fixed channels) or finish; on
 * FIXED_CHAN record the mask and finish. Finishing marks discovery
 * done and starts pending channels via l2cap_conn_start().
 */
4365 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4366 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4369 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4372 if (cmd_len < sizeof(*rsp))
4375 type = __le16_to_cpu(rsp->type);
4376 result = __le16_to_cpu(rsp->result);
4378 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4380 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4381 if (cmd->ident != conn->info_ident ||
4382 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4385 cancel_delayed_work(&conn->info_timer);
/* Remote refused: treat discovery as complete anyway. */
4387 if (result != L2CAP_IR_SUCCESS) {
4388 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4389 conn->info_ident = 0;
4391 l2cap_conn_start(conn);
4397 case L2CAP_IT_FEAT_MASK:
4398 conn->feat_mask = get_unaligned_le32(rsp->data);
4400 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Chain the fixed-channel query before starting. */
4401 struct l2cap_info_req req;
4402 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4404 conn->info_ident = l2cap_get_ident(conn);
4406 l2cap_send_cmd(conn, conn->info_ident,
4407 L2CAP_INFO_REQ, sizeof(req), &req);
4409 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4410 conn->info_ident = 0;
4412 l2cap_conn_start(conn);
4416 case L2CAP_IT_FIXED_CHAN:
4417 conn->fixed_chan_mask = rsp->data[0];
4418 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4419 conn->info_ident = 0;
4421 l2cap_conn_start(conn);
/* Handle an incoming Create Channel request (AMP channel creation).
 *
 * amp_id == AMP_ID_BREDR is treated as an ordinary connect on the
 * BR/EDR controller; any other id must name a powered-up AMP
 * controller, otherwise a CREATE_CHAN_RSP with L2CAP_CR_BAD_AMP is
 * returned.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
4428 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4429 struct l2cap_cmd_hdr *cmd,
4430 u16 cmd_len, void *data)
4432 struct l2cap_create_chan_req *req = data;
4433 struct l2cap_create_chan_rsp rsp;
4434 struct l2cap_chan *chan;
4435 struct hci_dev *hdev;
/* The request has a fixed size; anything else is malformed. */
4438 if (cmd_len != sizeof(*req))
/* High-speed (AMP) support must be enabled on this connection. */
4441 if (!conn->hs_enabled)
4444 psm = le16_to_cpu(req->psm);
4445 scid = le16_to_cpu(req->scid);
4447 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4449 /* For controller id 0 make BR/EDR connection */
4450 if (req->amp_id == AMP_ID_BREDR) {
4451 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4456 /* Validate AMP controller id */
4457 hdev = hci_dev_get(req->amp_id);
/* The id must belong to an AMP controller that is currently up. */
4461 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4466 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4469 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4470 struct hci_conn *hs_hcon;
4472 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4478 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
/* Bind the new channel to the high-speed link; AMP channels do
 * not use an L2CAP FCS and inherit the controller's block MTU.
 */
4480 mgr->bredr_chan = chan;
4481 chan->hs_hcon = hs_hcon;
4482 chan->fcs = L2CAP_FCS_NONE;
4483 conn->mtu = hdev->block_mtu;
/* Error path: reply with "bad AMP id" so the peer can fall back. */
4492 rsp.scid = cpu_to_le16(scid);
4493 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4494 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4496 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for @chan toward controller
 * @dest_amp_id, remembering the signaling ident for matching the
 * response, and arm the channel move timer.
 */
4502 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4504 struct l2cap_move_chan_req req;
4507 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
/* Keep the ident so the MOVE_CHAN_RSP can be correlated later. */
4509 ident = l2cap_get_ident(chan->conn);
4510 chan->ident = ident;
4512 req.icid = cpu_to_le16(chan->scid);
4513 req.dest_amp_id = dest_amp_id;
4515 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
/* Abort the move if no response arrives within the move timeout. */
4518 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4521 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4523 struct l2cap_move_chan_rsp rsp;
4525 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4527 rsp.icid = cpu_to_le16(chan->dcid);
4528 rsp.result = cpu_to_le16(result);
4530 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm with @result for @chan under a fresh
 * signaling ident, and arm the move timer while waiting for the
 * confirm response.
 */
4534 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4536 struct l2cap_move_chan_cfm cfm;
4538 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4540 chan->ident = l2cap_get_ident(chan->conn);
4542 cfm.icid = cpu_to_le16(chan->scid);
4543 cfm.result = cpu_to_le16(result);
4545 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4548 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an "unconfirmed" Move Channel Confirm for a bare icid — used
 * when no matching channel could be located, so only the icid from the
 * peer's message is available.
 */
4551 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4553 struct l2cap_move_chan_cfm cfm;
4555 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4557 cfm.icid = cpu_to_le16(icid);
4558 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4560 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Acknowledge a Move Channel Confirm by sending the Confirm Response
 * carrying the same icid, using the confirm's ident.
 */
4564 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4567 struct l2cap_move_chan_cfm_rsp rsp;
4569 BT_DBG("icid 0x%4.4x", icid);
4571 rsp.icid = cpu_to_le16(icid);
4572 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link.
 * Actual link teardown is not implemented yet (placeholder below).
 */
4575 static void __release_logical_link(struct l2cap_chan *chan)
4577 chan->hs_hchan = NULL;
4578 chan->hs_hcon = NULL;
4580 /* Placeholder - release the logical link */
/* Clean up after a failed logical link setup.
 *
 * For a channel not yet connected the failed create is fatal and the
 * channel is disconnected; for an established channel the pending AMP
 * move is unwound according to the local move role.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
4583 static void l2cap_logical_fail(struct l2cap_chan *chan)
4585 /* Logical link setup failed */
4586 if (chan->state != BT_CONNECTED) {
4587 /* Create channel failure, disconnect */
4588 l2cap_send_disconn_req(chan, ECONNRESET);
4592 switch (chan->move_role) {
4593 case L2CAP_MOVE_ROLE_RESPONDER:
4594 l2cap_move_done(chan);
4595 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4597 case L2CAP_MOVE_ROLE_INITIATOR:
4598 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4599 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4600 /* Remote has only sent pending or
4601 * success responses, clean up
4603 l2cap_move_done(chan);
4606 /* Other amp move states imply that the move
4607 * has already aborted
4609 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach
 * the hci_chan, answer the outstanding (EFS) configure exchange, and —
 * if configuration already finished on the input side — run ERTM init
 * and mark the channel ready.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
4614 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4615 struct hci_chan *hchan)
4617 struct l2cap_conf_rsp rsp;
4619 chan->hs_hchan = hchan;
4620 chan->hs_hcon->l2cap_data = chan->conn;
4622 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4624 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4627 set_default_fcs(chan);
4629 err = l2cap_ertm_init(chan);
/* On ERTM init failure tear the channel down; otherwise ready. */
4631 l2cap_send_disconn_req(chan, -err);
4633 l2cap_chan_ready(chan);
/* Advance the AMP move state machine once the logical link is up,
 * sending the confirm or response appropriate to the local move role.
 * Unexpected states release the freshly-created logical link.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
4637 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4638 struct hci_chan *hchan)
4640 chan->hs_hcon = hchan->conn;
4641 chan->hs_hcon->l2cap_data = chan->conn;
4643 BT_DBG("move_state %d", chan->move_state);
4645 switch (chan->move_state) {
4646 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4647 /* Move confirm will be sent after a success
4648 * response is received
4650 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4652 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local busy defers the move; otherwise the side that initiated
 * confirms, and the responder reports success.
 */
4653 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4654 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4655 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4656 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4657 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4658 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4659 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4660 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4664 /* Move was not in expected state, free the channel */
4665 __release_logical_link(chan);
4667 chan->move_state = L2CAP_MOVE_STABLE;
4671 /* Call with chan locked */
/* Logical link confirmation entry point: on failure clean up and
 * release the link; on success finish either channel creation (channel
 * not yet connected and on an AMP controller) or an in-progress move.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
4672 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4675 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4678 l2cap_logical_fail(chan);
4679 __release_logical_link(chan);
4683 if (chan->state != BT_CONNECTED) {
4684 /* Ignore logical link if channel is on BR/EDR */
4685 if (chan->local_amp_id != AMP_ID_BREDR)
4686 l2cap_logical_finish_create(chan, hchan);
4688 l2cap_logical_finish_move(chan, hchan);
/* Begin moving @chan to the other controller type as initiator.
 *
 * From BR/EDR: only proceed when policy prefers AMP, then wait for the
 * physical link to be prepared. From AMP: set up the move and send the
 * Move Channel Request immediately (destination 0 = BR/EDR).
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
4692 void l2cap_move_start(struct l2cap_chan *chan)
4694 BT_DBG("chan %p", chan);
4696 if (chan->local_amp_id == AMP_ID_BREDR) {
4697 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4699 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4700 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4701 /* Placeholder - start physical link setup */
4703 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4704 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4706 l2cap_move_setup(chan);
4707 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation once the physical link outcome is known.
 *
 * Outgoing channels (BT_CONNECT) either proceed with a Create Channel
 * Request on the AMP or fall back to a plain BR/EDR connect. Incoming
 * channels answer the pending Create Channel Request and, on success,
 * move to BT_CONFIG and kick off configuration.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
4711 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4712 u8 local_amp_id, u8 remote_amp_id)
4714 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4715 local_amp_id, remote_amp_id);
/* AMP channels never use an L2CAP FCS. */
4717 chan->fcs = L2CAP_FCS_NONE;
4719 /* Outgoing channel on AMP */
4720 if (chan->state == BT_CONNECT) {
4721 if (result == L2CAP_CR_SUCCESS) {
4722 chan->local_amp_id = local_amp_id;
4723 l2cap_send_create_chan_req(chan, remote_amp_id);
4725 /* Revert to BR/EDR connect */
4726 l2cap_send_conn_req(chan);
4732 /* Incoming channel on AMP */
4733 if (__l2cap_no_conn_pending(chan)) {
4734 struct l2cap_conn_rsp rsp;
4736 rsp.scid = cpu_to_le16(chan->dcid);
4737 rsp.dcid = cpu_to_le16(chan->scid);
4739 if (result == L2CAP_CR_SUCCESS) {
4740 /* Send successful response */
4741 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4742 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4744 /* Send negative response */
4745 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4746 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4749 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4752 if (result == L2CAP_CR_SUCCESS) {
/* Creation accepted: enter configuration immediately. */
4753 __l2cap_state_change(chan, BT_CONFIG);
4754 set_bit(CONF_REQ_SENT, &chan->conf_state);
4755 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4757 l2cap_build_conf_req(chan, buf), buf);
4758 chan->num_conf_req++;
/* Initiator side: record the target controller, enter the
 * wait-for-response state, and send the Move Channel Request.
 */
4763 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4766 l2cap_move_setup(chan);
4767 chan->move_id = local_amp_id;
4768 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4770 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer the move request based on logical link
 * readiness — success when the link is (or becomes) connected,
 * "not allowed" when no logical link is available.
 *
 * NOTE(review): hchan acquisition is still a placeholder; extraction
 * also dropped interleaved lines here. Visible lines kept verbatim.
 */
4773 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4775 struct hci_chan *hchan = NULL;
4777 /* Placeholder - get hci_chan for logical link */
4780 if (hchan->state == BT_CONNECTED) {
4781 /* Logical link is ready to go */
4782 chan->hs_hcon = hchan->conn;
4783 chan->hs_hcon->l2cap_data = chan->conn;
4784 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4785 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4787 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4789 /* Wait for logical link to be ready */
4790 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4793 /* Logical link not available */
4794 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move: a responder still owes the peer a
 * response (BAD_ID for -EINVAL, otherwise NOT_ALLOWED); then reset the
 * move role/state and resume ERTM transmission.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
4798 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4800 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4802 if (result == -EINVAL)
4803 rsp_result = L2CAP_MR_BAD_ID;
4805 rsp_result = L2CAP_MR_NOT_ALLOWED;
4807 l2cap_send_move_chan_rsp(chan, rsp_result);
4810 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4811 chan->move_state = L2CAP_MOVE_STABLE;
4813 /* Restart data transmission */
4814 l2cap_ertm_send(chan);
4817 /* Invoke with locked chan */
/* Physical link confirmation: dispatch to channel creation (when not
 * yet connected), to move cancel on failure, or to the role-specific
 * move continuation for an established channel.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
4818 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4820 u8 local_amp_id = chan->local_amp_id;
4821 u8 remote_amp_id = chan->remote_amp_id;
4823 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4824 chan, result, local_amp_id, remote_amp_id);
/* Channel is going away — nothing to do. */
4826 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4827 l2cap_chan_unlock(chan);
4831 if (chan->state != BT_CONNECTED) {
4832 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4833 } else if (result != L2CAP_MR_SUCCESS) {
4834 l2cap_do_move_cancel(chan, result);
4836 switch (chan->move_role) {
4837 case L2CAP_MOVE_ROLE_INITIATOR:
4838 l2cap_do_move_initiate(chan, local_amp_id,
4841 case L2CAP_MOVE_ROLE_RESPONDER:
4842 l2cap_do_move_respond(chan, result);
4845 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.
 *
 * Validates the request (HS enabled, channel exists and is a dynamic
 * ERTM/streaming channel not pinned to BR/EDR, destination id valid
 * and different from the current controller), detects move collisions
 * by bd_addr comparison, then either starts acting as move responder
 * or rejects with an appropriate result code.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
4851 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4852 struct l2cap_cmd_hdr *cmd,
4853 u16 cmd_len, void *data)
4855 struct l2cap_move_chan_req *req = data;
4856 struct l2cap_move_chan_rsp rsp;
4857 struct l2cap_chan *chan;
4859 u16 result = L2CAP_MR_NOT_ALLOWED;
4861 if (cmd_len != sizeof(*req))
4864 icid = le16_to_cpu(req->icid);
4866 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4868 if (!conn->hs_enabled)
4871 chan = l2cap_get_chan_by_dcid(conn, icid);
/* No such channel: refuse using only the icid from the request. */
4873 rsp.icid = cpu_to_le16(icid);
4874 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4875 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Remember the ident for the eventual Move Channel Response. */
4880 chan->ident = cmd->ident;
4882 if (chan->scid < L2CAP_CID_DYN_START ||
4883 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4884 (chan->mode != L2CAP_MODE_ERTM &&
4885 chan->mode != L2CAP_MODE_STREAMING)) {
4886 result = L2CAP_MR_NOT_ALLOWED;
4887 goto send_move_response;
4890 if (chan->local_amp_id == req->dest_amp_id) {
4891 result = L2CAP_MR_SAME_ID;
4892 goto send_move_response;
4895 if (req->dest_amp_id != AMP_ID_BREDR) {
4896 struct hci_dev *hdev;
4897 hdev = hci_dev_get(req->dest_amp_id);
4898 if (!hdev || hdev->dev_type != HCI_AMP ||
4899 !test_bit(HCI_UP, &hdev->flags)) {
4903 result = L2CAP_MR_BAD_ID;
4904 goto send_move_response;
4909 /* Detect a move collision. Only send a collision response
4910 * if this side has "lost", otherwise proceed with the move.
4911 * The winner has the larger bd_addr.
4913 if ((__chan_is_moving(chan) ||
4914 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4915 bacmp(conn->src, conn->dst) > 0) {
4916 result = L2CAP_MR_COLLISION;
4917 goto send_move_response;
4920 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4921 l2cap_move_setup(chan);
4922 chan->move_id = req->dest_amp_id;
4925 if (req->dest_amp_id == AMP_ID_BREDR) {
4926 /* Moving to BR/EDR */
4927 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4928 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4929 result = L2CAP_MR_PEND;
4931 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4932 result = L2CAP_MR_SUCCESS;
4935 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4936 /* Placeholder - uncomment when amp functions are available */
4937 /*amp_accept_physical(chan, req->dest_amp_id);*/
4938 result = L2CAP_MR_PEND;
4942 l2cap_send_move_chan_rsp(chan, result);
4944 l2cap_chan_unlock(chan);
/* Continue the initiator's move state machine after a success/pending
 * Move Channel Response.
 *
 * A pending result re-arms the extended move timer. Depending on the
 * current move state this either records that a confirm is owed once
 * the logical link completes, sends the confirm immediately, or aborts
 * the move (unconfirmed) when the channel or link is unavailable.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
4949 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4951 struct l2cap_chan *chan;
4952 struct hci_chan *hchan = NULL;
4954 chan = l2cap_get_chan_by_scid(conn, icid);
/* Unknown icid: reply unconfirmed with the peer's icid. */
4956 l2cap_send_move_chan_cfm_icid(conn, icid);
4960 __clear_chan_timer(chan);
4961 if (result == L2CAP_MR_PEND)
4962 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4964 switch (chan->move_state) {
4965 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4966 /* Move confirm will be sent when logical link
4969 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4971 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4972 if (result == L2CAP_MR_PEND) {
4974 } else if (test_bit(CONN_LOCAL_BUSY,
4975 &chan->conn_state)) {
4976 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4978 /* Logical link is up or moving to BR/EDR,
4981 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4982 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4985 case L2CAP_MOVE_WAIT_RSP:
4987 if (result == L2CAP_MR_SUCCESS) {
4988 /* Remote is ready, send confirm immediately
4989 * after logical link is ready
4991 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4993 /* Both logical link and move success
4994 * are required to confirm
4996 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4999 /* Placeholder - get hci_chan for logical link */
5001 /* Logical link not available */
5002 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5006 /* If the logical link is not yet connected, do not
5007 * send confirmation.
5009 if (hchan->state != BT_CONNECTED)
5012 /* Logical link is already ready to go */
5014 chan->hs_hcon = hchan->conn;
5015 chan->hs_hcon->l2cap_data = chan->conn;
5017 if (result == L2CAP_MR_SUCCESS) {
5018 /* Can confirm now */
5019 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5021 /* Now only need move success
5024 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5027 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5030 /* Any other amp move state means the move failed. */
5031 chan->move_id = chan->local_amp_id;
5032 l2cap_move_done(chan);
5033 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5036 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response. On a collision the initiator
 * simply switches to the responder role; otherwise the move is
 * cancelled and an unconfirmed Move Channel Confirm is sent.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5039 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5042 struct l2cap_chan *chan;
5044 chan = l2cap_get_chan_by_ident(conn, ident);
5046 /* Could not locate channel, icid is best guess */
5047 l2cap_send_move_chan_cfm_icid(conn, icid);
5051 __clear_chan_timer(chan);
5053 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5054 if (result == L2CAP_MR_COLLISION) {
5055 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5057 /* Cleanup - cancel move */
5058 chan->move_id = chan->local_amp_id;
5059 l2cap_move_done(chan);
5063 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5065 l2cap_chan_unlock(chan);
/* Dispatch an incoming Move Channel Response: success/pending results
 * continue the move, everything else is treated as failure.
 */
5068 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5069 struct l2cap_cmd_hdr *cmd,
5070 u16 cmd_len, void *data)
5072 struct l2cap_move_chan_rsp *rsp = data;
/* Fixed-size PDU; reject anything else as malformed. */
5075 if (cmd_len != sizeof(*rsp))
5078 icid = le16_to_cpu(rsp->icid);
5079 result = le16_to_cpu(rsp->result);
5081 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5083 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5084 l2cap_move_continue(conn, icid, result);
5086 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm.
 *
 * Per spec a Confirm Response is sent even when the icid is unknown.
 * A confirmed result commits the controller switch (releasing the
 * logical link when landing back on BR/EDR); unconfirmed reverts to
 * the current controller.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5091 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5092 struct l2cap_cmd_hdr *cmd,
5093 u16 cmd_len, void *data)
5095 struct l2cap_move_chan_cfm *cfm = data;
5096 struct l2cap_chan *chan;
5099 if (cmd_len != sizeof(*cfm))
5102 icid = le16_to_cpu(cfm->icid);
5103 result = le16_to_cpu(cfm->result);
5105 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5107 chan = l2cap_get_chan_by_dcid(conn, icid);
5109 /* Spec requires a response even if the icid was not found */
5110 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5114 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5115 if (result == L2CAP_MC_CONFIRMED) {
5116 chan->local_amp_id = chan->move_id;
5117 if (chan->local_amp_id == AMP_ID_BREDR)
5118 __release_logical_link(chan);
5120 chan->move_id = chan->local_amp_id;
5123 l2cap_move_done(chan);
5126 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5128 l2cap_chan_unlock(chan);
/* Handle the Move Channel Confirm Response that completes a move:
 * commit the new controller id, release the logical link when moving
 * back to BR/EDR, and finish the move.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5133 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5134 struct l2cap_cmd_hdr *cmd,
5135 u16 cmd_len, void *data)
5137 struct l2cap_move_chan_cfm_rsp *rsp = data;
5138 struct l2cap_chan *chan;
5141 if (cmd_len != sizeof(*rsp))
5144 icid = le16_to_cpu(rsp->icid);
5146 BT_DBG("icid 0x%4.4x", icid);
5148 chan = l2cap_get_chan_by_scid(conn, icid);
5152 __clear_chan_timer(chan);
5154 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5155 chan->local_amp_id = chan->move_id;
5157 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5158 __release_logical_link(chan);
5160 l2cap_move_done(chan);
5163 l2cap_chan_unlock(chan);
5168 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5173 if (min > max || min < 6 || max > 3200)
5176 if (to_multiplier < 10 || to_multiplier > 3200)
5179 if (max >= to_multiplier * 8)
5182 max_latency = (to_multiplier * 8 / max) - 1;
5183 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request.
 *
 * Only the master accepts these. Parameters are validated with
 * l2cap_check_conn_param(); the response carries accepted/rejected and
 * on acceptance the controller is asked to update the connection.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5189 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5190 struct l2cap_cmd_hdr *cmd,
5193 struct hci_conn *hcon = conn->hcon;
5194 struct l2cap_conn_param_update_req *req;
5195 struct l2cap_conn_param_update_rsp rsp;
5196 u16 min, max, latency, to_multiplier, cmd_len;
/* Slaves must not process parameter update requests. */
5199 if (!(hcon->link_mode & HCI_LM_MASTER))
5202 cmd_len = __le16_to_cpu(cmd->len);
5203 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5206 req = (struct l2cap_conn_param_update_req *) data;
5207 min = __le16_to_cpu(req->min);
5208 max = __le16_to_cpu(req->max);
5209 latency = __le16_to_cpu(req->latency);
5210 to_multiplier = __le16_to_cpu(req->to_multiplier);
5212 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5213 min, max, latency, to_multiplier);
5215 memset(&rsp, 0, sizeof(rsp));
5217 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5219 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5221 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5223 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters were acceptable: apply them on the LE link. */
5227 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler.
 * Echo requests are answered inline; unknown opcodes are logged.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5232 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5233 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5238 switch (cmd->code) {
5239 case L2CAP_COMMAND_REJ:
5240 l2cap_command_rej(conn, cmd, cmd_len, data);
5243 case L2CAP_CONN_REQ:
5244 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5247 case L2CAP_CONN_RSP:
5248 case L2CAP_CREATE_CHAN_RSP:
/* Connect and Create Channel responses share one handler. */
5249 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5252 case L2CAP_CONF_REQ:
5253 err = l2cap_config_req(conn, cmd, cmd_len, data);
5256 case L2CAP_CONF_RSP:
5257 l2cap_config_rsp(conn, cmd, cmd_len, data);
5260 case L2CAP_DISCONN_REQ:
5261 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5264 case L2CAP_DISCONN_RSP:
5265 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5268 case L2CAP_ECHO_REQ:
/* Echo the received payload straight back. */
5269 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5272 case L2CAP_ECHO_RSP:
5275 case L2CAP_INFO_REQ:
5276 err = l2cap_information_req(conn, cmd, cmd_len, data);
5279 case L2CAP_INFO_RSP:
5280 l2cap_information_rsp(conn, cmd, cmd_len, data);
5283 case L2CAP_CREATE_CHAN_REQ:
5284 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5287 case L2CAP_MOVE_CHAN_REQ:
5288 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5291 case L2CAP_MOVE_CHAN_RSP:
5292 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5295 case L2CAP_MOVE_CHAN_CFM:
5296 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5299 case L2CAP_MOVE_CHAN_CFM_RSP:
5300 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5304 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command. Only command reject and the
 * connection parameter update pair are recognized here.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5312 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5313 struct l2cap_cmd_hdr *cmd, u8 *data)
5315 switch (cmd->code) {
5316 case L2CAP_COMMAND_REJ:
5319 case L2CAP_CONN_PARAM_UPDATE_REQ:
5320 return l2cap_conn_param_update_req(conn, cmd, data);
5322 case L2CAP_CONN_PARAM_UPDATE_RSP:
5326 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Map a handler errno to the little-endian L2CAP Command Reject
 * reason code; unmatched errors fall back to "not understood".
 *
 * NOTE(review): the switch case labels were dropped by extraction;
 * visible lines are kept verbatim.
 */
5331 static __le16 l2cap_err_to_reason(int err)
5335 return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
5337 return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED);
5341 return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
/* Process the LE signaling channel: one command per PDU. Validates the
 * link type, header size, declared length and non-zero ident, then
 * dispatches; on handler error a Command Reject is returned.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5345 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5346 struct sk_buff *skb)
5348 struct hci_conn *hcon = conn->hcon;
5349 struct l2cap_cmd_hdr *cmd;
5353 if (hcon->type != LE_LINK)
5356 if (skb->len < L2CAP_CMD_HDR_SIZE)
5359 cmd = (void *) skb->data;
5360 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5362 len = le16_to_cpu(cmd->len);
5364 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signaling PDUs carry exactly one command; ident 0 is invalid. */
5366 if (len != skb->len || !cmd->ident) {
5367 BT_DBG("corrupted command");
5371 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5373 struct l2cap_cmd_rej_unk rej;
5375 BT_ERR("Wrong link type (%d)", err);
5377 rej.reason = l2cap_err_to_reason(err);
5378 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process the BR/EDR signaling channel: a PDU may carry several
 * commands back-to-back. Each is copied into a local header, length-
 * checked, and dispatched; handler errors produce a Command Reject.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5386 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5387 struct sk_buff *skb)
5389 struct hci_conn *hcon = conn->hcon;
5390 u8 *data = skb->data;
5392 struct l2cap_cmd_hdr cmd;
/* Raw sockets get a copy of every signaling PDU. */
5395 l2cap_raw_recv(conn, skb);
5397 if (hcon->type != ACL_LINK)
5400 while (len >= L2CAP_CMD_HDR_SIZE) {
5402 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5403 data += L2CAP_CMD_HDR_SIZE;
5404 len -= L2CAP_CMD_HDR_SIZE;
5406 cmd_len = le16_to_cpu(cmd.len);
5408 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Stop parsing when the declared length overruns the PDU. */
5411 if (cmd_len > len || !cmd.ident) {
5412 BT_DBG("corrupted command");
5416 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5418 struct l2cap_cmd_rej_unk rej;
5420 BT_ERR("Wrong link type (%d)", err);
5422 rej.reason = l2cap_err_to_reason(err);
5423 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the CRC16 FCS trailer on a received frame when
 * the channel uses L2CAP_FCS_CRC16. The CRC covers the L2CAP header
 * (enhanced or extended size) plus the payload.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5435 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5437 u16 our_fcs, rcv_fcs;
5440 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5441 hdr_size = L2CAP_EXT_HDR_SIZE;
5443 hdr_size = L2CAP_ENH_HDR_SIZE;
5445 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After the trim, data+len points at the 2-byte FCS trailer. */
5446 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5447 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5448 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5450 if (our_fcs != rcv_fcs)
/* Send an F-bit-carrying frame: RNR when locally busy, otherwise try
 * to piggyback the F-bit on pending I-frames, falling back to an RR
 * s-frame if nothing consumed it.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5456 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5458 struct l2cap_ctrl control;
5460 BT_DBG("chan %p", chan);
5462 memset(&control, 0, sizeof(control));
5465 control.reqseq = chan->buffer_seq;
5466 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5468 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5469 control.super = L2CAP_SUPER_RNR;
5470 l2cap_send_sframe(chan, &control);
/* Remote just left busy: restart the retransmission timer if
 * unacked frames are outstanding.
 */
5473 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5474 chan->unacked_frames > 0)
5475 __set_retrans_timer(chan);
5477 /* Send pending iframes */
5478 l2cap_ertm_send(chan);
5480 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5481 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5482 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5485 control.super = L2CAP_SUPER_RR;
5486 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's fragment list, tracking the list tail in
 * *@last_frag, and keep skb's aggregate length/truesize accounting in
 * sync.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5490 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5491 struct sk_buff **last_frag)
5493 /* skb->len reflects data in skb as well as all fragments
5494 * skb->data_len reflects only data in fragments
5496 if (!skb_has_frag_list(skb))
5497 skb_shinfo(skb)->frag_list = new_frag;
5499 new_frag->next = NULL;
5501 (*last_frag)->next = new_frag;
5502 *last_frag = new_frag;
5504 skb->len += new_frag->len;
5505 skb->data_len += new_frag->len;
5506 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU according to the SAR bits:
 * unsegmented frames are delivered directly; START records the SDU
 * length (validated against the channel imtu), CONTINUE/END append
 * fragments and END delivers the completed SDU to the channel ops.
 * Error paths free the partial SDU.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5509 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5510 struct l2cap_ctrl *control)
5514 switch (control->sar) {
5515 case L2CAP_SAR_UNSEGMENTED:
5519 err = chan->ops->recv(chan, skb);
5522 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix. */
5526 chan->sdu_len = get_unaligned_le16(skb->data);
5527 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* The announced SDU must fit within the incoming MTU. */
5529 if (chan->sdu_len > chan->imtu) {
5534 if (skb->len >= chan->sdu_len)
5538 chan->sdu_last_frag = skb;
5544 case L2CAP_SAR_CONTINUE:
5548 append_skb_frag(chan->sdu, skb,
5549 &chan->sdu_last_frag);
5552 if (chan->sdu->len >= chan->sdu_len)
5562 append_skb_frag(chan->sdu, skb,
5563 &chan->sdu_last_frag);
/* On END the accumulated length must match exactly. */
5566 if (chan->sdu->len != chan->sdu_len)
5569 err = chan->ops->recv(chan, chan->sdu);
5572 /* Reassembly complete */
5574 chan->sdu_last_frag = NULL;
/* Failure path: drop the partially assembled SDU. */
5582 kfree_skb(chan->sdu);
5584 chan->sdu_last_frag = NULL;
/* Resegment outgoing data after a channel move.
 * NOTE(review): body not visible in this extraction — appears to be a
 * stub; confirm against the full source.
 */
5591 static int l2cap_resegment(struct l2cap_chan *chan)
/* Report local-busy transitions to the ERTM tx state machine; a no-op
 * for channels not in ERTM mode.
 */
5597 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5601 if (chan->mode != L2CAP_MODE_ERTM)
5604 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5605 l2cap_tx(chan, NULL, NULL, event);
/* Drain in-sequence I-frames from the SREJ queue into the reassembler,
 * advancing buffer_seq, until a gap, local busy, or an error. When the
 * queue empties, return to the RECV state and acknowledge.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5608 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5611 /* Pass sequential frames to l2cap_reassemble_sdu()
5612 * until a gap is encountered.
5615 BT_DBG("chan %p", chan);
5617 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5618 struct sk_buff *skb;
5619 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5620 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5622 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5627 skb_unlink(skb, &chan->srej_q);
5628 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5629 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5634 if (skb_queue_empty(&chan->srej_q)) {
5635 chan->rx_state = L2CAP_RX_STATE_RECV;
5636 l2cap_send_ack(chan);
/* Handle a received SREJ s-frame: validate reqseq, enforce the retry
 * limit, then retransmit the requested frame — honoring poll/final
 * bits and the CONN_SREJ_ACT bookkeeping that suppresses duplicate
 * retransmissions in the WAIT_F state.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5642 static void l2cap_handle_srej(struct l2cap_chan *chan,
5643 struct l2cap_ctrl *control)
5645 struct sk_buff *skb;
5647 BT_DBG("chan %p, control %p", chan, control);
/* SREJ for the next unused sequence number is a protocol error. */
5649 if (control->reqseq == chan->next_tx_seq) {
5650 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5651 l2cap_send_disconn_req(chan, ECONNRESET);
5655 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5658 BT_DBG("Seq %d not available for retransmission",
5663 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5664 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5665 l2cap_send_disconn_req(chan, ECONNRESET);
5669 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5671 if (control->poll) {
5672 l2cap_pass_to_tx(chan, control);
5674 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5675 l2cap_retransmit(chan, control);
5676 l2cap_ertm_send(chan);
5678 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5679 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5680 chan->srej_save_reqseq = control->reqseq;
5683 l2cap_pass_to_tx_fbit(chan, control);
5685 if (control->final) {
/* Only skip the retransmit if this final answers the SREJ we
 * already acted on (saved reqseq matches and SREJ_ACT was set).
 */
5686 if (chan->srej_save_reqseq != control->reqseq ||
5687 !test_and_clear_bit(CONN_SREJ_ACT,
5689 l2cap_retransmit(chan, control);
5691 l2cap_retransmit(chan, control);
5692 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5693 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5694 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ s-frame: validate reqseq and the retry limit,
 * then retransmit all outstanding frames — suppressing a duplicate
 * retransmit when a final bit answers a REJ already acted upon.
 *
 * NOTE(review): extraction dropped interleaved lines in this block;
 * visible lines are kept verbatim.
 */
5700 static void l2cap_handle_rej(struct l2cap_chan *chan,
5701 struct l2cap_ctrl *control)
5703 struct sk_buff *skb;
5705 BT_DBG("chan %p, control %p", chan, control);
5707 if (control->reqseq == chan->next_tx_seq) {
5708 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5709 l2cap_send_disconn_req(chan, ECONNRESET);
5713 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5715 if (chan->max_tx && skb &&
5716 bt_cb(skb)->control.retries >= chan->max_tx) {
5717 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5718 l2cap_send_disconn_req(chan, ECONNRESET);
5722 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5724 l2cap_pass_to_tx(chan, control);
5726 if (control->final) {
5727 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5728 l2cap_retransmit_all(chan, control);
5730 l2cap_retransmit_all(chan, control);
5731 l2cap_ertm_send(chan);
5732 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5733 set_bit(CONN_REJ_ACT, &chan->conn_state);
5737 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5739 BT_DBG("chan %p, txseq %d", chan, txseq);
5741 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5742 chan->expected_tx_seq);
5744 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5745 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5747 /* See notes below regarding "double poll" and
5750 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5751 BT_DBG("Invalid/Ignore - after SREJ");
5752 return L2CAP_TXSEQ_INVALID_IGNORE;
5754 BT_DBG("Invalid - in window after SREJ sent");
5755 return L2CAP_TXSEQ_INVALID;
5759 if (chan->srej_list.head == txseq) {
5760 BT_DBG("Expected SREJ");
5761 return L2CAP_TXSEQ_EXPECTED_SREJ;
5764 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5765 BT_DBG("Duplicate SREJ - txseq already stored");
5766 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5769 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5770 BT_DBG("Unexpected SREJ - not requested");
5771 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5775 if (chan->expected_tx_seq == txseq) {
5776 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5778 BT_DBG("Invalid - txseq outside tx window");
5779 return L2CAP_TXSEQ_INVALID;
5782 return L2CAP_TXSEQ_EXPECTED;
5786 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5787 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5788 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5789 return L2CAP_TXSEQ_DUPLICATE;
5792 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5793 /* A source of invalid packets is a "double poll" condition,
5794 * where delays cause us to send multiple poll packets. If
5795 * the remote stack receives and processes both polls,
5796 * sequence numbers can wrap around in such a way that a
5797 * resent frame has a sequence number that looks like new data
5798 * with a sequence gap. This would trigger an erroneous SREJ
5801 * Fortunately, this is impossible with a tx window that's
5802 * less than half of the maximum sequence number, which allows
5803 * invalid frames to be safely ignored.
5805 * With tx window sizes greater than half of the tx window
5806 * maximum, the frame is invalid and cannot be ignored. This
5807 * causes a disconnect.
5810 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5811 BT_DBG("Invalid/Ignore - txseq outside tx window");
5812 return L2CAP_TXSEQ_INVALID_IGNORE;
5814 BT_DBG("Invalid - txseq outside tx window");
5815 return L2CAP_TXSEQ_INVALID;
5818 BT_DBG("Unexpected - txseq indicates missing frames");
5819 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive-side event handler for the default RECV state.
 * Dispatches on the event type: incoming I-frames are classified by
 * tx sequence number and either reassembled, queued pending SREJ
 * recovery, ignored as duplicates, or treated as fatal; S-frame events
 * (RR/RNR/REJ/SREJ) drive the transmit side.
 * NOTE(review): this listing has elided lines (braces/breaks); comments
 * below describe only the visible logic.
 */
5823 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5824 struct l2cap_ctrl *control,
5825 struct sk_buff *skb, u8 event)
5828 bool skb_in_use = false;
5830 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5834 case L2CAP_EV_RECV_IFRAME:
5835 switch (l2cap_classify_txseq(chan, control->txseq)) {
/* In-sequence I-frame: advance expected/buffer seq, reassemble the
 * SDU, and honor a set F-bit by retransmitting unacked frames.
 */
5836 case L2CAP_TXSEQ_EXPECTED:
5837 l2cap_pass_to_tx(chan, control);
/* While locally busy the frame is dropped; it will be recovered
 * once local busy is exited.
 */
5839 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5840 BT_DBG("Busy, discarding expected seq %d",
5845 chan->expected_tx_seq = __next_seq(chan,
5848 chan->buffer_seq = chan->expected_tx_seq;
5851 err = l2cap_reassemble_sdu(chan, skb, control);
5855 if (control->final) {
5856 if (!test_and_clear_bit(CONN_REJ_ACT,
5857 &chan->conn_state)) {
5859 l2cap_retransmit_all(chan, control);
5860 l2cap_ertm_send(chan);
5864 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5865 l2cap_send_ack(chan);
/* Sequence gap detected: store the frame and start SREJ recovery. */
5867 case L2CAP_TXSEQ_UNEXPECTED:
5868 l2cap_pass_to_tx(chan, control);
5870 /* Can't issue SREJ frames in the local busy state.
5871 * Drop this frame, it will be seen as missing
5872 * when local busy is exited.
5874 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5875 BT_DBG("Busy, discarding unexpected seq %d",
5880 /* There was a gap in the sequence, so an SREJ
5881 * must be sent for each missing frame. The
5882 * current frame is stored for later use.
5884 skb_queue_tail(&chan->srej_q, skb);
5886 BT_DBG("Queued %p (queue len %d)", skb,
5887 skb_queue_len(&chan->srej_q));
5889 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5890 l2cap_seq_list_clear(&chan->srej_list);
5891 l2cap_send_srej(chan, control->txseq);
/* Transition to SREJ_SENT until the missing frames arrive. */
5893 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5895 case L2CAP_TXSEQ_DUPLICATE:
5896 l2cap_pass_to_tx(chan, control);
5898 case L2CAP_TXSEQ_INVALID_IGNORE:
/* Unrecoverable sequence error: tear the channel down. */
5900 case L2CAP_TXSEQ_INVALID:
5902 l2cap_send_disconn_req(chan, ECONNRESET);
/* Receiver Ready: acknowledge, then either answer the F-bit with a
 * retransmit-all, answer a poll, or just resume sending.
 */
5906 case L2CAP_EV_RECV_RR:
5907 l2cap_pass_to_tx(chan, control);
5908 if (control->final) {
5909 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5911 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5912 !__chan_is_moving(chan)) {
5914 l2cap_retransmit_all(chan, control);
5917 l2cap_ertm_send(chan);
5918 } else if (control->poll) {
5919 l2cap_send_i_or_rr_or_rnr(chan);
5921 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5922 &chan->conn_state) &&
5923 chan->unacked_frames)
5924 __set_retrans_timer(chan);
5926 l2cap_ertm_send(chan);
/* Receiver Not Ready: remote is busy; stop retransmissions and, on a
 * poll, respond with an F-bit.
 */
5929 case L2CAP_EV_RECV_RNR:
5930 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5931 l2cap_pass_to_tx(chan, control);
5932 if (control && control->poll) {
5933 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5934 l2cap_send_rr_or_rnr(chan, 0);
5936 __clear_retrans_timer(chan);
5937 l2cap_seq_list_clear(&chan->retrans_list);
5939 case L2CAP_EV_RECV_REJ:
5940 l2cap_handle_rej(chan, control);
5942 case L2CAP_EV_RECV_SREJ:
5943 l2cap_handle_srej(chan, control);
/* Free the skb unless a branch above queued it (skb_in_use). */
5949 if (skb && !skb_in_use) {
5950 BT_DBG("Freeing %p", skb);
/* ERTM receive-side event handler for the SREJ_SENT state: we have
 * requested retransmission of one or more missing I-frames and queue
 * all arriving frames on srej_q until the gaps are filled.
 * NOTE(review): this listing has elided lines (braces/breaks); comments
 * below describe only the visible logic.
 */
5957 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5958 struct l2cap_ctrl *control,
5959 struct sk_buff *skb, u8 event)
5962 u16 txseq = control->txseq;
5963 bool skb_in_use = false;
5965 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5969 case L2CAP_EV_RECV_IFRAME:
5970 switch (l2cap_classify_txseq(chan, txseq)) {
5971 case L2CAP_TXSEQ_EXPECTED:
5972 /* Keep frame for reassembly later */
5973 l2cap_pass_to_tx(chan, control);
5974 skb_queue_tail(&chan->srej_q, skb);
5976 BT_DBG("Queued %p (queue len %d)", skb,
5977 skb_queue_len(&chan->srej_q));
5979 chan->expected_tx_seq = __next_seq(chan, txseq);
/* A retransmitted frame we asked for via SREJ arrived: pop it from
 * the SREJ list and try to drain the queued I-frames in order.
 */
5981 case L2CAP_TXSEQ_EXPECTED_SREJ:
5982 l2cap_seq_list_pop(&chan->srej_list);
5984 l2cap_pass_to_tx(chan, control);
5985 skb_queue_tail(&chan->srej_q, skb);
5987 BT_DBG("Queued %p (queue len %d)", skb,
5988 skb_queue_len(&chan->srej_q));
5990 err = l2cap_rx_queued_iframes(chan);
5995 case L2CAP_TXSEQ_UNEXPECTED:
5996 /* Got a frame that can't be reassembled yet.
5997 * Save it for later, and send SREJs to cover
5998 * the missing frames.
6000 skb_queue_tail(&chan->srej_q, skb);
6002 BT_DBG("Queued %p (queue len %d)", skb,
6003 skb_queue_len(&chan->srej_q));
6005 l2cap_pass_to_tx(chan, control);
6006 l2cap_send_srej(chan, control->txseq);
6008 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6009 /* This frame was requested with an SREJ, but
6010 * some expected retransmitted frames are
6011 * missing. Request retransmission of missing
6014 skb_queue_tail(&chan->srej_q, skb);
6016 BT_DBG("Queued %p (queue len %d)", skb,
6017 skb_queue_len(&chan->srej_q));
6019 l2cap_pass_to_tx(chan, control);
6020 l2cap_send_srej_list(chan, control->txseq);
6022 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6023 /* We've already queued this frame. Drop this copy. */
6024 l2cap_pass_to_tx(chan, control);
6026 case L2CAP_TXSEQ_DUPLICATE:
6027 /* Expecting a later sequence number, so this frame
6028 * was already received. Ignore it completely.
6031 case L2CAP_TXSEQ_INVALID_IGNORE:
/* Unrecoverable sequence error: tear the channel down. */
6033 case L2CAP_TXSEQ_INVALID:
6035 l2cap_send_disconn_req(chan, ECONNRESET);
6039 case L2CAP_EV_RECV_RR:
6040 l2cap_pass_to_tx(chan, control);
6041 if (control->final) {
6042 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6044 if (!test_and_clear_bit(CONN_REJ_ACT,
6045 &chan->conn_state)) {
6047 l2cap_retransmit_all(chan, control);
6050 l2cap_ertm_send(chan);
6051 } else if (control->poll) {
6052 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6053 &chan->conn_state) &&
6054 chan->unacked_frames) {
6055 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the tail of the SREJ list. */
6058 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6059 l2cap_send_srej_tail(chan);
6061 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6062 &chan->conn_state) &&
6063 chan->unacked_frames)
6064 __set_retrans_timer(chan);
6066 l2cap_send_ack(chan);
/* RNR: remote busy. On a poll, re-issue the SREJ tail; otherwise
 * answer with a plain RR carrying the current buffer_seq.
 */
6069 case L2CAP_EV_RECV_RNR:
6070 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6071 l2cap_pass_to_tx(chan, control);
6072 if (control->poll) {
6073 l2cap_send_srej_tail(chan);
6075 struct l2cap_ctrl rr_control;
6076 memset(&rr_control, 0, sizeof(rr_control));
6077 rr_control.sframe = 1;
6078 rr_control.super = L2CAP_SUPER_RR;
6079 rr_control.reqseq = chan->buffer_seq;
6080 l2cap_send_sframe(chan, &rr_control);
6084 case L2CAP_EV_RECV_REJ:
6085 l2cap_handle_rej(chan, control);
6087 case L2CAP_EV_RECV_SREJ:
6088 l2cap_handle_srej(chan, control);
/* Free the skb unless a branch above queued it (skb_in_use). */
6092 if (skb && !skb_in_use) {
6093 BT_DBG("Freeing %p", skb);
/* Finish an AMP channel move: return the receiver to the RECV state,
 * adopt the new controller's MTU (block_mtu for an AMP/hs link,
 * acl_mtu otherwise — the selecting condition is elided in this
 * listing), and resegment pending outbound data for the new MTU.
 */
6100 static int l2cap_finish_move(struct l2cap_chan *chan)
6102 BT_DBG("chan %p", chan);
6104 chan->rx_state = L2CAP_RX_STATE_RECV;
6107 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6109 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6111 return l2cap_resegment(chan);
/* Receive-state handler for WAIT_P: we are waiting for a frame with
 * the P (poll) bit after a channel move.  On the poll, resync the
 * transmit queue to the peer's reqseq, finish the move, and respond
 * with the F-bit set; the frame itself is then re-dispatched to the
 * RECV-state handler (with a NULL skb for I-frames, which the elided
 * lines presumably drop — TODO confirm against the full source).
 */
6114 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6115 struct l2cap_ctrl *control,
6116 struct sk_buff *skb, u8 event)
6120 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6126 l2cap_process_reqseq(chan, control->reqseq);
6128 if (!skb_queue_empty(&chan->tx_q))
6129 chan->tx_send_head = skb_peek(&chan->tx_q);
6131 chan->tx_send_head = NULL;
6133 /* Rewind next_tx_seq to the point expected
6136 chan->next_tx_seq = control->reqseq;
6137 chan->unacked_frames = 0;
6139 err = l2cap_finish_move(chan);
6143 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6144 l2cap_send_i_or_rr_or_rnr(chan);
6146 if (event == L2CAP_EV_RECV_IFRAME)
6149 return l2cap_rx_state_recv(chan, control, NULL, event);
/* Receive-state handler for WAIT_F: after a channel move we wait for a
 * frame carrying the F (final) bit.  Non-final frames fall through
 * (early-return path elided in this listing).  On the final frame:
 * clear remote-busy, return to RECV state, resync the transmit queue
 * to reqseq, adopt the new link MTU, resegment, and process the frame
 * through the normal RECV-state handler.
 */
6152 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6153 struct l2cap_ctrl *control,
6154 struct sk_buff *skb, u8 event)
6158 if (!control->final)
6161 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6163 chan->rx_state = L2CAP_RX_STATE_RECV;
6164 l2cap_process_reqseq(chan, control->reqseq);
6166 if (!skb_queue_empty(&chan->tx_q))
6167 chan->tx_send_head = skb_peek(&chan->tx_q);
6169 chan->tx_send_head = NULL;
6171 /* Rewind next_tx_seq to the point expected
6174 chan->next_tx_seq = control->reqseq;
6175 chan->unacked_frames = 0;
/* block_mtu for an AMP/hs link, acl_mtu otherwise (condition elided). */
6178 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6180 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6182 err = l2cap_resegment(chan);
6185 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Sanity-check an incoming reqseq (ack) value: it is valid only if it
 * acknowledges a frame in the currently-unacked window, i.e. its
 * offset from next_tx_seq does not exceed the number of sent-but-
 * unacked frames (modular arithmetic via __seq_offset).
 */
6190 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6192 /* Make sure reqseq is for a packet that has been sent but not acked */
6195 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6196 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: validate the frame's reqseq, then
 * route the event to the handler for the channel's current rx_state.
 * An out-of-window reqseq is a protocol violation and disconnects the
 * channel.
 */
6199 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6200 struct sk_buff *skb, u8 event)
6204 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6205 control, skb, event, chan->rx_state);
6207 if (__valid_reqseq(chan, control->reqseq)) {
6208 switch (chan->rx_state) {
6209 case L2CAP_RX_STATE_RECV:
6210 err = l2cap_rx_state_recv(chan, control, skb, event);
6212 case L2CAP_RX_STATE_SREJ_SENT:
6213 err = l2cap_rx_state_srej_sent(chan, control, skb,
6216 case L2CAP_RX_STATE_WAIT_P:
6217 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6219 case L2CAP_RX_STATE_WAIT_F:
6220 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6227 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6228 control->reqseq, chan->next_tx_seq,
6229 chan->expected_ack_seq);
6230 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only exactly in-sequence I-frames are
 * reassembled; anything else is discarded (streaming mode has no
 * retransmission).  On reassembly failure, the partial SDU is freed
 * and the fragment bookkeeping reset.  Regardless of outcome, the
 * last_acked/expected sequence numbers track the received txseq.
 */
6236 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6237 struct sk_buff *skb)
6241 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6244 if (l2cap_classify_txseq(chan, control->txseq) ==
6245 L2CAP_TXSEQ_EXPECTED) {
6246 l2cap_pass_to_tx(chan, control);
6248 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6249 __next_seq(chan, chan->buffer_seq));
6251 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6253 l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly-error path (condition elided): drop the partial SDU. */
6256 kfree_skb(chan->sdu);
6259 chan->sdu_last_frag = NULL;
6263 BT_DBG("Freeing %p", skb);
6268 chan->last_acked_seq = control->txseq;
6269 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Validate and dispatch one ERTM/streaming data frame: unpack the
 * control field, verify FCS, check the payload against MPS, validate
 * F/P bit combinations for the current tx state, then hand I-frames to
 * l2cap_rx()/l2cap_stream_rx() and S-frames to l2cap_rx() with the
 * event derived from the supervisory function.  Protocol violations
 * disconnect the channel.
 */
6274 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6276 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6280 __unpack_control(chan, skb);
6285 * We can just drop the corrupted I-frame here.
6286 * Receiver will miss it and start proper recovery
6287 * procedures and ask for retransmission.
6289 if (l2cap_check_fcs(chan, skb))
/* Exclude SDU-length and FCS fields from the payload length before
 * comparing against the negotiated MPS.
 */
6292 if (!control->sframe && control->sar == L2CAP_SAR_START)
6293 len -= L2CAP_SDULEN_SIZE;
6295 if (chan->fcs == L2CAP_FCS_CRC16)
6296 len -= L2CAP_FCS_SIZE;
6298 if (len > chan->mps) {
6299 l2cap_send_disconn_req(chan, ECONNRESET);
6303 if (!control->sframe) {
6306 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6307 control->sar, control->reqseq, control->final,
6310 /* Validate F-bit - F=0 always valid, F=1 only
6311 * valid in TX WAIT_F
6313 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6316 if (chan->mode != L2CAP_MODE_STREAMING) {
6317 event = L2CAP_EV_RECV_IFRAME;
6318 err = l2cap_rx(chan, control, skb, event);
6320 err = l2cap_stream_rx(chan, control, skb);
6324 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit supervisory function to the corresponding rx event. */
6326 const u8 rx_func_to_event[4] = {
6327 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6328 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6331 /* Only I-frames are expected in streaming mode */
6332 if (chan->mode == L2CAP_MODE_STREAMING)
6335 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6336 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation. */
6340 BT_ERR("Trailing bytes: %d in sframe", len);
6341 l2cap_send_disconn_req(chan, ECONNRESET);
6345 /* Validate F and P bits */
6346 if (control->final && (control->poll ||
6347 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6350 event = rx_func_to_event[control->super];
6351 if (l2cap_rx(chan, control, skb, event))
6352 l2cap_send_disconn_req(chan, ECONNRESET);
/* Deliver an inbound frame to the channel identified by scid.  A frame
 * for the A2MP CID may create the A2MP channel on demand; frames for
 * unknown CIDs are dropped.  Connected channels are fed according to
 * their mode: basic mode goes straight to the socket layer (dropped if
 * it exceeds imtu — basic mode has no flow control), ERTM/streaming go
 * through l2cap_data_rcv().
 */
6362 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6363 struct sk_buff *skb)
6365 struct l2cap_chan *chan;
6367 chan = l2cap_get_chan_by_scid(conn, cid);
6369 if (cid == L2CAP_CID_A2MP) {
6370 chan = a2mp_channel_create(conn, skb);
6376 l2cap_chan_lock(chan);
6378 BT_DBG("unknown cid 0x%4.4x", cid);
6379 /* Drop packet and return */
6385 BT_DBG("chan %p, len %d", chan, skb->len);
6387 if (chan->state != BT_CONNECTED)
6390 switch (chan->mode) {
6391 case L2CAP_MODE_BASIC:
6392 /* If socket recv buffers overflows we drop data here
6393 * which is *bad* because L2CAP has to be reliable.
6394 * But we don't have any other choice. L2CAP doesn't
6395 * provide flow control mechanism. */
6397 if (chan->imtu < skb->len)
6400 if (!chan->ops->recv(chan, skb))
6404 case L2CAP_MODE_ERTM:
6405 case L2CAP_MODE_STREAMING:
6406 l2cap_data_rcv(chan, skb);
6410 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6418 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame: only on ACL links, to a
 * global channel listening on the given PSM that is bound or connected,
 * and only if it fits the channel's imtu; otherwise dropped (free path
 * elided in this listing).
 */
6421 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6422 struct sk_buff *skb)
6424 struct hci_conn *hcon = conn->hcon;
6425 struct l2cap_chan *chan;
6427 if (hcon->type != ACL_LINK)
6430 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6434 BT_DBG("chan %p, len %d", chan, skb->len);
6436 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6439 if (chan->imtu < skb->len)
6442 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel (LE) frame to the connected global ATT
 * channel for this address pair; only on LE links and only if the
 * frame fits the channel's imtu.
 */
6449 static void l2cap_att_channel(struct l2cap_conn *conn,
6450 struct sk_buff *skb)
6452 struct hci_conn *hcon = conn->hcon;
6453 struct l2cap_chan *chan;
6455 if (hcon->type != LE_LINK)
6458 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6459 conn->src, conn->dst);
6463 BT_DBG("chan %p, len %d", chan, skb->len);
6465 if (chan->imtu < skb->len)
6468 if (!chan->ops->recv(chan, skb))
/* Parse the basic L2CAP header of a complete frame and route it by
 * CID: signaling, connectionless (PSM-prefixed), ATT, LE signaling,
 * SMP (connection dropped with EACCES on SMP failure), or a dynamic
 * data channel.  Frames whose header length disagrees with the skb
 * length are rejected (drop path elided in this listing).
 */
6475 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6477 struct l2cap_hdr *lh = (void *) skb->data;
6481 skb_pull(skb, L2CAP_HDR_SIZE);
6482 cid = __le16_to_cpu(lh->cid);
6483 len = __le16_to_cpu(lh->len);
6485 if (len != skb->len) {
6490 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6493 case L2CAP_CID_SIGNALING:
6494 l2cap_sig_channel(conn, skb);
6497 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a 2-byte PSM before the payload. */
6498 psm = get_unaligned((__le16 *) skb->data);
6499 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6500 l2cap_conless_channel(conn, psm, skb);
6504 l2cap_att_channel(conn, skb);
6507 case L2CAP_CID_LE_SIGNALING:
6508 l2cap_le_sig_channel(conn, skb);
6512 if (smp_sig_channel(conn, skb))
6513 l2cap_conn_del(conn->hcon, EACCES);
6517 l2cap_data_channel(conn, cid, skb);
6522 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI connect indication: scan listening channels and compute the link
 * policy for an incoming connection from bdaddr.  lm1 accumulates the
 * policy of listeners bound to this adapter's address (exact match),
 * lm2 of wildcard (BDADDR_ANY) listeners; the exact-match policy wins
 * when present.
 */
6524 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6526 int exact = 0, lm1 = 0, lm2 = 0;
6527 struct l2cap_chan *c;
6529 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6531 /* Find listening sockets and check their link_mode */
6532 read_lock(&chan_list_lock);
6533 list_for_each_entry(c, &chan_list, global_l) {
6534 struct sock *sk = c->sk;
6536 if (c->state != BT_LISTEN)
6539 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6540 lm1 |= HCI_LM_ACCEPT;
6541 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6542 lm1 |= HCI_LM_MASTER;
6544 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6545 lm2 |= HCI_LM_ACCEPT;
6546 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6547 lm2 |= HCI_LM_MASTER;
6550 read_unlock(&chan_list_lock);
6552 return exact ? lm1 : lm2;
/* HCI connect confirmation: on success, create the L2CAP connection
 * for the new hcon and mark it ready; on failure, tear down any
 * existing L2CAP state with the mapped errno.
 */
6555 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6557 struct l2cap_conn *conn;
6559 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6562 conn = l2cap_conn_add(hcon);
6564 l2cap_conn_ready(conn);
6566 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI disconnect indication: report the reason L2CAP wants to give for
 * the disconnect.  Falls back to "remote user terminated" when no
 * L2CAP connection state exists (guard elided in this listing).
 */
6570 int l2cap_disconn_ind(struct hci_conn *hcon)
6572 struct l2cap_conn *conn = hcon->l2cap_data;
6574 BT_DBG("hcon %p", hcon);
6577 return HCI_ERROR_REMOTE_USER_TERM;
6578 return conn->disc_reason;
/* HCI disconnect confirmation: tear down the L2CAP connection for this
 * hcon, translating the HCI reason code to an errno.
 */
6581 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6583 BT_DBG("hcon %p reason %d", hcon, reason);
6585 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * When encryption is dropped: MEDIUM security starts the encryption
 * timer, HIGH security closes the channel immediately.  When
 * encryption comes up, the pending timer is cleared for MEDIUM.
 */
6588 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6590 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6593 if (encrypt == 0x00) {
6594 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6595 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6596 } else if (chan->sec_level == BT_SECURITY_HIGH)
6597 l2cap_chan_close(chan, ECONNREFUSED);
6599 if (chan->sec_level == BT_SECURITY_MEDIUM)
6600 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) confirmation: walk every
 * channel on the connection and advance its state machine according to
 * the outcome.  LE links hand off to SMP key distribution; ATT
 * channels become ready on successful encryption; BR/EDR channels in
 * BT_CONNECT proceed to the connection request, and channels in
 * BT_CONNECT2 (incoming, waiting on security) get their connect
 * response sent now.
 * NOTE(review): several lines are elided in this listing; comments
 * describe only the visible logic.
 */
6604 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6606 struct l2cap_conn *conn = hcon->l2cap_data;
6607 struct l2cap_chan *chan;
6612 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6614 if (hcon->type == LE_LINK) {
6615 if (!status && encrypt)
6616 smp_distribute_keys(conn, 0);
6617 cancel_delayed_work(&conn->security_timer);
6620 mutex_lock(&conn->chan_lock);
6622 list_for_each_entry(chan, &conn->chan_l, list) {
6623 l2cap_chan_lock(chan);
6625 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6626 state_to_string(chan->state));
/* A2MP fixed channels are unaffected by link security changes. */
6628 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6629 l2cap_chan_unlock(chan);
6633 if (chan->scid == L2CAP_CID_ATT) {
6634 if (!status && encrypt) {
6635 chan->sec_level = hcon->sec_level;
6636 l2cap_chan_ready(chan);
6639 l2cap_chan_unlock(chan);
6643 if (!__l2cap_no_conn_pending(chan)) {
6644 l2cap_chan_unlock(chan);
/* Already-established channels: wake the socket and apply the
 * encryption policy (timer/close) via l2cap_check_encryption().
 */
6648 if (!status && (chan->state == BT_CONNECTED ||
6649 chan->state == BT_CONFIG)) {
6650 struct sock *sk = chan->sk;
6652 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6653 sk->sk_state_change(sk);
6655 l2cap_check_encryption(chan, encrypt);
6656 l2cap_chan_unlock(chan);
/* Outgoing channel waiting on security: start the connection on
 * success, otherwise arm the disconnect timer.
 */
6660 if (chan->state == BT_CONNECT) {
6662 l2cap_start_connection(chan);
6664 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6666 } else if (chan->state == BT_CONNECT2) {
6667 struct sock *sk = chan->sk;
6668 struct l2cap_conn_rsp rsp;
/* Deferred setup keeps the response pending on user authorization;
 * otherwise accept (BT_CONFIG) or, on security failure, refuse with
 * SEC_BLOCK and schedule a disconnect.
 */
6674 if (test_bit(BT_SK_DEFER_SETUP,
6675 &bt_sk(sk)->flags)) {
6676 res = L2CAP_CR_PEND;
6677 stat = L2CAP_CS_AUTHOR_PEND;
6678 chan->ops->defer(chan);
6680 __l2cap_state_change(chan, BT_CONFIG);
6681 res = L2CAP_CR_SUCCESS;
6682 stat = L2CAP_CS_NO_INFO;
6685 __l2cap_state_change(chan, BT_DISCONN);
6686 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6687 res = L2CAP_CR_SEC_BLOCK;
6688 stat = L2CAP_CS_NO_INFO;
6693 rsp.scid = cpu_to_le16(chan->dcid);
6694 rsp.dcid = cpu_to_le16(chan->scid);
6695 rsp.result = cpu_to_le16(res);
6696 rsp.status = cpu_to_le16(stat);
6697 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On acceptance, immediately follow up with our configuration request
 * if one has not been sent yet.
 */
6700 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6701 res == L2CAP_CR_SUCCESS) {
6703 set_bit(CONF_REQ_SENT, &chan->conf_state);
6704 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6706 l2cap_build_conf_req(chan, buf),
6708 chan->num_conf_req++;
6712 l2cap_chan_unlock(chan);
6715 mutex_unlock(&conn->chan_lock);
/* Entry point for ACL data from HCI: reassemble L2CAP frames from ACL
 * fragments.  A start fragment carrying a complete frame is delivered
 * immediately; otherwise an rx_skb is allocated for the expected total
 * length and continuation fragments are appended until rx_len reaches
 * zero, at which point the complete frame is handed to
 * l2cap_recv_frame().  Length violations mark the connection
 * unreliable (ECOMM).
 * NOTE(review): some lines are elided in this listing; comments
 * describe only the visible logic.
 */
6720 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6722 struct l2cap_conn *conn = hcon->l2cap_data;
6723 struct l2cap_hdr *hdr;
6726 /* For AMP controller do not create l2cap conn */
6727 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6731 conn = l2cap_conn_add(hcon);
6736 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6740 case ACL_START_NO_FLUSH:
/* A start fragment while reassembly is in progress means the previous
 * frame was truncated: discard it and flag the connection.
 */
6743 BT_ERR("Unexpected start frame (len %d)", skb->len);
6744 kfree_skb(conn->rx_skb);
6745 conn->rx_skb = NULL;
6747 l2cap_conn_unreliable(conn, ECOMM);
6750 /* Start fragment always begin with Basic L2CAP header */
6751 if (skb->len < L2CAP_HDR_SIZE) {
6752 BT_ERR("Frame is too short (len %d)", skb->len);
6753 l2cap_conn_unreliable(conn, ECOMM);
6757 hdr = (struct l2cap_hdr *) skb->data;
6758 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6760 if (len == skb->len) {
6761 /* Complete frame received */
6762 l2cap_recv_frame(conn, skb);
6766 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6768 if (skb->len > len) {
6769 BT_ERR("Frame is too long (len %d, expected len %d)",
6771 l2cap_conn_unreliable(conn, ECOMM);
6775 /* Allocate skb for the complete frame (with header) */
6776 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6780 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6782 conn->rx_len = len - skb->len;
/* Continuation-fragment path: append to rx_skb, tracking rx_len. */
6786 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6788 if (!conn->rx_len) {
6789 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6790 l2cap_conn_unreliable(conn, ECOMM);
6794 if (skb->len > conn->rx_len) {
6795 BT_ERR("Fragment is too long (len %d, expected %d)",
6796 skb->len, conn->rx_len);
6797 kfree_skb(conn->rx_skb);
6798 conn->rx_skb = NULL;
6800 l2cap_conn_unreliable(conn, ECOMM);
6804 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6806 conn->rx_len -= skb->len;
6808 if (!conn->rx_len) {
6809 /* Complete frame received. l2cap_recv_frame
6810 * takes ownership of the skb so set the global
6811 * rx_skb pointer to NULL first.
6813 struct sk_buff *rx_skb = conn->rx_skb;
6814 conn->rx_skb = NULL;
6815 l2cap_recv_frame(conn, rx_skb);
/* seq_file show callback for the "l2cap" debugfs entry: dump one line
 * per global channel (addresses, state, PSM, CIDs, MTUs, security
 * level, mode) under the channel-list read lock.
 */
6825 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6827 struct l2cap_chan *c;
6829 read_lock(&chan_list_lock);
6831 list_for_each_entry(c, &chan_list, global_l) {
6832 struct sock *sk = c->sk;
6834 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6835 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6836 c->state, __le16_to_cpu(c->psm),
6837 c->scid, c->dcid, c->imtu, c->omtu,
6838 c->sec_level, c->mode);
6841 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the seq_file single_open helper to
 * l2cap_debugfs_show.
 */
6846 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6848 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (standard seq_file
 * single_open pattern), and the dentry handle used to remove the entry
 * on module exit.
 */
6851 static const struct file_operations l2cap_debugfs_fops = {
6852 .open = l2cap_debugfs_open,
6854 .llseek = seq_lseek,
6855 .release = single_release,
6858 static struct dentry *l2cap_debugfs;
/* Module init for the L2CAP layer: register the socket interface and
 * create the read-only debugfs entry.  A debugfs failure is only
 * logged — it is not fatal to initialization.
 */
6860 int __init l2cap_init(void)
6864 err = l2cap_init_sockets();
6869 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6870 NULL, &l2cap_debugfs_fops);
6872 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs entry and unregister the L2CAP
 * socket interface (reverse of l2cap_init()).
 */
6878 void l2cap_exit(void)
6880 debugfs_remove(l2cap_debugfs);
6881 l2cap_cleanup_sockets();
/* Writable (0644) module parameter to globally disable ERTM support. */
6884 module_param(disable_ertm, bool, 0644);
6885 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");