/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
/* ---- L2CAP channels ---- */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state, 0);
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
239 chan->ops->state_change(chan, chan->state, err);
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
244 chan->ops->state_change(chan, chan->state, err);
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocations.
 */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its state (ECONNREFUSED while connecting or
 * configured, ETIMEDOUT otherwise) and drop the timer's reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
493 void l2cap_le_flowctl_init(struct l2cap_chan *chan)
495 chan->imtu = L2CAP_DEFAULT_MTU;
496 chan->omtu = L2CAP_LE_MIN_MTU;
497 chan->mode = L2CAP_MODE_LE_FLOWCTL;
498 chan->tx_credits = 0;
499 chan->rx_credits = L2CAP_LE_MAX_CREDITS;
502 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
504 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
505 __le16_to_cpu(chan->psm), chan->dcid);
507 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
511 switch (chan->chan_type) {
512 case L2CAP_CHAN_CONN_ORIENTED:
513 if (conn->hcon->type == LE_LINK) {
515 chan->omtu = L2CAP_DEFAULT_MTU;
516 if (chan->dcid == L2CAP_CID_ATT)
517 chan->scid = L2CAP_CID_ATT;
519 chan->scid = l2cap_alloc_cid(conn);
521 /* Alloc CID for connection-oriented socket */
522 chan->scid = l2cap_alloc_cid(conn);
523 chan->omtu = L2CAP_DEFAULT_MTU;
527 case L2CAP_CHAN_CONN_LESS:
528 /* Connectionless socket */
529 chan->scid = L2CAP_CID_CONN_LESS;
530 chan->dcid = L2CAP_CID_CONN_LESS;
531 chan->omtu = L2CAP_DEFAULT_MTU;
534 case L2CAP_CHAN_CONN_FIX_A2MP:
535 chan->scid = L2CAP_CID_A2MP;
536 chan->dcid = L2CAP_CID_A2MP;
537 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
538 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
542 /* Raw socket can send/recv signalling messages only */
543 chan->scid = L2CAP_CID_SIGNALING;
544 chan->dcid = L2CAP_CID_SIGNALING;
545 chan->omtu = L2CAP_DEFAULT_MTU;
548 chan->local_id = L2CAP_BESTEFFORT_ID;
549 chan->local_stype = L2CAP_SERV_BESTEFFORT;
550 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
551 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
552 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
553 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
555 l2cap_chan_hold(chan);
557 hci_conn_hold(conn->hcon);
559 list_add(&chan->list, &conn->chan_l);
562 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
564 mutex_lock(&conn->chan_lock);
565 __l2cap_chan_add(conn, chan);
566 mutex_unlock(&conn->chan_lock);
569 void l2cap_chan_del(struct l2cap_chan *chan, int err)
571 struct l2cap_conn *conn = chan->conn;
573 __clear_chan_timer(chan);
575 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
578 struct amp_mgr *mgr = conn->hcon->amp_mgr;
579 /* Delete from channel list */
580 list_del(&chan->list);
582 l2cap_chan_put(chan);
586 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
587 hci_conn_drop(conn->hcon);
589 if (mgr && mgr->bredr_chan == chan)
590 mgr->bredr_chan = NULL;
593 if (chan->hs_hchan) {
594 struct hci_chan *hs_hchan = chan->hs_hchan;
596 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
597 amp_disconnect_logical_link(hs_hchan);
600 chan->ops->teardown(chan, err);
602 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
606 case L2CAP_MODE_BASIC:
609 case L2CAP_MODE_LE_FLOWCTL:
610 skb_queue_purge(&chan->tx_q);
613 case L2CAP_MODE_ERTM:
614 __clear_retrans_timer(chan);
615 __clear_monitor_timer(chan);
616 __clear_ack_timer(chan);
618 skb_queue_purge(&chan->srej_q);
620 l2cap_seq_list_free(&chan->srej_list);
621 l2cap_seq_list_free(&chan->retrans_list);
625 case L2CAP_MODE_STREAMING:
626 skb_queue_purge(&chan->tx_q);
633 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
635 struct l2cap_conn *conn = chan->conn;
636 struct l2cap_le_conn_rsp rsp;
639 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
640 result = L2CAP_CR_AUTHORIZATION;
642 result = L2CAP_CR_BAD_PSM;
644 l2cap_state_change(chan, BT_DISCONN);
646 rsp.dcid = cpu_to_le16(chan->scid);
647 rsp.mtu = cpu_to_le16(chan->imtu);
648 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
649 rsp.credits = cpu_to_le16(chan->rx_credits);
650 rsp.result = cpu_to_le16(result);
652 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
656 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
658 struct l2cap_conn *conn = chan->conn;
659 struct l2cap_conn_rsp rsp;
662 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
663 result = L2CAP_CR_SEC_BLOCK;
665 result = L2CAP_CR_BAD_PSM;
667 l2cap_state_change(chan, BT_DISCONN);
669 rsp.scid = cpu_to_le16(chan->dcid);
670 rsp.dcid = cpu_to_le16(chan->scid);
671 rsp.result = cpu_to_le16(result);
672 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
674 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
677 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
679 struct l2cap_conn *conn = chan->conn;
681 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
683 switch (chan->state) {
685 chan->ops->teardown(chan, 0);
690 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
691 * check for chan->psm.
693 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
694 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
695 l2cap_send_disconn_req(chan, reason);
697 l2cap_chan_del(chan, reason);
701 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
702 if (conn->hcon->type == ACL_LINK)
703 l2cap_chan_connect_reject(chan);
704 else if (conn->hcon->type == LE_LINK)
705 l2cap_chan_le_connect_reject(chan);
708 l2cap_chan_del(chan, reason);
713 l2cap_chan_del(chan, reason);
717 chan->ops->teardown(chan, 0);
722 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
724 switch (chan->chan_type) {
726 switch (chan->sec_level) {
727 case BT_SECURITY_HIGH:
728 return HCI_AT_DEDICATED_BONDING_MITM;
729 case BT_SECURITY_MEDIUM:
730 return HCI_AT_DEDICATED_BONDING;
732 return HCI_AT_NO_BONDING;
735 case L2CAP_CHAN_CONN_LESS:
736 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
737 if (chan->sec_level == BT_SECURITY_LOW)
738 chan->sec_level = BT_SECURITY_SDP;
740 if (chan->sec_level == BT_SECURITY_HIGH)
741 return HCI_AT_NO_BONDING_MITM;
743 return HCI_AT_NO_BONDING;
745 case L2CAP_CHAN_CONN_ORIENTED:
746 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
747 if (chan->sec_level == BT_SECURITY_LOW)
748 chan->sec_level = BT_SECURITY_SDP;
750 if (chan->sec_level == BT_SECURITY_HIGH)
751 return HCI_AT_NO_BONDING_MITM;
753 return HCI_AT_NO_BONDING;
757 switch (chan->sec_level) {
758 case BT_SECURITY_HIGH:
759 return HCI_AT_GENERAL_BONDING_MITM;
760 case BT_SECURITY_MEDIUM:
761 return HCI_AT_GENERAL_BONDING;
763 return HCI_AT_NO_BONDING;
769 /* Service level security */
770 int l2cap_chan_check_security(struct l2cap_chan *chan)
772 struct l2cap_conn *conn = chan->conn;
775 if (conn->hcon->type == LE_LINK)
776 return smp_conn_security(conn->hcon, chan->sec_level);
778 auth_type = l2cap_get_auth_type(chan);
780 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
783 static u8 l2cap_get_ident(struct l2cap_conn *conn)
787 /* Get next available identificator.
788 * 1 - 128 are used by kernel.
789 * 129 - 199 are reserved.
790 * 200 - 254 are used by utilities like l2ping, etc.
793 spin_lock(&conn->lock);
795 if (++conn->tx_ident > 128)
800 spin_unlock(&conn->lock);
805 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
808 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
811 BT_DBG("code 0x%2.2x", code);
816 if (lmp_no_flush_capable(conn->hcon->hdev))
817 flags = ACL_START_NO_FLUSH;
821 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
822 skb->priority = HCI_PRIO_MAX;
824 hci_send_acl(conn->hchan, skb, flags);
827 static bool __chan_is_moving(struct l2cap_chan *chan)
829 return chan->move_state != L2CAP_MOVE_STABLE &&
830 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
833 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
835 struct hci_conn *hcon = chan->conn->hcon;
838 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
841 if (chan->hs_hcon && !__chan_is_moving(chan)) {
843 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
850 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
851 lmp_no_flush_capable(hcon->hdev))
852 flags = ACL_START_NO_FLUSH;
856 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
857 hci_send_acl(chan->conn->hchan, skb, flags);
860 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
862 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
863 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
865 if (enh & L2CAP_CTRL_FRAME_TYPE) {
868 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
869 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
876 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
877 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
884 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
886 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
887 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
889 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
892 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
893 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
900 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
901 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
908 static inline void __unpack_control(struct l2cap_chan *chan,
911 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
912 __unpack_extended_control(get_unaligned_le32(skb->data),
913 &bt_cb(skb)->control);
914 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
916 __unpack_enhanced_control(get_unaligned_le16(skb->data),
917 &bt_cb(skb)->control);
918 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
922 static u32 __pack_extended_control(struct l2cap_ctrl *control)
926 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
927 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
929 if (control->sframe) {
930 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
931 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
932 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
934 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
935 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
941 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
945 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
946 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
948 if (control->sframe) {
949 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
950 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
951 packed |= L2CAP_CTRL_FRAME_TYPE;
953 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
954 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
960 static inline void __pack_control(struct l2cap_chan *chan,
961 struct l2cap_ctrl *control,
964 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
965 put_unaligned_le32(__pack_extended_control(control),
966 skb->data + L2CAP_HDR_SIZE);
968 put_unaligned_le16(__pack_enhanced_control(control),
969 skb->data + L2CAP_HDR_SIZE);
973 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
975 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
976 return L2CAP_EXT_HDR_SIZE;
978 return L2CAP_ENH_HDR_SIZE;
981 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
985 struct l2cap_hdr *lh;
986 int hlen = __ertm_hdr_size(chan);
988 if (chan->fcs == L2CAP_FCS_CRC16)
989 hlen += L2CAP_FCS_SIZE;
991 skb = bt_skb_alloc(hlen, GFP_KERNEL);
994 return ERR_PTR(-ENOMEM);
996 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
997 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
998 lh->cid = cpu_to_le16(chan->dcid);
1000 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1001 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1003 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1005 if (chan->fcs == L2CAP_FCS_CRC16) {
1006 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1007 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1010 skb->priority = HCI_PRIO_MAX;
1014 static void l2cap_send_sframe(struct l2cap_chan *chan,
1015 struct l2cap_ctrl *control)
1017 struct sk_buff *skb;
1020 BT_DBG("chan %p, control %p", chan, control);
1022 if (!control->sframe)
1025 if (__chan_is_moving(chan))
1028 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1032 if (control->super == L2CAP_SUPER_RR)
1033 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1034 else if (control->super == L2CAP_SUPER_RNR)
1035 set_bit(CONN_RNR_SENT, &chan->conn_state);
1037 if (control->super != L2CAP_SUPER_SREJ) {
1038 chan->last_acked_seq = control->reqseq;
1039 __clear_ack_timer(chan);
1042 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1043 control->final, control->poll, control->super);
1045 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1046 control_field = __pack_extended_control(control);
1048 control_field = __pack_enhanced_control(control);
1050 skb = l2cap_create_sframe_pdu(chan, control_field);
1052 l2cap_do_send(chan, skb);
1055 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1057 struct l2cap_ctrl control;
1059 BT_DBG("chan %p, poll %d", chan, poll);
1061 memset(&control, 0, sizeof(control));
1063 control.poll = poll;
1065 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1066 control.super = L2CAP_SUPER_RNR;
1068 control.super = L2CAP_SUPER_RR;
1070 control.reqseq = chan->buffer_seq;
1071 l2cap_send_sframe(chan, &control);
1074 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1076 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1079 static bool __amp_capable(struct l2cap_chan *chan)
1081 struct l2cap_conn *conn = chan->conn;
1082 struct hci_dev *hdev;
1083 bool amp_available = false;
1085 if (!conn->hs_enabled)
1088 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1091 read_lock(&hci_dev_list_lock);
1092 list_for_each_entry(hdev, &hci_dev_list, list) {
1093 if (hdev->amp_type != AMP_TYPE_BREDR &&
1094 test_bit(HCI_UP, &hdev->flags)) {
1095 amp_available = true;
1099 read_unlock(&hci_dev_list_lock);
1101 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1102 return amp_available;
1107 static bool l2cap_check_efs(struct l2cap_chan *chan)
1109 /* Check EFS parameters */
1113 void l2cap_send_conn_req(struct l2cap_chan *chan)
1115 struct l2cap_conn *conn = chan->conn;
1116 struct l2cap_conn_req req;
1118 req.scid = cpu_to_le16(chan->scid);
1119 req.psm = chan->psm;
1121 chan->ident = l2cap_get_ident(conn);
1123 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1125 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1128 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1130 struct l2cap_create_chan_req req;
1131 req.scid = cpu_to_le16(chan->scid);
1132 req.psm = chan->psm;
1133 req.amp_id = amp_id;
1135 chan->ident = l2cap_get_ident(chan->conn);
1137 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1141 static void l2cap_move_setup(struct l2cap_chan *chan)
1143 struct sk_buff *skb;
1145 BT_DBG("chan %p", chan);
1147 if (chan->mode != L2CAP_MODE_ERTM)
1150 __clear_retrans_timer(chan);
1151 __clear_monitor_timer(chan);
1152 __clear_ack_timer(chan);
1154 chan->retry_count = 0;
1155 skb_queue_walk(&chan->tx_q, skb) {
1156 if (bt_cb(skb)->control.retries)
1157 bt_cb(skb)->control.retries = 1;
1162 chan->expected_tx_seq = chan->buffer_seq;
1164 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1165 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1166 l2cap_seq_list_clear(&chan->retrans_list);
1167 l2cap_seq_list_clear(&chan->srej_list);
1168 skb_queue_purge(&chan->srej_q);
1170 chan->tx_state = L2CAP_TX_STATE_XMIT;
1171 chan->rx_state = L2CAP_RX_STATE_MOVE;
1173 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1176 static void l2cap_move_done(struct l2cap_chan *chan)
1178 u8 move_role = chan->move_role;
1179 BT_DBG("chan %p", chan);
1181 chan->move_state = L2CAP_MOVE_STABLE;
1182 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1184 if (chan->mode != L2CAP_MODE_ERTM)
1187 switch (move_role) {
1188 case L2CAP_MOVE_ROLE_INITIATOR:
1189 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1190 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1192 case L2CAP_MOVE_ROLE_RESPONDER:
1193 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1198 static void l2cap_le_flowctl_start(struct l2cap_chan *chan)
1201 chan->sdu_last_frag = NULL;
1204 skb_queue_head_init(&chan->tx_q);
1206 if (!chan->tx_credits)
1207 chan->ops->suspend(chan);
1210 static void l2cap_chan_ready(struct l2cap_chan *chan)
1212 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1213 chan->conf_state = 0;
1214 __clear_chan_timer(chan);
1216 if (chan->mode == L2CAP_MODE_LE_FLOWCTL)
1217 l2cap_le_flowctl_start(chan);
1219 chan->state = BT_CONNECTED;
1221 chan->ops->ready(chan);
1224 static void l2cap_le_connect(struct l2cap_chan *chan)
1226 struct l2cap_conn *conn = chan->conn;
1227 struct l2cap_le_conn_req req;
1229 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1232 req.psm = chan->psm;
1233 req.scid = cpu_to_le16(chan->scid);
1234 req.mtu = cpu_to_le16(chan->imtu);
1235 req.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
1236 req.credits = cpu_to_le16(chan->rx_credits);
1238 chan->ident = l2cap_get_ident(conn);
1240 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1244 static void l2cap_le_start(struct l2cap_chan *chan)
1246 struct l2cap_conn *conn = chan->conn;
1248 if (!smp_conn_security(conn->hcon, chan->sec_level))
1252 l2cap_chan_ready(chan);
1256 if (chan->state == BT_CONNECT)
1257 l2cap_le_connect(chan);
1260 static void l2cap_start_connection(struct l2cap_chan *chan)
1262 if (__amp_capable(chan)) {
1263 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1264 a2mp_discover_amp(chan);
1265 } else if (chan->conn->hcon->type == LE_LINK) {
1266 l2cap_le_start(chan);
1268 l2cap_send_conn_req(chan);
1272 static void l2cap_do_start(struct l2cap_chan *chan)
1274 struct l2cap_conn *conn = chan->conn;
1276 if (conn->hcon->type == LE_LINK) {
1277 l2cap_le_start(chan);
1281 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1282 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1285 if (l2cap_chan_check_security(chan) &&
1286 __l2cap_no_conn_pending(chan)) {
1287 l2cap_start_connection(chan);
1290 struct l2cap_info_req req;
1291 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1293 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1294 conn->info_ident = l2cap_get_ident(conn);
1296 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1298 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1303 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1305 u32 local_feat_mask = l2cap_feat_mask;
1307 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1310 case L2CAP_MODE_ERTM:
1311 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1312 case L2CAP_MODE_STREAMING:
1313 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1319 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1321 struct l2cap_conn *conn = chan->conn;
1322 struct l2cap_disconn_req req;
1327 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1328 __clear_retrans_timer(chan);
1329 __clear_monitor_timer(chan);
1330 __clear_ack_timer(chan);
1333 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1334 l2cap_state_change(chan, BT_DISCONN);
1338 req.dcid = cpu_to_le16(chan->dcid);
1339 req.scid = cpu_to_le16(chan->scid);
1340 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1343 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1346 /* ---- L2CAP connections ---- */
/* Start connection establishment on every connection-oriented channel
 * of @conn: channels in BT_CONNECT get a Connect Request (security and
 * mode support permitting), channels in BT_CONNECT2 get a Connect
 * Response followed by the first Configure Request.
 * NOTE: this numbered listing elides some lines (braces, continues).
 */
1347 static void l2cap_conn_start(struct l2cap_conn *conn)
1349 struct l2cap_chan *chan, *tmp;
1351 BT_DBG("conn %p", conn);
1353 mutex_lock(&conn->chan_lock);
1355 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1356 l2cap_chan_lock(chan);
/* Fixed and connectionless channels need no connect procedure. */
1358 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1359 l2cap_chan_unlock(chan);
1363 if (chan->state == BT_CONNECT) {
/* Wait until security is settled and no request is pending. */
1364 if (!l2cap_chan_check_security(chan) ||
1365 !__l2cap_no_conn_pending(chan)) {
1366 l2cap_chan_unlock(chan);
/* Close immediately if the remote cannot support our mode. */
1370 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1371 && test_bit(CONF_STATE2_DEVICE,
1372 &chan->conf_state)) {
1373 l2cap_chan_close(chan, ECONNRESET);
1374 l2cap_chan_unlock(chan);
1378 l2cap_start_connection(chan);
1380 } else if (chan->state == BT_CONNECT2) {
1381 struct l2cap_conn_rsp rsp;
1383 rsp.scid = cpu_to_le16(chan->dcid);
1384 rsp.dcid = cpu_to_le16(chan->scid);
1386 if (l2cap_chan_check_security(chan)) {
1387 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Defer the accept decision to the channel owner. */
1388 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1389 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1390 chan->ops->defer(chan);
1393 l2cap_state_change(chan, BT_CONFIG);
1394 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1395 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1398 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1399 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1402 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first Configure Request once, and only on success. */
1405 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1406 rsp.result != L2CAP_CR_SUCCESS) {
1407 l2cap_chan_unlock(chan);
1411 set_bit(CONF_REQ_SENT, &chan->conf_state);
1412 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1413 l2cap_build_conf_req(chan, buf), buf);
1414 chan->num_conf_req++;
1417 l2cap_chan_unlock(chan);
1420 mutex_unlock(&conn->chan_lock);
1423 /* Find socket with cid and source/destination bdaddr.
1424 * Returns closest match, locked.
1426 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1430 struct l2cap_chan *c, *c1 = NULL;
1432 read_lock(&chan_list_lock);
1434 list_for_each_entry(c, &chan_list, global_l) {
/* Skip channels not in the requested state (state 0 matches any). */
1435 if (state && c->state != state)
1438 if (c->scid == cid) {
1439 int src_match, dst_match;
1440 int src_any, dst_any;
/* An exact source+destination match wins immediately. */
1443 src_match = !bacmp(&c->src, src);
1444 dst_match = !bacmp(&c->dst, dst);
1445 if (src_match && dst_match) {
1446 read_unlock(&chan_list_lock);
/* Otherwise remember a wildcard (BDADDR_ANY) candidate in c1. */
1451 src_any = !bacmp(&c->src, BDADDR_ANY);
1452 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1453 if ((src_match && dst_any) || (src_any && dst_match) ||
1454 (src_any && dst_any))
1459 read_unlock(&chan_list_lock);
/* Handle a ready LE link: create an ATT channel from the listening
 * socket, unless a client ATT channel already exists on this
 * connection or the remote device is blacklisted.
 */
1464 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1466 struct hci_conn *hcon = conn->hcon;
1467 struct l2cap_chan *chan, *pchan;
1472 /* Check if we have socket listening on cid */
1473 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1474 &hcon->src, &hcon->dst);
1478 /* Client ATT sockets should override the server one */
1479 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1482 dst_type = bdaddr_type(hcon, hcon->dst_type);
1484 /* If device is blocked, do not create a channel for it */
1485 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1488 l2cap_chan_lock(pchan);
/* Spawn the per-connection channel from the listening channel. */
1490 chan = pchan->ops->new_connection(pchan);
1494 chan->dcid = L2CAP_CID_ATT;
1496 bacpy(&chan->src, &hcon->src);
1497 bacpy(&chan->dst, &hcon->dst);
1498 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1499 chan->dst_type = dst_type;
1501 __l2cap_chan_add(conn, chan);
1504 l2cap_chan_unlock(pchan);
/* Called when the underlying HCI link is fully established: kick SMP
 * for outgoing LE pairing, set up the LE ATT channel, then start or
 * mark ready every channel on the connection.
 */
1507 static void l2cap_conn_ready(struct l2cap_conn *conn)
1509 struct l2cap_chan *chan;
1510 struct hci_conn *hcon = conn->hcon;
1512 BT_DBG("conn %p", conn);
1514 /* For outgoing pairing which doesn't necessarily have an
1515 * associated socket (e.g. mgmt_pair_device).
1517 if (hcon->out && hcon->type == LE_LINK)
1518 smp_conn_security(hcon, hcon->pending_sec_level);
1520 mutex_lock(&conn->chan_lock);
1522 if (hcon->type == LE_LINK)
1523 l2cap_le_conn_ready(conn);
1525 list_for_each_entry(chan, &conn->chan_l, list) {
1527 l2cap_chan_lock(chan);
/* A2MP fixed channels manage their own state transitions. */
1529 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1530 l2cap_chan_unlock(chan);
1534 if (hcon->type == LE_LINK) {
1535 l2cap_le_start(chan);
1536 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1537 l2cap_chan_ready(chan);
1539 } else if (chan->state == BT_CONNECT) {
1540 l2cap_do_start(chan);
1543 l2cap_chan_unlock(chan);
1546 mutex_unlock(&conn->chan_lock);
1549 /* Notify sockets that we cannot guarantee reliability anymore */
1550 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1552 struct l2cap_chan *chan;
1554 BT_DBG("conn %p", conn);
1556 mutex_lock(&conn->chan_lock);
/* Only channels that insisted on reliable delivery get the error. */
1558 list_for_each_entry(chan, &conn->chan_l, list) {
1559 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1560 l2cap_chan_set_err(chan, err);
1563 mutex_unlock(&conn->chan_lock);
/* Information Request timed out: mark the feature-mask exchange as
 * done anyway and proceed with starting the pending channels.
 */
1566 static void l2cap_info_timeout(struct work_struct *work)
1568 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1571 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1572 conn->info_ident = 0;
1574 l2cap_conn_start(conn);
1579 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1580 * callback is called during registration. The ->remove callback is called
1581 * during unregistration.
1582 * An l2cap_user object can either be explicitly unregistered, or is implicitly unregistered when the
1583 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1584 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1585 * External modules must own a reference to the l2cap_conn object if they intend
1586 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1587 * any time if they don't.
/* Register an external l2cap_user on @conn; calls user->probe() while
 * holding the hci_dev lock. Returns 0 on success, negative on error.
 */
1590 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1592 struct hci_dev *hdev = conn->hcon->hdev;
1595 /* We need to check whether l2cap_conn is registered. If it is not, we
1596 * must not register the l2cap_user. l2cap_conn_del() unregisters
1597 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1598 * relies on the parent hci_conn object to be locked. This itself relies
1599 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list linkage means this user is already registered. */
1604 if (user->list.next || user->list.prev) {
1609 /* conn->hchan is NULL after l2cap_conn_del() was called */
1615 ret = user->probe(conn, user);
1619 list_add(&user->list, &conn->users);
1623 hci_dev_unlock(hdev);
1626 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user: unlink it, clear its
 * linkage and invoke its ->remove() callback under the hci_dev lock.
 */
1628 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1630 struct hci_dev *hdev = conn->hcon->hdev;
/* Bail out if the user was never registered (no list linkage). */
1634 if (!user->list.next || !user->list.prev)
1637 list_del(&user->list);
/* NULL the linkage so a repeated unregister becomes a no-op. */
1638 user->list.next = NULL;
1639 user->list.prev = NULL;
1640 user->remove(conn, user);
1643 hci_dev_unlock(hdev);
1645 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach every registered l2cap_user from @conn, invoking each
 * ->remove() callback; used while tearing the connection down.
 */
1647 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1649 struct l2cap_user *user;
1651 while (!list_empty(&conn->users)) {
1652 user = list_first_entry(&conn->users, struct l2cap_user, list);
1653 list_del(&user->list);
/* NULL the linkage so a later explicit unregister is a no-op. */
1654 user->list.next = NULL;
1655 user->list.prev = NULL;
1656 user->remove(conn, user);
1660 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1662 struct l2cap_conn *conn = hcon->l2cap_data;
1663 struct l2cap_chan *chan, *l;
1668 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1670 kfree_skb(conn->rx_skb);
1672 l2cap_unregister_all_users(conn);
1674 mutex_lock(&conn->chan_lock);
1677 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1678 l2cap_chan_hold(chan);
1679 l2cap_chan_lock(chan);
1681 l2cap_chan_del(chan, err);
1683 l2cap_chan_unlock(chan);
1685 chan->ops->close(chan);
1686 l2cap_chan_put(chan);
1689 mutex_unlock(&conn->chan_lock);
1691 hci_chan_del(conn->hchan);
1693 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1694 cancel_delayed_work_sync(&conn->info_timer);
1696 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1697 cancel_delayed_work_sync(&conn->security_timer);
1698 smp_chan_destroy(conn);
1701 hcon->l2cap_data = NULL;
1703 l2cap_conn_put(conn);
/* SMP security procedure timed out: destroy the SMP context and take
 * the whole connection down with ETIMEDOUT.
 */
1706 static void security_timeout(struct work_struct *work)
1708 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1709 security_timer.work);
1711 BT_DBG("conn %p", conn);
1713 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1714 smp_chan_destroy(conn);
1715 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialise the l2cap_conn for @hcon: attach an HCI
 * channel, pick the MTU from the link type, and arm the matching
 * delayed work (security timer for LE, info timer otherwise).
 */
1719 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1721 struct l2cap_conn *conn = hcon->l2cap_data;
1722 struct hci_chan *hchan;
1727 hchan = hci_chan_create(hcon);
1731 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing out. */
1733 hci_chan_del(hchan);
1737 kref_init(&conn->ref);
1738 hcon->l2cap_data = conn;
/* The conn holds a reference on its hci_conn for its lifetime
 * (dropped in l2cap_conn_free()). */
1740 hci_conn_get(conn->hcon);
1741 conn->hchan = hchan;
1743 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1745 switch (hcon->type) {
1747 if (hcon->hdev->le_mtu) {
1748 conn->mtu = hcon->hdev->le_mtu;
1753 conn->mtu = hcon->hdev->acl_mtu;
1757 conn->feat_mask = 0;
1759 if (hcon->type == ACL_LINK)
1760 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1761 &hcon->hdev->dev_flags);
1763 spin_lock_init(&conn->lock);
1764 mutex_init(&conn->chan_lock);
1766 INIT_LIST_HEAD(&conn->chan_l);
1767 INIT_LIST_HEAD(&conn->users);
1769 if (hcon->type == LE_LINK)
1770 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1772 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1774 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the connection object.
 */
1779 static void l2cap_conn_free(struct kref *ref)
1781 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1783 hci_conn_put(conn->hcon);
/* Take a reference on @conn. */
1787 void l2cap_conn_get(struct l2cap_conn *conn)
1789 kref_get(&conn->ref);
1791 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() once the
 * refcount reaches zero.
 */
1793 void l2cap_conn_put(struct l2cap_conn *conn)
1795 kref_put(&conn->ref, l2cap_conn_free);
1797 EXPORT_SYMBOL(l2cap_conn_put);
1799 /* ---- Socket interface ---- */
1801 /* Find socket with psm and source / destination bdaddr.
1802 * Returns closest match.
1804 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1809 struct l2cap_chan *c, *c1 = NULL;
1811 read_lock(&chan_list_lock);
1813 list_for_each_entry(c, &chan_list, global_l) {
/* Skip channels not in the requested state (state 0 matches any). */
1814 if (state && c->state != state)
/* The channel's source address type must match the link type. */
1817 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1820 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1823 if (c->psm == psm) {
1824 int src_match, dst_match;
1825 int src_any, dst_any;
/* An exact source+destination match wins immediately. */
1828 src_match = !bacmp(&c->src, src);
1829 dst_match = !bacmp(&c->dst, dst);
1830 if (src_match && dst_match) {
1831 read_unlock(&chan_list_lock);
/* Otherwise remember a wildcard (BDADDR_ANY) candidate in c1. */
1836 src_any = !bacmp(&c->src, BDADDR_ANY);
1837 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1838 if ((src_match && dst_any) || (src_any && dst_match) ||
1839 (src_any && dst_any))
1844 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection on @chan to @dst: validate PSM/CID
 * and channel mode, create (or reuse) the HCI link and l2cap_conn,
 * add the channel and kick off the connect procedure.
 * Returns 0 on success or a negative error.
 */
1849 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1850 bdaddr_t *dst, u8 dst_type)
1852 struct l2cap_conn *conn;
1853 struct hci_conn *hcon;
1854 struct hci_dev *hdev;
1858 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1859 dst_type, __le16_to_cpu(psm));
1861 hdev = hci_get_route(dst, &chan->src);
1863 return -EHOSTUNREACH;
1867 l2cap_chan_lock(chan);
1869 /* PSM must be odd and lsb of upper byte must be 0 */
1870 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1871 chan->chan_type != L2CAP_CHAN_RAW) {
/* A connection-oriented channel needs either a PSM or a CID. */
1876 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1881 switch (chan->mode) {
1882 case L2CAP_MODE_BASIC:
1883 case L2CAP_MODE_LE_FLOWCTL:
1885 case L2CAP_MODE_ERTM:
1886 case L2CAP_MODE_STREAMING:
1895 switch (chan->state) {
1899 /* Already connecting */
1904 /* Already connected */
1918 /* Set destination address and psm */
1919 bacpy(&chan->dst, dst);
1920 chan->dst_type = dst_type;
1925 auth_type = l2cap_get_auth_type(chan);
/* Pick the link type from the destination address type. */
1927 if (bdaddr_type_is_le(dst_type))
1928 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1929 chan->sec_level, auth_type);
1931 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1932 chan->sec_level, auth_type);
1935 err = PTR_ERR(hcon);
1939 conn = l2cap_conn_add(hcon);
1941 hci_conn_drop(hcon);
/* Refuse a fixed CID that is already taken on this connection. */
1946 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1947 hci_conn_drop(hcon);
1952 /* Update source addr of the socket */
1953 bacpy(&chan->src, &hcon->src);
1954 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* NOTE(review): the channel lock is dropped around l2cap_chan_add(),
 * presumably for lock ordering against conn->chan_lock -- confirm. */
1956 l2cap_chan_unlock(chan);
1957 l2cap_chan_add(conn, chan);
1958 l2cap_chan_lock(chan);
1960 /* l2cap_chan_add takes its own ref so we can drop this one */
1961 hci_conn_drop(hcon);
1963 l2cap_state_change(chan, BT_CONNECT);
1964 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* If the link is already up we can make progress immediately. */
1966 if (hcon->state == BT_CONNECTED) {
1967 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1968 __clear_chan_timer(chan);
1969 if (l2cap_chan_check_security(chan))
1970 l2cap_state_change(chan, BT_CONNECTED);
1972 l2cap_do_start(chan);
1978 l2cap_chan_unlock(chan);
1979 hci_dev_unlock(hdev);
/* ERTM monitor timer expired: feed the MONITOR_TO event into the TX
 * state machine (which re-polls the remote or gives up).
 */
1984 static void l2cap_monitor_timeout(struct work_struct *work)
1986 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1987 monitor_timer.work)
1989 BT_DBG("chan %p", chan);
1991 l2cap_chan_lock(chan);
/* Early-exit path: release the lock and the timer's channel ref. */
1994 l2cap_chan_unlock(chan);
1995 l2cap_chan_put(chan);
1999 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2001 l2cap_chan_unlock(chan);
2002 l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed the RETRANS_TO event into
 * the TX state machine (which polls the remote with P=1).
 */
2005 static void l2cap_retrans_timeout(struct work_struct *work)
2007 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2008 retrans_timer.work);
2010 BT_DBG("chan %p", chan);
2012 l2cap_chan_lock(chan);
/* Early-exit path: release the lock and the timer's channel ref. */
2015 l2cap_chan_unlock(chan);
2016 l2cap_chan_put(chan);
2020 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2021 l2cap_chan_unlock(chan);
2022 l2cap_chan_put(chan);
/* Transmit queued SDU fragments in streaming mode: stamp sequence
 * numbers, append the FCS if enabled, and send each frame without
 * expecting acknowledgements.
 */
2025 static void l2cap_streaming_send(struct l2cap_chan *chan,
2026 struct sk_buff_head *skbs)
2028 struct sk_buff *skb;
2029 struct l2cap_ctrl *control;
2031 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while a channel move is in progress. */
2033 if (__chan_is_moving(chan))
2036 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2038 while (!skb_queue_empty(&chan->tx_q)) {
2040 skb = skb_dequeue(&chan->tx_q);
2042 bt_cb(skb)->control.retries = 1;
2043 control = &bt_cb(skb)->control;
2045 control->reqseq = 0;
2046 control->txseq = chan->next_tx_seq;
2048 __pack_control(chan, control, skb);
/* Optional CRC16 frame check sequence appended to the PDU. */
2050 if (chan->fcs == L2CAP_FCS_CRC16) {
2051 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2052 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2055 l2cap_do_send(chan, skb);
2057 BT_DBG("Sent txseq %u", control->txseq);
2059 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2060 chan->frames_sent++;
/* Send as many queued I-frames as the remote TX window allows while
 * in the ERTM XMIT state. A clone of each frame is transmitted; the
 * original stays on tx_q until acknowledged.
 */
2064 static int l2cap_ertm_send(struct l2cap_chan *chan)
2066 struct sk_buff *skb, *tx_skb;
2067 struct l2cap_ctrl *control;
2070 BT_DBG("chan %p", chan);
2072 if (chan->state != BT_CONNECTED)
2075 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2078 if (__chan_is_moving(chan))
/* Keep sending while data is queued, the remote window has room
 * and the TX state machine stays in XMIT. */
2081 while (chan->tx_send_head &&
2082 chan->unacked_frames < chan->remote_tx_win &&
2083 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2085 skb = chan->tx_send_head;
2087 bt_cb(skb)->control.retries = 1;
2088 control = &bt_cb(skb)->control;
2090 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggyback an ack for everything received so far. */
2093 control->reqseq = chan->buffer_seq;
2094 chan->last_acked_seq = chan->buffer_seq;
2095 control->txseq = chan->next_tx_seq;
2097 __pack_control(chan, control, skb);
2099 if (chan->fcs == L2CAP_FCS_CRC16) {
2100 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2101 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2104 /* Clone after data has been modified. Data is assumed to be
2105 read-only (for locking purposes) on cloned sk_buffs.
2107 tx_skb = skb_clone(skb, GFP_KERNEL);
2112 __set_retrans_timer(chan);
2114 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2115 chan->unacked_frames++;
2116 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, if any. */
2119 if (skb_queue_is_last(&chan->tx_q, skb))
2120 chan->tx_send_head = NULL;
2122 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2124 l2cap_do_send(chan, tx_skb);
2125 BT_DBG("Sent txseq %u", control->txseq);
2128 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2129 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list: refresh
 * the control field, rebuild the FCS and send a fresh copy of each
 * stored I-frame. Disconnects once a frame exceeds max_tx retries.
 */
2134 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2136 struct l2cap_ctrl control;
2137 struct sk_buff *skb;
2138 struct sk_buff *tx_skb;
2141 BT_DBG("chan %p", chan);
2143 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2146 if (__chan_is_moving(chan))
2149 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2150 seq = l2cap_seq_list_pop(&chan->retrans_list);
2152 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2154 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2159 bt_cb(skb)->control.retries++;
2160 control = bt_cb(skb)->control;
/* Give up and disconnect once the retry limit is exceeded. */
2162 if (chan->max_tx != 0 &&
2163 bt_cb(skb)->control.retries > chan->max_tx) {
2164 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2165 l2cap_send_disconn_req(chan, ECONNRESET);
2166 l2cap_seq_list_clear(&chan->retrans_list);
2170 control.reqseq = chan->buffer_seq;
2171 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* A cloned skb must be deep-copied before we can edit it. */
2176 if (skb_cloned(skb)) {
2177 /* Cloned sk_buffs are read-only, so we need a
2180 tx_skb = skb_copy(skb, GFP_KERNEL);
2182 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: drop the pending retransmissions. */
2186 l2cap_seq_list_clear(&chan->retrans_list);
2190 /* Update skb contents */
2191 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2192 put_unaligned_le32(__pack_extended_control(&control),
2193 tx_skb->data + L2CAP_HDR_SIZE);
2195 put_unaligned_le16(__pack_enhanced_control(&control),
2196 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the updated frame. */
2199 if (chan->fcs == L2CAP_FCS_CRC16) {
2200 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2201 put_unaligned_le16(fcs, skb_put(tx_skb,
2205 l2cap_do_send(chan, tx_skb);
2207 BT_DBG("Resent txseq %d", control.txseq);
2209 chan->last_acked_seq = chan->buffer_seq;
/* Queue a single sequence number (control->reqseq) for retransmission
 * and trigger the resend machinery.
 */
2213 static void l2cap_retransmit(struct l2cap_chan *chan,
2214 struct l2cap_ctrl *control)
2216 BT_DBG("chan %p, control %p", chan, control);
2218 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2219 l2cap_ertm_resend(chan);
/* Rebuild the retransmission list with every unacked frame starting
 * at control->reqseq and resend them all.
 */
2222 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2223 struct l2cap_ctrl *control)
2225 struct sk_buff *skb;
2227 BT_DBG("chan %p, control %p", chan, control);
2230 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2232 l2cap_seq_list_clear(&chan->retrans_list);
2234 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2237 if (chan->unacked_frames) {
/* Find the first already-sent frame at or after reqseq. */
2238 skb_queue_walk(&chan->tx_q, skb) {
2239 if (bt_cb(skb)->control.txseq == control->reqseq ||
2240 skb == chan->tx_send_head)
/* Queue everything from there up to the first unsent frame. */
2244 skb_queue_walk_from(&chan->tx_q, skb) {
2245 if (skb == chan->tx_send_head)
2248 l2cap_seq_list_append(&chan->retrans_list,
2249 bt_cb(skb)->control.txseq);
2252 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy,
 * piggyback acks on pending data when possible, send an RR once
 * roughly 3/4 of the ack window is used, otherwise (re)arm the ack
 * timer.
 */
2256 static void l2cap_send_ack(struct l2cap_chan *chan)
2258 struct l2cap_ctrl control;
2259 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2260 chan->last_acked_seq);
2263 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2264 chan, chan->last_acked_seq, chan->buffer_seq);
2266 memset(&control, 0, sizeof(control));
/* Locally busy while receiving: tell the remote to hold off (RNR). */
2269 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2270 chan->rx_state == L2CAP_RX_STATE_RECV) {
2271 __clear_ack_timer(chan);
2272 control.super = L2CAP_SUPER_RNR;
2273 control.reqseq = chan->buffer_seq;
2274 l2cap_send_sframe(chan, &control);
2276 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2277 l2cap_ertm_send(chan);
2278 /* If any i-frames were sent, they included an ack */
2279 if (chan->buffer_seq == chan->last_acked_seq)
2283 /* Ack now if the window is 3/4ths full.
2284 * Calculate without mul or div
2286 threshold = chan->ack_win;
2287 threshold += threshold << 1;
2290 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2293 if (frames_to_ack >= threshold) {
2294 __clear_ack_timer(chan);
2295 control.super = L2CAP_SUPER_RR;
2296 control.reqseq = chan->buffer_seq;
2297 l2cap_send_sframe(chan, &control);
2302 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into @skb itself, the remainder into fragment skbs (each
 * at most conn->mtu) chained on skb's frag_list.
 * Returns 0 on success or a negative error.
 */
2306 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2307 struct msghdr *msg, int len,
2308 int count, struct sk_buff *skb)
2310 struct l2cap_conn *conn = chan->conn;
2311 struct sk_buff **frag;
2314 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2320 /* Continuation fragments (no L2CAP header) */
2321 frag = &skb_shinfo(skb)->frag_list;
2323 struct sk_buff *tmp;
2325 count = min_t(unsigned int, conn->mtu, len);
2327 tmp = chan->ops->alloc_skb(chan, count,
2328 msg->msg_flags & MSG_DONTWAIT);
2330 return PTR_ERR(tmp);
2334 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2337 (*frag)->priority = skb->priority;
/* Account the fragment in the parent skb's length totals. */
2342 skb->len += (*frag)->len;
2343 skb->data_len += (*frag)->len;
2345 frag = &(*frag)->next;
/* Build a connectionless PDU: L2CAP header plus the PSM, followed by
 * the user data from @msg. Returns the skb or an ERR_PTR.
 */
2351 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2352 struct msghdr *msg, size_t len,
2355 struct l2cap_conn *conn = chan->conn;
2356 struct sk_buff *skb;
2357 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2358 struct l2cap_hdr *lh;
2360 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2361 __le16_to_cpu(chan->psm), len, priority);
/* The head skb holds at most one MTU's worth of payload. */
2363 count = min_t(unsigned int, (conn->mtu - hlen), len);
2365 skb = chan->ops->alloc_skb(chan, count + hlen,
2366 msg->msg_flags & MSG_DONTWAIT);
2370 skb->priority = priority;
2372 /* Create L2CAP header */
2373 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2374 lh->cid = cpu_to_le16(chan->dcid);
2375 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2376 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2378 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2379 if (unlikely(err < 0)) {
2381 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * data from @msg. Returns the skb or an ERR_PTR.
 */
2386 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2387 struct msghdr *msg, size_t len,
2390 struct l2cap_conn *conn = chan->conn;
2391 struct sk_buff *skb;
2393 struct l2cap_hdr *lh;
2395 BT_DBG("chan %p len %zu", chan, len);
/* The head skb holds at most one MTU's worth of payload. */
2397 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2399 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2400 msg->msg_flags & MSG_DONTWAIT);
2404 skb->priority = priority;
2406 /* Create L2CAP header */
2407 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2408 lh->cid = cpu_to_le16(chan->dcid);
2409 lh->len = cpu_to_le16(len);
2411 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2412 if (unlikely(err < 0)) {
2414 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: L2CAP header, zeroed control field
 * (filled in at transmit time), optional SDU-length field when
 * @sdulen is set, then the payload; hlen reserves space for the FCS.
 * Returns the skb or an ERR_PTR.
 */
2419 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2420 struct msghdr *msg, size_t len,
2423 struct l2cap_conn *conn = chan->conn;
2424 struct sk_buff *skb;
2425 int err, count, hlen;
2426 struct l2cap_hdr *lh;
2428 BT_DBG("chan %p len %zu", chan, len);
2431 return ERR_PTR(-ENOTCONN);
2433 hlen = __ertm_hdr_size(chan);
2436 hlen += L2CAP_SDULEN_SIZE;
2438 if (chan->fcs == L2CAP_FCS_CRC16)
2439 hlen += L2CAP_FCS_SIZE;
2441 count = min_t(unsigned int, (conn->mtu - hlen), len);
2443 skb = chan->ops->alloc_skb(chan, count + hlen,
2444 msg->msg_flags & MSG_DONTWAIT);
2448 /* Create L2CAP header */
2449 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2450 lh->cid = cpu_to_le16(chan->dcid);
2451 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2453 /* Control header is populated later */
2454 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2455 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2457 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2460 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2462 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2463 if (unlikely(err < 0)) {
2465 return ERR_PTR(err);
/* Record FCS mode and reset the retry counter for this frame. */
2468 bt_cb(skb)->control.fcs = chan->fcs;
2469 bt_cb(skb)->control.retries = 0;
/* Split an outgoing SDU from @msg into I-frame PDUs that each fit in
 * a single HCI fragment, queueing them on @seg_queue with the proper
 * SAR marking (UNSEGMENTED, or START/CONTINUE/END).
 * Returns 0 on success or a negative error.
 */
2473 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2474 struct sk_buff_head *seg_queue,
2475 struct msghdr *msg, size_t len)
2477 struct sk_buff *skb;
2482 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2484 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2485 * so fragmented skbs are not used. The HCI layer's handling
2486 * of fragmented skbs is not compatible with ERTM's queueing.
2489 /* PDU size is derived from the HCI MTU */
2490 pdu_len = chan->conn->mtu;
2492 /* Constrain PDU size for BR/EDR connections */
2494 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2496 /* Adjust for largest possible L2CAP overhead. */
2498 pdu_len -= L2CAP_FCS_SIZE;
2500 pdu_len -= __ertm_hdr_size(chan);
2502 /* Remote device may have requested smaller PDUs */
2503 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2505 if (len <= pdu_len) {
2506 sar = L2CAP_SAR_UNSEGMENTED;
2510 sar = L2CAP_SAR_START;
/* The START PDU also carries the 2-byte SDU-length field. */
2512 pdu_len -= L2CAP_SDULEN_SIZE;
2516 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2519 __skb_queue_purge(seg_queue);
2520 return PTR_ERR(skb);
2523 bt_cb(skb)->control.sar = sar;
2524 __skb_queue_tail(seg_queue, skb);
/* Subsequent PDUs have no SDU-length field, so regain its space. */
2529 pdu_len += L2CAP_SDULEN_SIZE;
2532 if (len <= pdu_len) {
2533 sar = L2CAP_SAR_END;
2536 sar = L2CAP_SAR_CONTINUE;
/* Build an LE flow-control-mode PDU: L2CAP header, optional SDU-length
 * field when @sdulen is set, then the payload from @msg.
 * Returns the skb or an ERR_PTR.
 */
2543 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2545 size_t len, u16 sdulen)
2547 struct l2cap_conn *conn = chan->conn;
2548 struct sk_buff *skb;
2549 int err, count, hlen;
2550 struct l2cap_hdr *lh;
2552 BT_DBG("chan %p len %zu", chan, len);
2555 return ERR_PTR(-ENOTCONN);
2557 hlen = L2CAP_HDR_SIZE;
2560 hlen += L2CAP_SDULEN_SIZE;
2562 count = min_t(unsigned int, (conn->mtu - hlen), len);
2564 skb = chan->ops->alloc_skb(chan, count + hlen,
2565 msg->msg_flags & MSG_DONTWAIT);
2569 /* Create L2CAP header */
2570 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2571 lh->cid = cpu_to_le16(chan->dcid);
2572 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2575 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2577 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2578 if (unlikely(err < 0)) {
2580 return ERR_PTR(err);
/* Split an outgoing SDU into LE flow-control PDUs queued on
 * @seg_queue; the first PDU carries the total SDU length.
 * Returns 0 on success or a negative error.
 */
2586 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2587 struct sk_buff_head *seg_queue,
2588 struct msghdr *msg, size_t len)
2590 struct sk_buff *skb;
2594 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2596 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2598 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* The first PDU also carries the 2-byte SDU-length field. */
2601 pdu_len -= L2CAP_SDULEN_SIZE;
2607 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2609 __skb_queue_purge(seg_queue);
2610 return PTR_ERR(skb);
2613 __skb_queue_tail(seg_queue, skb);
/* Later PDUs have no SDU-length field, so regain its space. */
2619 pdu_len += L2CAP_SDULEN_SIZE;
/* Send user data on @chan, dispatching on the channel mode:
 * connectionless PDU, LE flow control (consumes tx_credits), basic
 * mode single PDU, or ERTM/streaming segmentation handed to the TX
 * state machine. Returns a negative error on failure.
 */
2626 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2629 struct sk_buff *skb;
2631 struct sk_buff_head seg_queue;
2636 /* Connectionless channel */
2637 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2638 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2640 return PTR_ERR(skb);
2642 l2cap_do_send(chan, skb);
2646 switch (chan->mode) {
2647 case L2CAP_MODE_LE_FLOWCTL:
2648 /* Check outgoing MTU */
2649 if (len > chan->omtu)
2652 if (!chan->tx_credits)
2655 __skb_queue_head_init(&seg_queue);
2657 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
/* The channel could have been closed while segmenting. */
2659 if (chan->state != BT_CONNECTED) {
2660 __skb_queue_purge(&seg_queue);
2667 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
/* Each PDU sent consumes one LE flow-control credit. */
2669 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2670 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2674 if (!chan->tx_credits)
2675 chan->ops->suspend(chan);
2681 case L2CAP_MODE_BASIC:
2682 /* Check outgoing MTU */
2683 if (len > chan->omtu)
2686 /* Create a basic PDU */
2687 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2689 return PTR_ERR(skb);
2691 l2cap_do_send(chan, skb);
2695 case L2CAP_MODE_ERTM:
2696 case L2CAP_MODE_STREAMING:
2697 /* Check outgoing MTU */
2698 if (len > chan->omtu) {
2703 __skb_queue_head_init(&seg_queue);
2705 /* Do segmentation before calling in to the state machine,
2706 * since it's possible to block while waiting for memory
2709 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2711 /* The channel could have been closed while segmenting,
2712 * check that it is still connected.
2714 if (chan->state != BT_CONNECTED) {
2715 __skb_queue_purge(&seg_queue);
2722 if (chan->mode == L2CAP_MODE_ERTM)
2723 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2725 l2cap_streaming_send(chan, &seg_queue);
2729 /* If the skbs were not queued for sending, they'll still be in
2730 * seg_queue and need to be purged.
2732 __skb_queue_purge(&seg_queue);
2736 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number not yet received
 * between expected_tx_seq and @txseq, recording each request in
 * srej_list.
 */
2743 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2745 struct l2cap_ctrl control;
2748 BT_DBG("chan %p, txseq %u", chan, txseq);
2750 memset(&control, 0, sizeof(control));
2752 control.super = L2CAP_SUPER_SREJ;
2754 for (seq = chan->expected_tx_seq; seq != txseq;
2755 seq = __next_seq(chan, seq)) {
/* Frames already buffered in srej_q don't need another SREJ. */
2756 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2757 control.reqseq = seq;
2758 l2cap_send_sframe(chan, &control);
2759 l2cap_seq_list_append(&chan->srej_list, seq);
2763 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number, if any SREJ request is outstanding.
 */
2766 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2768 struct l2cap_ctrl control;
2770 BT_DBG("chan %p", chan);
2772 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2775 memset(&control, 0, sizeof(control));
2777 control.super = L2CAP_SUPER_SREJ;
2778 control.reqseq = chan->srej_list.tail;
2779 l2cap_send_sframe(chan, &control);
/* Re-send SREJ frames for every outstanding sequence number up to
 * (but not including) @txseq, cycling each popped entry back onto
 * the list so it remains pending.
 */
2782 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2784 struct l2cap_ctrl control;
2788 BT_DBG("chan %p, txseq %u", chan, txseq);
2790 memset(&control, 0, sizeof(control));
2792 control.super = L2CAP_SUPER_SREJ;
2794 /* Capture initial list head to allow only one pass through the list. */
2795 initial_head = chan->srej_list.head;
2798 seq = l2cap_seq_list_pop(&chan->srej_list);
2799 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2802 control.reqseq = seq;
2803 l2cap_send_sframe(chan, &control);
/* Put the entry back so it stays pending after this pass. */
2804 l2cap_seq_list_append(&chan->srej_list, seq);
2805 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement (@reqseq): free every acked
 * frame from tx_q, update expected_ack_seq, and stop the
 * retransmission timer once nothing remains unacked.
 */
2808 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2810 struct sk_buff *acked_skb;
2813 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing to do if nothing is unacked or the ack is a duplicate. */
2815 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2818 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2819 chan->expected_ack_seq, chan->unacked_frames);
2821 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2822 ackseq = __next_seq(chan, ackseq)) {
2824 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2826 skb_unlink(acked_skb, &chan->tx_q);
2827 kfree_skb(acked_skb);
2828 chan->unacked_frames--;
2832 chan->expected_ack_seq = reqseq;
2834 if (chan->unacked_frames == 0)
2835 __clear_retrans_timer(chan);
2837 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: drop buffered out-of-order
 * frames and pending SREJ requests, and fall back to plain RECV.
 */
2840 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2842 BT_DBG("chan %p", chan);
2844 chan->expected_tx_seq = chan->buffer_seq;
2845 l2cap_seq_list_clear(&chan->srej_list);
2846 skb_queue_purge(&chan->srej_q);
2847 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: queue and transmit new data,
 * handle local-busy transitions, process incoming acks, and move to
 * WAIT_F once a poll (P=1) is sent, explicitly or on retransmission
 * timeout.
 */
2850 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2851 struct l2cap_ctrl *control,
2852 struct sk_buff_head *skbs, u8 event)
2854 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2858 case L2CAP_EV_DATA_REQUEST:
2859 if (chan->tx_send_head == NULL)
2860 chan->tx_send_head = skb_peek(skbs);
2862 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2863 l2cap_ertm_send(chan);
2865 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2866 BT_DBG("Enter LOCAL_BUSY");
2867 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2869 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2870 /* The SREJ_SENT state must be aborted if we are to
2871 * enter the LOCAL_BUSY state.
2873 l2cap_abort_rx_srej_sent(chan);
2876 l2cap_send_ack(chan);
2879 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2880 BT_DBG("Exit LOCAL_BUSY");
2881 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* After having sent RNR, poll the remote with RR(P=1) to resume. */
2883 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2884 struct l2cap_ctrl local_control;
2886 memset(&local_control, 0, sizeof(local_control));
2887 local_control.sframe = 1;
2888 local_control.super = L2CAP_SUPER_RR;
2889 local_control.poll = 1;
2890 local_control.reqseq = chan->buffer_seq;
2891 l2cap_send_sframe(chan, &local_control);
2893 chan->retry_count = 1;
2894 __set_monitor_timer(chan);
2895 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2898 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2899 l2cap_process_reqseq(chan, control->reqseq);
2901 case L2CAP_EV_EXPLICIT_POLL:
/* Send RR/RNR with P=1 and await the F-bit in WAIT_F. */
2902 l2cap_send_rr_or_rnr(chan, 1);
2903 chan->retry_count = 1;
2904 __set_monitor_timer(chan);
2905 __clear_ack_timer(chan);
2906 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2908 case L2CAP_EV_RETRANS_TO:
2909 l2cap_send_rr_or_rnr(chan, 1);
2910 chan->retry_count = 1;
2911 __set_monitor_timer(chan);
2912 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2914 case L2CAP_EV_RECV_FBIT:
2915 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state: new data is queued but not
 * sent while we wait for a frame carrying the Final bit that answers
 * our poll. Receiving the F-bit returns the machine to XMIT; monitor
 * timeouts repeat the poll until chan->max_tx is exceeded, at which
 * point the channel is disconnected.
 */
2922 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2923 struct l2cap_ctrl *control,
2924 struct sk_buff_head *skbs, u8 event)
2926 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2930 case L2CAP_EV_DATA_REQUEST:
2931 if (chan->tx_send_head == NULL)
2932 chan->tx_send_head = skb_peek(skbs);
2933 /* Queue data, but don't send. */
2934 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2936 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2937 BT_DBG("Enter LOCAL_BUSY");
2938 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2940 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2941 /* The SREJ_SENT state must be aborted if we are to
2942 * enter the LOCAL_BUSY state.
2944 l2cap_abort_rx_srej_sent(chan);
2947 l2cap_send_ack(chan);
2950 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2951 BT_DBG("Exit LOCAL_BUSY");
2952 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* After having sent RNR, poll the remote with RR(P=1) to resume. */
2954 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2955 struct l2cap_ctrl local_control;
2956 memset(&local_control, 0, sizeof(local_control));
2957 local_control.sframe = 1;
2958 local_control.super = L2CAP_SUPER_RR;
2959 local_control.poll = 1;
2960 local_control.reqseq = chan->buffer_seq;
2961 l2cap_send_sframe(chan, &local_control);
2963 chan->retry_count = 1;
2964 __set_monitor_timer(chan);
2965 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2968 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2969 l2cap_process_reqseq(chan, control->reqseq);
2973 case L2CAP_EV_RECV_FBIT:
2974 if (control && control->final) {
2975 __clear_monitor_timer(chan);
2976 if (chan->unacked_frames > 0)
2977 __set_retrans_timer(chan);
2978 chan->retry_count = 0;
2979 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* Fixed malformed format specifier: was "0x2.2%x", which printed
 * the literal text "0x2.2" followed by bare hex. */
2980 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2983 case L2CAP_EV_EXPLICIT_POLL:
2986 case L2CAP_EV_MONITOR_TO:
/* Re-poll until the retry limit (max_tx, 0 = unlimited) is hit. */
2987 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2988 l2cap_send_rr_or_rnr(chan, 1);
2989 __set_monitor_timer(chan);
2990 chan->retry_count++;
2992 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch an ERTM transmit-side event to the handler for the channel's
 * current tx state (XMIT = normal transmission, WAIT_F = waiting for a
 * Final-bit response to a poll).
 */
3000 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3001 struct sk_buff_head *skbs, u8 event)
3003 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3004 chan, control, skbs, event, chan->tx_state);
3006 switch (chan->tx_state) {
3007 case L2CAP_TX_STATE_XMIT:
3008 l2cap_tx_state_xmit(chan, control, skbs, event);
3010 case L2CAP_TX_STATE_WAIT_F:
3011 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed the reqseq/F-bit of a received frame into the tx state machine
 * so acknowledged I-frames can be released (no data is queued: skbs is
 * NULL).
 */
3019 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3020 struct l2cap_ctrl *control)
3022 BT_DBG("chan %p, control %p", chan, control);
3023 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the Final bit of a received frame into the tx state machine
 * (used where reqseq must not be processed again).
 */
3026 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3027 struct l2cap_ctrl *control)
3029 BT_DBG("chan %p, control %p", chan, control);
3030 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3033 /* Copy frame to all raw sockets on that connection */
/* Deliver a clone of @skb to every raw channel on @conn except the one
 * the frame originated from. Clone failures are silently skipped
 * (best-effort delivery); conn->chan_lock protects the channel list.
 */
3034 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3036 struct sk_buff *nskb;
3037 struct l2cap_chan *chan;
3039 BT_DBG("conn %p", conn);
3041 mutex_lock(&conn->chan_lock);
3043 list_for_each_entry(chan, &conn->chan_l, list) {
3044 if (chan->chan_type != L2CAP_CHAN_RAW)
3047 /* Don't send frame to the channel it came from */
3048 if (bt_cb(skb)->chan == chan)
3051 nskb = skb_clone(skb, GFP_KERNEL);
/* recv() takes ownership of nskb on success; on failure the clone is
 * freed (free path elided from this view).
 */
3054 if (chan->ops->recv(chan, nskb))
3058 mutex_unlock(&conn->chan_lock);
3061 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header + command header + dlen
 * bytes of @data. If the PDU exceeds conn->mtu, the remainder is
 * attached as header-less continuation fragments on the frag_list.
 * Returns the skb, or NULL on allocation failure / undersized MTU
 * (error paths elided from this view).
 */
3062 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3063 u8 ident, u16 dlen, void *data)
3065 struct sk_buff *skb, **frag;
3066 struct l2cap_cmd_hdr *cmd;
3067 struct l2cap_hdr *lh;
3070 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3071 conn, code, ident, dlen);
/* MTU must at least hold the two headers, or no command can be sent. */
3073 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3076 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3077 count = min_t(unsigned int, conn->mtu, len);
3079 skb = bt_skb_alloc(count, GFP_KERNEL);
3083 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
3084 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
3086 if (conn->hcon->type == LE_LINK)
3087 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3089 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
3091 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
3094 cmd->len = cpu_to_le16(dlen);
/* First fragment: as much payload as fits after the headers. */
3097 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3098 memcpy(skb_put(skb, count), data, count);
3104 /* Continuation fragments (no L2CAP header) */
3105 frag = &skb_shinfo(skb)->frag_list;
3107 count = min_t(unsigned int, conn->mtu, len);
3109 *frag = bt_skb_alloc(count, GFP_KERNEL);
3113 memcpy(skb_put(*frag, count), data, count);
3118 frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning the option's total
 * size (header + value) and filling *type, *olen and *val. For values
 * wider than 4 bytes, *val is a pointer into the option buffer.
 *
 * NOTE(review): opt->len comes from the remote peer and is not
 * validated here against the remaining buffer length — callers appear
 * to rely on the `len >= L2CAP_CONF_OPT_SIZE` loop guard only, which
 * does not bound the value read. Verify callers cannot be driven past
 * the end of the received config data.
 */
3128 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3131 struct l2cap_conf_opt *opt = *ptr;
3134 len = L2CAP_CONF_OPT_SIZE + opt->len;
3142 *val = *((u8 *) opt->val);
3146 *val = get_unaligned_le16(opt->val);
3150 *val = get_unaligned_le32(opt->val);
/* Wider options are returned by reference rather than by value. */
3154 *val = (unsigned long) opt->val;
3158 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it. For len > 4 the value is copied from the buffer that
 * @val points to.
 *
 * NOTE(review): there is no output-buffer bound parameter; callers must
 * guarantee the response buffer is large enough for every option they
 * add — confirm all call sites size their buffers accordingly.
 */
3162 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3164 struct l2cap_conf_opt *opt = *ptr;
3166 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3173 *((u8 *) opt->val) = val;
3177 put_unaligned_le16(val, opt->val);
3181 put_unaligned_le32(val, opt->val);
/* Values wider than 4 bytes are passed as a pointer cast to long. */
3185 memcpy(opt->val, (void *) val, len);
3189 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option describing the local
 * side's service requirements, filled in according to the channel mode
 * (ERTM uses the locally negotiated parameters; streaming advertises
 * best-effort).
 */
3192 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3194 struct l2cap_conf_efs efs;
3196 switch (chan->mode) {
3197 case L2CAP_MODE_ERTM:
3198 efs.id = chan->local_id;
3199 efs.stype = chan->local_stype;
3200 efs.msdu = cpu_to_le16(chan->local_msdu);
3201 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3202 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3203 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3206 case L2CAP_MODE_STREAMING:
3208 efs.stype = L2CAP_SERV_BESTEFFORT;
3209 efs.msdu = cpu_to_le16(chan->local_msdu);
3210 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3219 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3220 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR so the peer does not stall.
 * Drops the channel reference taken when the timer was armed.
 */
3223 static void l2cap_ack_timeout(struct work_struct *work)
3225 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3229 BT_DBG("chan %p", chan);
3231 l2cap_chan_lock(chan);
/* Number of frames received since the last ack we sent. */
3233 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3234 chan->last_acked_seq);
3237 l2cap_send_rr_or_rnr(chan, 0);
3239 l2cap_chan_unlock(chan);
3240 l2cap_chan_put(chan);
/* Initialise the per-channel ERTM/streaming state: sequence counters,
 * queues, timers and SREJ/retransmit sequence lists. For non-ERTM modes
 * only the counters and tx queue are set up (early return elided from
 * this view). Returns 0 on success or a negative errno from sequence
 * list allocation.
 */
3243 int l2cap_ertm_init(struct l2cap_chan *chan)
3247 chan->next_tx_seq = 0;
3248 chan->expected_tx_seq = 0;
3249 chan->expected_ack_seq = 0;
3250 chan->unacked_frames = 0;
3251 chan->buffer_seq = 0;
3252 chan->frames_sent = 0;
3253 chan->last_acked_seq = 0;
3255 chan->sdu_last_frag = NULL;
3258 skb_queue_head_init(&chan->tx_q);
/* AMP channel-move state starts out on the BR/EDR controller. */
3260 chan->local_amp_id = AMP_ID_BREDR;
3261 chan->move_id = AMP_ID_BREDR;
3262 chan->move_state = L2CAP_MOVE_STABLE;
3263 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3265 if (chan->mode != L2CAP_MODE_ERTM)
3268 chan->rx_state = L2CAP_RX_STATE_RECV;
3269 chan->tx_state = L2CAP_TX_STATE_XMIT;
3271 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3272 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3273 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3275 skb_queue_head_init(&chan->srej_q);
3277 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3281 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the srej_list if the retrans_list allocation failed. */
3283 l2cap_seq_list_free(&chan->srej_list);
/* Return @mode if the remote's feature mask supports it (ERTM or
 * streaming), otherwise fall back to basic mode.
 */
3288 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3291 case L2CAP_MODE_STREAMING:
3292 case L2CAP_MODE_ERTM:
3293 if (l2cap_mode_supported(mode, remote_feat_mask))
3297 return L2CAP_MODE_BASIC;
/* Extended window size is usable only with high-speed (AMP) support
 * enabled and the extended-window feature bit set on the connection.
 */
3301 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3303 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow specification is usable only with high-speed (AMP)
 * support enabled and the extended-flow feature bit set.
 */
3306 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3308 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the ERTM retransmission and monitor timeouts in @rfc. On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout (clamped to the 16-bit field); on BR/EDR the spec defaults
 * are used.
 */
3311 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3312 struct l2cap_conf_rfc *rfc)
3314 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3315 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3317 /* Class 1 devices must have ERTM timeouts
3318 * exceeding the Link Supervision Timeout. The
3319 * default Link Supervision Timeout for AMP
3320 * controllers is 10 seconds.
3322 * Class 1 devices use 0xffffffff for their
3323 * best-effort flush timeout, so the clamping logic
3324 * will result in a timeout that meets the above
3325 * requirement. ERTM timeouts are 16-bit values, so
3326 * the maximum timeout is 65.535 seconds.
3329 /* Convert timeout to milliseconds and round */
3330 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3332 /* This is the recommended formula for class 2 devices
3333 * that start ERTM timers when packets are sent to the
3336 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit on-the-wire field (clamp value elided here). */
3338 if (ertm_to > 0xffff)
3341 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3342 rfc->monitor_timeout = rfc->retrans_timeout;
3344 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3345 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the tx window: if the requested window exceeds the default and
 * extended window size is supported, switch to the extended control
 * field; otherwise clamp to the standard maximum. ack_win tracks the
 * resulting tx window.
 */
3349 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3351 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3352 __l2cap_ews_supported(chan->conn)) {
3353 /* use extended control field */
3354 set_bit(FLAG_EXT_CTRL, &chan->flags);
3355 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3357 chan->tx_win = min_t(u16, chan->tx_win,
3358 L2CAP_DEFAULT_TX_WINDOW);
3359 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3361 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for @chan into @data: MTU
 * (if non-default), the RFC option for the selected mode, and — for
 * ERTM/streaming — optional EFS, EWS and FCS options. Returns the total
 * request length (return elided from this view).
 *
 * NOTE(review): options are appended via l2cap_add_conf_opt without an
 * explicit bound on @data; callers size the buffer — confirm it always
 * fits the worst-case option set.
 */
3364 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3366 struct l2cap_conf_req *req = data;
3367 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3368 void *ptr = req->data;
3371 BT_DBG("chan %p", chan);
/* Mode is only (re)selected on the first request of a negotiation. */
3373 if (chan->num_conf_req || chan->num_conf_rsp)
3376 switch (chan->mode) {
3377 case L2CAP_MODE_STREAMING:
3378 case L2CAP_MODE_ERTM:
3379 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3382 if (__l2cap_efs_supported(chan->conn))
3383 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote supports. */
3387 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3392 if (chan->imtu != L2CAP_DEFAULT_MTU)
3393 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3395 switch (chan->mode) {
3396 case L2CAP_MODE_BASIC:
/* Basic mode only needs an RFC option if the peer knows about
 * ERTM/streaming at all.
 */
3397 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3398 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3401 rfc.mode = L2CAP_MODE_BASIC;
3403 rfc.max_transmit = 0;
3404 rfc.retrans_timeout = 0;
3405 rfc.monitor_timeout = 0;
3406 rfc.max_pdu_size = 0;
3408 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3409 (unsigned long) &rfc);
3412 case L2CAP_MODE_ERTM:
3413 rfc.mode = L2CAP_MODE_ERTM;
3414 rfc.max_transmit = chan->max_tx;
3416 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is limited by the link MTU minus the extended
 * header, SDU-length and FCS overhead.
 */
3418 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3419 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3421 rfc.max_pdu_size = cpu_to_le16(size);
3423 l2cap_txwin_setup(chan);
3425 rfc.txwin_size = min_t(u16, chan->tx_win,
3426 L2CAP_DEFAULT_TX_WINDOW);
3428 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3429 (unsigned long) &rfc);
3431 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3432 l2cap_add_opt_efs(&ptr, chan);
3434 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to disable the FCS if the peer supports that option. */
3438 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3439 if (chan->fcs == L2CAP_FCS_NONE ||
3440 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3441 chan->fcs = L2CAP_FCS_NONE;
3442 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3447 case L2CAP_MODE_STREAMING:
3448 l2cap_txwin_setup(chan);
3449 rfc.mode = L2CAP_MODE_STREAMING;
3451 rfc.max_transmit = 0;
3452 rfc.retrans_timeout = 0;
3453 rfc.monitor_timeout = 0;
3455 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3456 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3458 rfc.max_pdu_size = cpu_to_le16(size);
3460 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3461 (unsigned long) &rfc);
3463 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3464 l2cap_add_opt_efs(&ptr, chan);
3466 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3467 if (chan->fcs == L2CAP_FCS_NONE ||
3468 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3469 chan->fcs = L2CAP_FCS_NONE;
3470 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3476 req->dcid = cpu_to_le16(chan->dcid);
3477 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated Configuration Request stored in chan->conf_req
 * and build the Configuration Response into @data. Returns the response
 * length (return elided from this view), or a negative errno when the
 * request must be refused outright.
 *
 * NOTE(review): option lengths returned by l2cap_get_conf_opt are
 * peer-controlled and the remaining-length bookkeeping is a simple
 * `len -=` loop — verify a malformed opt->len cannot push the cursor
 * past the end of conf_req.
 */
3482 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3484 struct l2cap_conf_rsp *rsp = data;
3485 void *ptr = rsp->data;
3486 void *req = chan->conf_req;
3487 int len = chan->conf_len;
3488 int type, hint, olen;
3490 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3491 struct l2cap_conf_efs efs;
3493 u16 mtu = L2CAP_DEFAULT_MTU;
3494 u16 result = L2CAP_CONF_SUCCESS;
3497 BT_DBG("chan %p", chan);
/* First pass: walk every option the peer sent. */
3499 while (len >= L2CAP_CONF_OPT_SIZE) {
3500 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* The hint bit marks options we may silently ignore if unknown. */
3502 hint = type & L2CAP_CONF_HINT;
3503 type &= L2CAP_CONF_MASK;
3506 case L2CAP_CONF_MTU:
3510 case L2CAP_CONF_FLUSH_TO:
3511 chan->flush_to = val;
3514 case L2CAP_CONF_QOS:
3517 case L2CAP_CONF_RFC:
3518 if (olen == sizeof(rfc))
3519 memcpy(&rfc, (void *) val, olen);
3522 case L2CAP_CONF_FCS:
3523 if (val == L2CAP_FCS_NONE)
3524 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3527 case L2CAP_CONF_EFS:
3529 if (olen == sizeof(efs))
3530 memcpy(&efs, (void *) val, olen);
/* EWS is only legal when high-speed support is enabled. */
3533 case L2CAP_CONF_EWS:
3534 if (!chan->conn->hs_enabled)
3535 return -ECONNREFUSED;
3537 set_bit(FLAG_EXT_CTRL, &chan->flags);
3538 set_bit(CONF_EWS_RECV, &chan->conf_state);
3539 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3540 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
3547 result = L2CAP_CONF_UNKNOWN;
3548 *((u8 *) ptr++) = type;
3553 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* Reconcile the requested mode with what we can do locally. */
3556 switch (chan->mode) {
3557 case L2CAP_MODE_STREAMING:
3558 case L2CAP_MODE_ERTM:
3559 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3560 chan->mode = l2cap_select_mode(rfc.mode,
3561 chan->conn->feat_mask);
3566 if (__l2cap_efs_supported(chan->conn))
3567 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3569 return -ECONNREFUSED;
3572 if (chan->mode != rfc.mode)
3573 return -ECONNREFUSED;
/* Mode mismatch: reject with our preferred mode; give up after
 * the second round of responses.
 */
3579 if (chan->mode != rfc.mode) {
3580 result = L2CAP_CONF_UNACCEPT;
3581 rfc.mode = chan->mode;
3583 if (chan->num_conf_rsp == 1)
3584 return -ECONNREFUSED;
3586 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3587 (unsigned long) &rfc);
3590 if (result == L2CAP_CONF_SUCCESS) {
3591 /* Configure output options and let the other side know
3592 * which ones we don't like. */
3594 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3595 result = L2CAP_CONF_UNACCEPT;
3598 set_bit(CONF_MTU_DONE, &chan->conf_state);
3600 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless either side is
 * no-traffic.
 */
3603 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3604 efs.stype != L2CAP_SERV_NOTRAFIC &&
3605 efs.stype != chan->local_stype) {
3607 result = L2CAP_CONF_UNACCEPT;
3609 if (chan->num_conf_req >= 1)
3610 return -ECONNREFUSED;
3612 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3614 (unsigned long) &efs);
3616 /* Send PENDING Conf Rsp */
3617 result = L2CAP_CONF_PENDING;
3618 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3623 case L2CAP_MODE_BASIC:
3624 chan->fcs = L2CAP_FCS_NONE;
3625 set_bit(CONF_MODE_DONE, &chan->conf_state);
3628 case L2CAP_MODE_ERTM:
3629 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3630 chan->remote_tx_win = rfc.txwin_size;
3632 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3634 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what our link MTU allows. */
3636 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3637 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3638 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3639 rfc.max_pdu_size = cpu_to_le16(size);
3640 chan->remote_mps = size;
3642 __l2cap_set_ertm_timeouts(chan, &rfc);
3644 set_bit(CONF_MODE_DONE, &chan->conf_state);
3646 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3647 sizeof(rfc), (unsigned long) &rfc);
3649 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3650 chan->remote_id = efs.id;
3651 chan->remote_stype = efs.stype;
3652 chan->remote_msdu = le16_to_cpu(efs.msdu);
3653 chan->remote_flush_to =
3654 le32_to_cpu(efs.flush_to);
3655 chan->remote_acc_lat =
3656 le32_to_cpu(efs.acc_lat);
3657 chan->remote_sdu_itime =
3658 le32_to_cpu(efs.sdu_itime);
3659 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3661 (unsigned long) &efs);
3665 case L2CAP_MODE_STREAMING:
3666 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3667 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3668 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3669 rfc.max_pdu_size = cpu_to_le16(size);
3670 chan->remote_mps = size;
3672 set_bit(CONF_MODE_DONE, &chan->conf_state);
3674 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3675 (unsigned long) &rfc);
3680 result = L2CAP_CONF_UNACCEPT;
3682 memset(&rfc, 0, sizeof(rfc));
3683 rfc.mode = chan->mode;
3686 if (result == L2CAP_CONF_SUCCESS)
3687 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3689 rsp->scid = cpu_to_le16(chan->dcid);
3690 rsp->result = cpu_to_le16(result);
3691 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configuration Response from the peer and build the follow-up
 * Configuration Request into @data, adjusting local channel parameters
 * to the values the peer proposed. Returns the new request's length
 * (return elided from this view) or a negative errno on refusal.
 *
 * NOTE(review): as with l2cap_parse_conf_req, the option-walk loop
 * trusts peer-supplied option lengths — verify the cursor cannot run
 * past the supplied @len.
 */
3696 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3697 void *data, u16 *result)
3699 struct l2cap_conf_req *req = data;
3700 void *ptr = req->data;
3703 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3704 struct l2cap_conf_efs efs;
3706 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3708 while (len >= L2CAP_CONF_OPT_SIZE) {
3709 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3712 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: mark unacceptable
 * and counter with the smallest legal value.
 */
3713 if (val < L2CAP_DEFAULT_MIN_MTU) {
3714 *result = L2CAP_CONF_UNACCEPT;
3715 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3718 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3721 case L2CAP_CONF_FLUSH_TO:
3722 chan->flush_to = val;
3723 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3727 case L2CAP_CONF_RFC:
3728 if (olen == sizeof(rfc))
3729 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not change its mode mid-negotiation. */
3731 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3732 rfc.mode != chan->mode)
3733 return -ECONNREFUSED;
3737 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3738 sizeof(rfc), (unsigned long) &rfc);
3741 case L2CAP_CONF_EWS:
3742 chan->ack_win = min_t(u16, val, chan->ack_win);
3743 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3747 case L2CAP_CONF_EFS:
3748 if (olen == sizeof(efs))
3749 memcpy(&efs, (void *)val, olen);
3751 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3752 efs.stype != L2CAP_SERV_NOTRAFIC &&
3753 efs.stype != chan->local_stype)
3754 return -ECONNREFUSED;
3756 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3757 (unsigned long) &efs);
3760 case L2CAP_CONF_FCS:
3761 if (*result == L2CAP_CONF_PENDING)
3762 if (val == L2CAP_FCS_NONE)
3763 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be renegotiated into another mode. */
3769 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3770 return -ECONNREFUSED;
3772 chan->mode = rfc.mode;
3774 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3776 case L2CAP_MODE_ERTM:
3777 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3778 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3779 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3780 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3781 chan->ack_win = min_t(u16, chan->ack_win,
3784 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3785 chan->local_msdu = le16_to_cpu(efs.msdu);
3786 chan->local_sdu_itime =
3787 le32_to_cpu(efs.sdu_itime);
3788 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3789 chan->local_flush_to =
3790 le32_to_cpu(efs.flush_to);
3794 case L2CAP_MODE_STREAMING:
3795 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3799 req->dcid = cpu_to_le16(chan->dcid);
3800 req->flags = __constant_cpu_to_le16(0);
/* Fill in a minimal Configuration Response header (scid/result/flags)
 * with no options; returns the response length (elided from this view).
 */
3805 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3806 u16 result, u16 flags)
3808 struct l2cap_conf_rsp *rsp = data;
3809 void *ptr = rsp->data;
3811 BT_DBG("chan %p", chan);
/* The response carries the peer's source CID, i.e. our dcid. */
3813 rsp->scid = cpu_to_le16(chan->dcid);
3814 rsp->result = cpu_to_le16(result);
3815 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * that was accepted after DEFER_SETUP, advertising our MTU, MPS and
 * initial credits.
 */
3820 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3822 struct l2cap_le_conn_rsp rsp;
3823 struct l2cap_conn *conn = chan->conn;
3825 BT_DBG("chan %p", chan);
3827 rsp.dcid = cpu_to_le16(chan->scid);
3828 rsp.mtu = cpu_to_le16(chan->imtu);
3829 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
3830 rsp.credits = cpu_to_le16(chan->rx_credits);
3831 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
/* chan->ident is the ident of the original LE connect request. */
3833 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR Connection (or AMP Create Channel) Response
 * for a channel accepted after DEFER_SETUP, then kick off configuration
 * by sending our first Configuration Request if one was not sent yet.
 */
3837 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3839 struct l2cap_conn_rsp rsp;
3840 struct l2cap_conn *conn = chan->conn;
3844 rsp.scid = cpu_to_le16(chan->dcid);
3845 rsp.dcid = cpu_to_le16(chan->scid);
3846 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3847 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP. */
3850 rsp_code = L2CAP_CREATE_CHAN_RSP;
3852 rsp_code = L2CAP_CONN_RSP;
3854 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3856 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the config request once per channel. */
3858 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3861 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3862 l2cap_build_conf_req(chan, buf), buf);
3863 chan->num_conf_req++;
/* Extract the RFC (and EWS) options from a successful Configuration
 * Response and apply the negotiated timeouts, MPS and ack window to the
 * channel. Sane defaults are used when the peer omitted the options.
 */
3866 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3870 /* Use sane default values in case a misbehaving remote device
3871 * did not send an RFC or extended window size option.
3873 u16 txwin_ext = chan->ack_win;
3874 struct l2cap_conf_rfc rfc = {
3876 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3877 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3878 .max_pdu_size = cpu_to_le16(chan->imtu),
3879 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3882 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming modes carry RFC parameters worth reading. */
3884 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3887 while (len >= L2CAP_CONF_OPT_SIZE) {
3888 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3891 case L2CAP_CONF_RFC:
3892 if (olen == sizeof(rfc))
3893 memcpy(&rfc, (void *)val, olen);
3895 case L2CAP_CONF_EWS:
3902 case L2CAP_MODE_ERTM:
3903 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3904 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3905 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* With extended control the ack window comes from EWS, not RFC. */
3906 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3907 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3909 chan->ack_win = min_t(u16, chan->ack_win,
3912 case L2CAP_MODE_STREAMING:
3913 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject: if it rejects our pending Information
 * Request (peer doesn't understand it), treat feature discovery as done
 * and proceed with connection setup.
 */
3917 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3918 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3921 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Reject undersized PDUs before touching the payload. */
3923 if (cmd_len < sizeof(*rej))
3926 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3929 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3930 cmd->ident == conn->info_ident) {
3931 cancel_delayed_work(&conn->info_timer);
3933 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3934 conn->info_ident = 0;
3936 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (BR/EDR or AMP Create Channel):
 * look up a listening channel for the PSM, run security checks, create
 * the new channel and send the response. May also trigger feature-mask
 * discovery and the first Configuration Request. Returns the new
 * channel or NULL on rejection.
 */
3942 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3943 struct l2cap_cmd_hdr *cmd,
3944 u8 *data, u8 rsp_code, u8 amp_id)
3946 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3947 struct l2cap_conn_rsp rsp;
3948 struct l2cap_chan *chan = NULL, *pchan;
3949 int result, status = L2CAP_CS_NO_INFO;
3951 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3952 __le16 psm = req->psm;
3954 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3956 /* Check if we have socket listening on psm */
3957 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3958 &conn->hcon->dst, ACL_LINK);
3960 result = L2CAP_CR_BAD_PSM;
3964 mutex_lock(&conn->chan_lock);
3965 l2cap_chan_lock(pchan);
3967 /* Check if the ACL is secure enough (if not SDP) */
3968 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3969 !hci_conn_check_link_mode(conn->hcon)) {
3970 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3971 result = L2CAP_CR_SEC_BLOCK;
3975 result = L2CAP_CR_NO_MEM;
3977 /* Check if we already have channel with that dcid */
3978 if (__l2cap_get_chan_by_dcid(conn, scid))
3981 chan = pchan->ops->new_connection(pchan);
3985 /* For certain devices (ex: HID mouse), support for authentication,
3986 * pairing and bonding is optional. For such devices, in order to avoid
3987 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3988 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3990 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3992 bacpy(&chan->src, &conn->hcon->src);
3993 bacpy(&chan->dst, &conn->hcon->dst);
3994 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3995 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3998 chan->local_amp_id = amp_id;
4000 __l2cap_chan_add(conn, chan);
4004 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Remember the request ident for the (possibly deferred) response. */
4006 chan->ident = cmd->ident;
4008 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4009 if (l2cap_chan_check_security(chan)) {
4010 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4011 l2cap_state_change(chan, BT_CONNECT2);
4012 result = L2CAP_CR_PEND;
4013 status = L2CAP_CS_AUTHOR_PEND;
4014 chan->ops->defer(chan);
4016 /* Force pending result for AMP controllers.
4017 * The connection will succeed after the
4018 * physical link is up.
4020 if (amp_id == AMP_ID_BREDR) {
4021 l2cap_state_change(chan, BT_CONFIG);
4022 result = L2CAP_CR_SUCCESS;
4024 l2cap_state_change(chan, BT_CONNECT2);
4025 result = L2CAP_CR_PEND;
4027 status = L2CAP_CS_NO_INFO;
4030 l2cap_state_change(chan, BT_CONNECT2);
4031 result = L2CAP_CR_PEND;
4032 status = L2CAP_CS_AUTHEN_PEND;
/* Feature discovery still pending: answer PENDING for now. */
4035 l2cap_state_change(chan, BT_CONNECT2);
4036 result = L2CAP_CR_PEND;
4037 status = L2CAP_CS_NO_INFO;
4041 l2cap_chan_unlock(pchan);
4042 mutex_unlock(&conn->chan_lock);
4045 rsp.scid = cpu_to_le16(scid);
4046 rsp.dcid = cpu_to_le16(dcid);
4047 rsp.result = cpu_to_le16(result);
4048 rsp.status = cpu_to_le16(status);
4049 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* First connect on this link: kick off feature-mask discovery. */
4051 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4052 struct l2cap_info_req info;
4053 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4055 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4056 conn->info_ident = l2cap_get_ident(conn);
4058 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4060 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4061 sizeof(info), &info);
4064 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4065 result == L2CAP_CR_SUCCESS) {
4067 set_bit(CONF_REQ_SENT, &chan->conf_state);
4068 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4069 l2cap_build_conf_req(chan, buf), buf);
4070 chan->num_conf_req++;
/* Signalling handler for L2CAP_CONN_REQ: validate the PDU length,
 * notify the management interface of the (now confirmed) device
 * connection, then delegate channel creation to l2cap_connect().
 */
4076 static int l2cap_connect_req(struct l2cap_conn *conn,
4077 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4079 struct hci_dev *hdev = conn->hcon->hdev;
4080 struct hci_conn *hcon = conn->hcon;
4082 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report the connection to mgmt exactly once per hci_conn. */
4086 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
4087 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4088 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
4089 hcon->dst_type, 0, NULL, 0,
4091 hci_dev_unlock(hdev);
4093 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or AMP Create Channel Response): find
 * the matching channel by scid or, for a pending result, by the request
 * ident, then either move to BT_CONFIG and start configuration, stay
 * pending, or tear the channel down on refusal.
 */
4097 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4098 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4101 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4102 u16 scid, dcid, result, status;
4103 struct l2cap_chan *chan;
4107 if (cmd_len < sizeof(*rsp))
4110 scid = __le16_to_cpu(rsp->scid);
4111 dcid = __le16_to_cpu(rsp->dcid);
4112 result = __le16_to_cpu(rsp->result);
4113 status = __le16_to_cpu(rsp->status);
4115 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4116 dcid, scid, result, status);
4118 mutex_lock(&conn->chan_lock);
/* A non-zero scid identifies the channel directly; otherwise fall
 * back to matching the ident of our outstanding request.
 */
4121 chan = __l2cap_get_chan_by_scid(conn, scid);
4127 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4136 l2cap_chan_lock(chan);
4139 case L2CAP_CR_SUCCESS:
4140 l2cap_state_change(chan, BT_CONFIG);
4143 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4145 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4148 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4149 l2cap_build_conf_req(chan, req), req);
4150 chan->num_conf_req++;
4154 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result is a refusal: delete the channel. */
4158 l2cap_chan_del(chan, ECONNREFUSED);
4162 l2cap_chan_unlock(chan);
4165 mutex_unlock(&conn->chan_lock);
/* Apply the negotiated FCS setting: FCS only exists in ERTM/streaming
 * modes, and defaults to CRC16 unless both sides agreed to drop it.
 */
4170 static inline void set_default_fcs(struct l2cap_chan *chan)
4172 /* FCS is enabled only in ERTM or streaming mode, if one or both
4175 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4176 chan->fcs = L2CAP_FCS_NONE;
4177 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4178 chan->fcs = L2CAP_FCS_CRC16;
/* Send a SUCCESS Configuration Response after an EFS-pending exchange
 * completes, clearing the local-pending flag and marking our side of
 * the configuration done.
 */
4181 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4182 u8 ident, u16 flags)
4184 struct l2cap_conn *conn = chan->conn;
4186 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4189 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4190 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4192 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4193 l2cap_build_conf_rsp(chan, data,
4194 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * offending source/destination CIDs back to the peer.
 */
4197 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4200 struct l2cap_cmd_rej_cid rej;
4202 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4203 rej.scid = __cpu_to_le16(scid);
4204 rej.dcid = __cpu_to_le16(dcid);
4206 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request: accumulate (possibly
 * fragmented) config data in chan->conf_req, and once complete, parse
 * it, send the response, and — when both directions are configured —
 * initialise ERTM state and mark the channel ready.
 */
4209 static inline int l2cap_config_req(struct l2cap_conn *conn,
4210 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4213 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4216 struct l2cap_chan *chan;
4219 if (cmd_len < sizeof(*req))
4222 dcid = __le16_to_cpu(req->dcid);
4223 flags = __le16_to_cpu(req->flags);
4225 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4227 chan = l2cap_get_chan_by_scid(conn, dcid);
4229 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal while connecting/configuring. */
4233 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4234 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4239 /* Reject if config buffer is too small. */
4240 len = cmd_len - sizeof(*req);
4241 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4242 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4243 l2cap_build_conf_rsp(chan, rsp,
4244 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel config buffer. */
4249 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4250 chan->conf_len += len;
4252 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4253 /* Incomplete config. Send empty response. */
4254 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4255 l2cap_build_conf_rsp(chan, rsp,
4256 L2CAP_CONF_SUCCESS, flags), rsp);
4260 /* Complete config. */
4261 len = l2cap_parse_conf_req(chan, rsp);
4263 l2cap_send_disconn_req(chan, ECONNRESET);
4267 chan->ident = cmd->ident;
4268 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4269 chan->num_conf_rsp++;
4271 /* Reset config buffer. */
4274 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalise FCS and ERTM and go live. */
4277 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4278 set_default_fcs(chan);
4280 if (chan->mode == L2CAP_MODE_ERTM ||
4281 chan->mode == L2CAP_MODE_STREAMING)
4282 err = l2cap_ertm_init(chan);
4285 l2cap_send_disconn_req(chan, -err);
4287 l2cap_chan_ready(chan);
4292 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4294 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4295 l2cap_build_conf_req(chan, buf), buf);
4296 chan->num_conf_req++;
4299 /* Got Conf Rsp PENDING from remote side and assume we sent
4300 Conf Rsp PENDING in the code above */
4301 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4302 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4304 /* check compatibility */
4306 /* Send rsp for BR/EDR channel */
4308 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4310 chan->ident = cmd->ident;
4314 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response: on success apply the
 * negotiated RFC parameters; on PENDING handle the EFS exchange; on
 * UNACCEPT re-negotiate with a fresh request (up to the retry limit);
 * any other result disconnects. When both directions complete, bring
 * the channel up.
 */
4318 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4319 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4322 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4323 u16 scid, flags, result;
4324 struct l2cap_chan *chan;
4325 int len = cmd_len - sizeof(*rsp);
4328 if (cmd_len < sizeof(*rsp))
4331 scid = __le16_to_cpu(rsp->scid);
4332 flags = __le16_to_cpu(rsp->flags);
4333 result = __le16_to_cpu(rsp->result);
4335 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4338 chan = l2cap_get_chan_by_scid(conn, scid);
4343 case L2CAP_CONF_SUCCESS:
4344 l2cap_conf_rfc_get(chan, rsp->data, len);
4345 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4348 case L2CAP_CONF_PENDING:
4349 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4351 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4354 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4357 l2cap_send_disconn_req(chan, ECONNRESET);
/* For AMP (hs_hcon) channels, the logical link must come
 * up before the final response can be sent.
 */
4361 if (!chan->hs_hcon) {
4362 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4365 if (l2cap_check_efs(chan)) {
4366 amp_create_logical_link(chan);
4367 chan->ident = cmd->ident;
4373 case L2CAP_CONF_UNACCEPT:
4374 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Make sure the rejected options still fit our request
 * buffer before re-parsing them.
 */
4377 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4378 l2cap_send_disconn_req(chan, ECONNRESET);
4382 /* throw out any old stored conf requests */
4383 result = L2CAP_CONF_SUCCESS;
4384 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4387 l2cap_send_disconn_req(chan, ECONNRESET);
4391 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4392 L2CAP_CONF_REQ, len, req);
4393 chan->num_conf_req++;
4394 if (result != L2CAP_CONF_SUCCESS)
/* Reject/unknown result: give the peer a short grace period
 * then disconnect.
 */
4400 l2cap_chan_set_err(chan, ECONNRESET);
4402 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4403 l2cap_send_disconn_req(chan, ECONNRESET);
4407 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4410 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4412 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4413 set_default_fcs(chan);
4415 if (chan->mode == L2CAP_MODE_ERTM ||
4416 chan->mode == L2CAP_MODE_STREAMING)
4417 err = l2cap_ertm_init(chan);
4420 l2cap_send_disconn_req(chan, -err);
4422 l2cap_chan_ready(chan);
4426 l2cap_chan_unlock(chan);
/* Handle an L2CAP Disconnect Request: acknowledge it with a Disconnect
 * Response, then remove and close the channel.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4430 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4431 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4434 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4435 struct l2cap_disconn_rsp rsp;
4437 struct l2cap_chan *chan;
/* Disconnect requests have a fixed size; anything else is malformed */
4439 if (cmd_len != sizeof(*req))
4442 scid = __le16_to_cpu(req->scid);
4443 dcid = __le16_to_cpu(req->dcid);
4445 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4447 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid; an unknown CID gets a Command Reject */
4449 chan = __l2cap_get_chan_by_scid(conn, dcid);
4451 mutex_unlock(&conn->chan_lock);
4452 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4456 l2cap_chan_lock(chan);
4458 rsp.dcid = cpu_to_le16(chan->scid);
4459 rsp.scid = cpu_to_le16(chan->dcid);
4460 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4462 chan->ops->set_shutdown(chan);
/* Hold a reference so the channel survives until ops->close() below */
4464 l2cap_chan_hold(chan);
4465 l2cap_chan_del(chan, ECONNRESET);
4467 l2cap_chan_unlock(chan);
4469 chan->ops->close(chan);
4470 l2cap_chan_put(chan);
4472 mutex_unlock(&conn->chan_lock);
/* Handle an L2CAP Disconnect Response: the peer confirmed our disconnect,
 * so remove and close the channel with no error.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4477 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4478 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4481 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4483 struct l2cap_chan *chan;
4485 if (cmd_len != sizeof(*rsp))
4488 scid = __le16_to_cpu(rsp->scid);
4489 dcid = __le16_to_cpu(rsp->dcid);
4491 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4493 mutex_lock(&conn->chan_lock);
4495 chan = __l2cap_get_chan_by_scid(conn, scid);
4497 mutex_unlock(&conn->chan_lock);
4501 l2cap_chan_lock(chan);
/* Hold a reference so the channel survives until ops->close() below */
4503 l2cap_chan_hold(chan);
/* err == 0: this is an orderly, locally-initiated disconnect */
4504 l2cap_chan_del(chan, 0);
4506 l2cap_chan_unlock(chan);
4508 chan->ops->close(chan);
4509 l2cap_chan_put(chan);
4511 mutex_unlock(&conn->chan_lock);
/* Handle an L2CAP Information Request: report our feature mask or fixed
 * channel map, or NOTSUPP for unknown info types.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4516 static inline int l2cap_information_req(struct l2cap_conn *conn,
4517 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4520 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4523 if (cmd_len != sizeof(*req))
4526 type = __le16_to_cpu(req->type);
4528 BT_DBG("type 0x%4.4x", type);
4530 if (type == L2CAP_IT_FEAT_MASK) {
/* Start from the static mask and add runtime-dependent features */
4532 u32 feat_mask = l2cap_feat_mask;
4533 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4534 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4535 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4537 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* High-speed (AMP) support enables the extended flow/window features */
4539 if (conn->hs_enabled)
4540 feat_mask |= L2CAP_FEAT_EXT_FLOW
4541 | L2CAP_FEAT_EXT_WINDOW;
4543 put_unaligned_le32(feat_mask, rsp->data);
4544 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4546 } else if (type == L2CAP_IT_FIXED_CHAN) {
4548 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when high speed is enabled */
4550 if (conn->hs_enabled)
4551 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4553 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4555 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4556 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4557 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4558 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Unknown info type: respond with NOTSUPP */
4561 struct l2cap_info_rsp rsp;
4562 rsp.type = cpu_to_le16(type);
4563 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4564 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an L2CAP Information Response to our own Information Request.
 * After the feature mask arrives we may chain a fixed-channel query;
 * once discovery is complete, pending channels are started.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4571 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4572 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4575 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4578 if (cmd_len < sizeof(*rsp))
4581 type = __le16_to_cpu(rsp->type);
4582 result = __le16_to_cpu(rsp->result);
4584 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4586 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4587 if (cmd->ident != conn->info_ident ||
4588 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4591 cancel_delayed_work(&conn->info_timer);
/* On failure, stop discovery and start pending channels anyway */
4593 if (result != L2CAP_IR_SUCCESS) {
4594 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4595 conn->info_ident = 0;
4597 l2cap_conn_start(conn);
4603 case L2CAP_IT_FEAT_MASK:
4604 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: chain a fixed-channel info request */
4606 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4607 struct l2cap_info_req req;
4608 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4610 conn->info_ident = l2cap_get_ident(conn);
4612 l2cap_send_cmd(conn, conn->info_ident,
4613 L2CAP_INFO_REQ, sizeof(req), &req);
4615 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4616 conn->info_ident = 0;
4618 l2cap_conn_start(conn);
4622 case L2CAP_IT_FIXED_CHAN:
4623 conn->fixed_chan_mask = rsp->data[0];
4624 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4625 conn->info_ident = 0;
4627 l2cap_conn_start(conn);
/* Handle an L2CAP Create Channel Request (AMP).  amp_id 0 falls back to a
 * plain BR/EDR connect; otherwise the AMP controller id is validated and
 * the new channel is bound to the AMP physical link.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4634 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4635 struct l2cap_cmd_hdr *cmd,
4636 u16 cmd_len, void *data)
4638 struct l2cap_create_chan_req *req = data;
4639 struct l2cap_create_chan_rsp rsp;
4640 struct l2cap_chan *chan;
4641 struct hci_dev *hdev;
4644 if (cmd_len != sizeof(*req))
/* Creating channels over AMP requires high speed to be enabled */
4647 if (!conn->hs_enabled)
4650 psm = le16_to_cpu(req->psm);
4651 scid = le16_to_cpu(req->scid);
4653 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4655 /* For controller id 0 make BR/EDR connection */
4656 if (req->amp_id == AMP_ID_BREDR) {
4657 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4662 /* Validate AMP controller id */
4663 hdev = hci_dev_get(req->amp_id);
/* The referenced controller must be an AMP device that is up */
4667 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4672 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4675 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4676 struct hci_conn *hs_hcon;
4678 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No high-speed link to the peer: reject the CID */
4682 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4687 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4689 mgr->bredr_chan = chan;
4690 chan->hs_hcon = hs_hcon;
/* AMP links carry their own CRC; L2CAP FCS is disabled */
4691 chan->fcs = L2CAP_FCS_NONE;
4692 conn->mtu = hdev->block_mtu;
/* Error path: report a bad AMP controller id to the requester */
4701 rsp.scid = cpu_to_le16(scid);
4702 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4703 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4705 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4711 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4713 struct l2cap_move_chan_req req;
4716 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4718 ident = l2cap_get_ident(chan->conn);
4719 chan->ident = ident;
4721 req.icid = cpu_to_le16(chan->scid);
4722 req.dest_amp_id = dest_amp_id;
4724 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4727 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4730 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4732 struct l2cap_move_chan_rsp rsp;
4734 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4736 rsp.icid = cpu_to_le16(chan->dcid);
4737 rsp.result = cpu_to_le16(result);
4739 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4743 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4745 struct l2cap_move_chan_cfm cfm;
4747 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4749 chan->ident = l2cap_get_ident(chan->conn);
4751 cfm.icid = cpu_to_le16(chan->scid);
4752 cfm.result = cpu_to_le16(result);
4754 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4757 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4760 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4762 struct l2cap_move_chan_cfm cfm;
4764 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4766 cfm.icid = cpu_to_le16(icid);
4767 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4769 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4773 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4776 struct l2cap_move_chan_cfm_rsp rsp;
4778 BT_DBG("icid 0x%4.4x", icid);
4780 rsp.icid = cpu_to_le16(icid);
4781 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4784 static void __release_logical_link(struct l2cap_chan *chan)
4786 chan->hs_hchan = NULL;
4787 chan->hs_hcon = NULL;
4789 /* Placeholder - release the logical link */
/* React to a failed AMP logical-link setup: disconnect a still-forming
 * channel, or unwind an in-progress move depending on our move role.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4792 static void l2cap_logical_fail(struct l2cap_chan *chan)
4794 /* Logical link setup failed */
4795 if (chan->state != BT_CONNECTED) {
4796 /* Create channel failure, disconnect */
4797 l2cap_send_disconn_req(chan, ECONNRESET);
4801 switch (chan->move_role) {
4802 case L2CAP_MOVE_ROLE_RESPONDER:
4803 l2cap_move_done(chan);
/* Tell the initiator the move to AMP is not possible */
4804 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4806 case L2CAP_MOVE_ROLE_INITIATOR:
4807 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4808 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4809 /* Remote has only sent pending or
4810 * success responses, clean up
4812 l2cap_move_done(chan);
4815 /* Other amp move states imply that the move
4816 * has already aborted
4818 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Finish creating a channel once its AMP logical link is up: attach the
 * link, send the deferred EFS config response, and complete setup if the
 * incoming configuration was already done.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4823 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4824 struct hci_chan *hchan)
4826 struct l2cap_conf_rsp rsp;
4828 chan->hs_hchan = hchan;
4829 chan->hs_hcon->l2cap_data = chan->conn;
/* chan->ident still holds the config request we deferred answering */
4831 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4833 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4836 set_default_fcs(chan);
4838 err = l2cap_ertm_init(chan);
4840 l2cap_send_disconn_req(chan, -err);
4842 l2cap_chan_ready(chan);
/* Continue an in-progress channel move after the AMP logical link came
 * up, advancing the move state machine according to our role.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4846 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4847 struct hci_chan *hchan)
4849 chan->hs_hcon = hchan->conn;
4850 chan->hs_hcon->l2cap_data = chan->conn;
4852 BT_DBG("move_state %d", chan->move_state);
4854 switch (chan->move_state) {
4855 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4856 /* Move confirm will be sent after a success
4857 * response is received
4859 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4861 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* While locally busy, defer finishing the move */
4862 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4863 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4864 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4865 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4866 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4867 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4868 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4869 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4873 /* Move was not in expected state, free the channel */
4874 __release_logical_link(chan);
4876 chan->move_state = L2CAP_MOVE_STABLE;
4880 /* Call with chan locked */
/* Completion callback for AMP logical-link setup: on failure unwind and
 * release the link; on success finish either channel creation (channel
 * not yet connected) or a channel move (already connected).
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4881 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4884 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4887 l2cap_logical_fail(chan);
4888 __release_logical_link(chan);
4892 if (chan->state != BT_CONNECTED) {
4893 /* Ignore logical link if channel is on BR/EDR */
4894 if (chan->local_amp_id != AMP_ID_BREDR)
4895 l2cap_logical_finish_create(chan, hchan);
4897 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel to the other controller type.  From BR/EDR this
 * first requires physical AMP link setup; from AMP the move request can
 * be sent immediately.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4901 void l2cap_move_start(struct l2cap_chan *chan)
4903 BT_DBG("chan %p", chan);
4905 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Only channels that prefer AMP are moved off BR/EDR */
4906 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4908 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4909 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4910 /* Placeholder - start physical link setup */
4912 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4913 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4915 l2cap_move_setup(chan);
/* Destination 0 == AMP_ID_BREDR: request a move back to BR/EDR */
4916 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical AMP link setup: for outgoing
 * channels send the (re)connect request, for incoming channels answer the
 * pending Create Channel Request and start configuration on success.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4920 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4921 u8 local_amp_id, u8 remote_amp_id)
4923 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4924 local_amp_id, remote_amp_id);
/* FCS is not used on AMP-created channels */
4926 chan->fcs = L2CAP_FCS_NONE;
4928 /* Outgoing channel on AMP */
4929 if (chan->state == BT_CONNECT) {
4930 if (result == L2CAP_CR_SUCCESS) {
4931 chan->local_amp_id = local_amp_id;
4932 l2cap_send_create_chan_req(chan, remote_amp_id);
4934 /* Revert to BR/EDR connect */
4935 l2cap_send_conn_req(chan);
4941 /* Incoming channel on AMP */
4942 if (__l2cap_no_conn_pending(chan)) {
4943 struct l2cap_conn_rsp rsp;
4945 rsp.scid = cpu_to_le16(chan->dcid);
4946 rsp.dcid = cpu_to_le16(chan->scid);
4948 if (result == L2CAP_CR_SUCCESS) {
4949 /* Send successful response */
4950 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4951 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4953 /* Send negative response */
4954 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4955 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* chan->ident still holds the Create Channel Request being answered */
4958 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4961 if (result == L2CAP_CR_SUCCESS) {
4962 l2cap_state_change(chan, BT_CONFIG);
4963 set_bit(CONF_REQ_SENT, &chan->conf_state);
4964 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4966 l2cap_build_conf_req(chan, buf), buf);
4967 chan->num_conf_req++;
4972 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4975 l2cap_move_setup(chan);
4976 chan->move_id = local_amp_id;
4977 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4979 l2cap_send_move_chan_req(chan, remote_amp_id);
/* As move responder: answer the peer's move request once the state of our
 * logical link is known.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
4982 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4984 struct hci_chan *hchan = NULL;
4986 /* Placeholder - get hci_chan for logical link */
4989 if (hchan->state == BT_CONNECTED) {
4990 /* Logical link is ready to go */
4991 chan->hs_hcon = hchan->conn;
4992 chan->hs_hcon->l2cap_data = chan->conn;
4993 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4994 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4996 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4998 /* Wait for logical link to be ready */
4999 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5002 /* Logical link not available */
5003 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5007 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5009 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5011 if (result == -EINVAL)
5012 rsp_result = L2CAP_MR_BAD_ID;
5014 rsp_result = L2CAP_MR_NOT_ALLOWED;
5016 l2cap_send_move_chan_rsp(chan, rsp_result);
5019 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5020 chan->move_state = L2CAP_MOVE_STABLE;
5022 /* Restart data transmission */
5023 l2cap_ertm_send(chan);
5026 /* Invoke with locked chan */
/* Completion callback for AMP physical-link setup.  Routes to channel
 * creation, move continuation, or move cancellation depending on channel
 * state, result, and our move role.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5027 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5029 u8 local_amp_id = chan->local_amp_id;
5030 u8 remote_amp_id = chan->remote_amp_id;
5032 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5033 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away; nothing to continue */
5035 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
5036 l2cap_chan_unlock(chan);
5040 if (chan->state != BT_CONNECTED) {
5041 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5042 } else if (result != L2CAP_MR_SUCCESS) {
5043 l2cap_do_move_cancel(chan, result);
5045 switch (chan->move_role) {
5046 case L2CAP_MOVE_ROLE_INITIATOR:
5047 l2cap_do_move_initiate(chan, local_amp_id,
5050 case L2CAP_MOVE_ROLE_RESPONDER:
5051 l2cap_do_move_respond(chan, result);
5054 l2cap_do_move_cancel(chan, result);
/* Handle a peer's Move Channel Request: validate the channel and target
 * controller, resolve move collisions, then either accept (possibly
 * PENDING while the physical link comes up) or reject the move.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5060 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5061 struct l2cap_cmd_hdr *cmd,
5062 u16 cmd_len, void *data)
5064 struct l2cap_move_chan_req *req = data;
5065 struct l2cap_move_chan_rsp rsp;
5066 struct l2cap_chan *chan;
5068 u16 result = L2CAP_MR_NOT_ALLOWED;
5070 if (cmd_len != sizeof(*req))
5073 icid = le16_to_cpu(req->icid);
5075 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5077 if (!conn->hs_enabled)
/* Unknown icid: reply NOT_ALLOWED without a channel */
5080 chan = l2cap_get_chan_by_dcid(conn, icid);
5082 rsp.icid = cpu_to_le16(icid);
5083 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5084 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5089 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels without a BR/EDR-only policy
 * may be moved
 */
5091 if (chan->scid < L2CAP_CID_DYN_START ||
5092 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5093 (chan->mode != L2CAP_MODE_ERTM &&
5094 chan->mode != L2CAP_MODE_STREAMING)) {
5095 result = L2CAP_MR_NOT_ALLOWED;
5096 goto send_move_response;
5099 if (chan->local_amp_id == req->dest_amp_id) {
5100 result = L2CAP_MR_SAME_ID;
5101 goto send_move_response;
5104 if (req->dest_amp_id != AMP_ID_BREDR) {
5105 struct hci_dev *hdev;
5106 hdev = hci_dev_get(req->dest_amp_id);
/* The destination must be a powered-up AMP controller */
5107 if (!hdev || hdev->dev_type != HCI_AMP ||
5108 !test_bit(HCI_UP, &hdev->flags)) {
5112 result = L2CAP_MR_BAD_ID;
5113 goto send_move_response;
5118 /* Detect a move collision. Only send a collision response
5119 * if this side has "lost", otherwise proceed with the move.
5120 * The winner has the larger bd_addr.
5122 if ((__chan_is_moving(chan) ||
5123 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5124 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5125 result = L2CAP_MR_COLLISION;
5126 goto send_move_response;
5129 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5130 l2cap_move_setup(chan);
5131 chan->move_id = req->dest_amp_id;
5134 if (req->dest_amp_id == AMP_ID_BREDR) {
5135 /* Moving to BR/EDR */
5136 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5137 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5138 result = L2CAP_MR_PEND;
5140 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5141 result = L2CAP_MR_SUCCESS;
/* Moving to AMP: physical link must come up first, answer PENDING */
5144 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5145 /* Placeholder - uncomment when amp functions are available */
5146 /*amp_accept_physical(chan, req->dest_amp_id);*/
5147 result = L2CAP_MR_PEND;
5151 l2cap_send_move_chan_rsp(chan, result);
5153 l2cap_chan_unlock(chan);
/* Continue a move we initiated after a SUCCESS or PEND Move Channel
 * Response, advancing the state machine and sending the confirm when the
 * logical link allows it.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5158 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5160 struct l2cap_chan *chan;
5161 struct hci_chan *hchan = NULL;
5163 chan = l2cap_get_chan_by_scid(conn, icid);
/* Channel vanished: still confirm (unconfirmed) by icid */
5165 l2cap_send_move_chan_cfm_icid(conn, icid);
5169 __clear_chan_timer(chan);
/* A pending result extends the wait with the ERTX timeout */
5170 if (result == L2CAP_MR_PEND)
5171 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5173 switch (chan->move_state) {
5174 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5175 /* Move confirm will be sent when logical link
5178 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5180 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5181 if (result == L2CAP_MR_PEND) {
5183 } else if (test_bit(CONN_LOCAL_BUSY,
5184 &chan->conn_state)) {
5185 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5187 /* Logical link is up or moving to BR/EDR,
5190 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5191 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5194 case L2CAP_MOVE_WAIT_RSP:
5196 if (result == L2CAP_MR_SUCCESS) {
5197 /* Remote is ready, send confirm immediately
5198 * after logical link is ready
5200 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5202 /* Both logical link and move success
5203 * are required to confirm
5205 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5208 /* Placeholder - get hci_chan for logical link */
5210 /* Logical link not available */
5211 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5215 /* If the logical link is not yet connected, do not
5216 * send confirmation.
5218 if (hchan->state != BT_CONNECTED)
5221 /* Logical link is already ready to go */
5223 chan->hs_hcon = hchan->conn;
5224 chan->hs_hcon->l2cap_data = chan->conn;
5226 if (result == L2CAP_MR_SUCCESS) {
5227 /* Can confirm now */
5228 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5230 /* Now only need move success
5233 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5236 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5239 /* Any other amp move state means the move failed. */
5240 chan->move_id = chan->local_amp_id;
5241 l2cap_move_done(chan);
5242 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5245 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response: on COLLISION switch to the
 * responder role, otherwise cancel the move, then send an unconfirmed
 * Move Channel Confirm.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5248 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5251 struct l2cap_chan *chan;
5253 chan = l2cap_get_chan_by_ident(conn, ident);
5255 /* Could not locate channel, icid is best guess */
5256 l2cap_send_move_chan_cfm_icid(conn, icid);
5260 __clear_chan_timer(chan);
5262 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
/* We lost the collision: let the peer drive the move */
5263 if (result == L2CAP_MR_COLLISION) {
5264 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5266 /* Cleanup - cancel move */
5267 chan->move_id = chan->local_amp_id;
5268 l2cap_move_done(chan);
5272 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5274 l2cap_chan_unlock(chan);
5277 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5278 struct l2cap_cmd_hdr *cmd,
5279 u16 cmd_len, void *data)
5281 struct l2cap_move_chan_rsp *rsp = data;
5284 if (cmd_len != sizeof(*rsp))
5287 icid = le16_to_cpu(rsp->icid);
5288 result = le16_to_cpu(rsp->result);
5290 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5292 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5293 l2cap_move_continue(conn, icid, result);
5295 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirm: commit or roll back the controller id,
 * finish the move, and always acknowledge with a Confirm Response.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5300 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5301 struct l2cap_cmd_hdr *cmd,
5302 u16 cmd_len, void *data)
5304 struct l2cap_move_chan_cfm *cfm = data;
5305 struct l2cap_chan *chan;
5308 if (cmd_len != sizeof(*cfm))
5311 icid = le16_to_cpu(cfm->icid);
5312 result = le16_to_cpu(cfm->result);
5314 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5316 chan = l2cap_get_chan_by_dcid(conn, icid);
5318 /* Spec requires a response even if the icid was not found */
5319 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5323 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5324 if (result == L2CAP_MC_CONFIRMED) {
/* Confirmed: adopt the new controller; drop the logical link
 * when back on BR/EDR
 */
5325 chan->local_amp_id = chan->move_id;
5326 if (chan->local_amp_id == AMP_ID_BREDR)
5327 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller */
5329 chan->move_id = chan->local_amp_id;
5332 l2cap_move_done(chan);
5335 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5337 l2cap_chan_unlock(chan);
5342 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5343 struct l2cap_cmd_hdr *cmd,
5344 u16 cmd_len, void *data)
5346 struct l2cap_move_chan_cfm_rsp *rsp = data;
5347 struct l2cap_chan *chan;
5350 if (cmd_len != sizeof(*rsp))
5353 icid = le16_to_cpu(rsp->icid);
5355 BT_DBG("icid 0x%4.4x", icid);
5357 chan = l2cap_get_chan_by_scid(conn, icid);
5361 __clear_chan_timer(chan);
5363 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5364 chan->local_amp_id = chan->move_id;
5366 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5367 __release_logical_link(chan);
5369 l2cap_move_done(chan);
5372 l2cap_chan_unlock(chan);
5377 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5382 if (min > max || min < 6 || max > 3200)
5385 if (to_multiplier < 10 || to_multiplier > 3200)
5388 if (max >= to_multiplier * 8)
5391 max_latency = (to_multiplier * 8 / max) - 1;
5392 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the parameters, respond accepted/rejected, and apply accepted
 * parameters to the controller.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5398 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5399 struct l2cap_cmd_hdr *cmd,
5400 u16 cmd_len, u8 *data)
5402 struct hci_conn *hcon = conn->hcon;
5403 struct l2cap_conn_param_update_req *req;
5404 struct l2cap_conn_param_update_rsp rsp;
5405 u16 min, max, latency, to_multiplier;
/* Only the master may act on this request */
5408 if (!(hcon->link_mode & HCI_LM_MASTER))
5411 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5414 req = (struct l2cap_conn_param_update_req *) data;
5415 min = __le16_to_cpu(req->min);
5416 max = __le16_to_cpu(req->max);
5417 latency = __le16_to_cpu(req->latency);
5418 to_multiplier = __le16_to_cpu(req->to_multiplier);
5420 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5421 min, max, latency, to_multiplier);
5423 memset(&rsp, 0, sizeof(rsp));
5425 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5427 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5429 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5431 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted parameters are pushed down to the LE controller */
5435 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an LE Credit Based Connection Response: on success adopt the
 * peer's parameters and mark the channel ready, otherwise delete it.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5440 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5441 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5444 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5445 u16 dcid, mtu, mps, credits, result;
5446 struct l2cap_chan *chan;
5449 if (cmd_len < sizeof(*rsp))
5452 dcid = __le16_to_cpu(rsp->dcid);
5453 mtu = __le16_to_cpu(rsp->mtu);
5454 mps = __le16_to_cpu(rsp->mps);
5455 credits = __le16_to_cpu(rsp->credits);
5456 result = __le16_to_cpu(rsp->result);
/* 23 is the LE credit-based minimum for both MTU and MPS */
5458 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5461 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5462 dcid, mtu, mps, credits, result);
5464 mutex_lock(&conn->chan_lock);
/* The pending channel is matched by the request's ident */
5466 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5474 l2cap_chan_lock(chan);
5477 case L2CAP_CR_SUCCESS:
5481 chan->remote_mps = mps;
5482 chan->tx_credits = credits;
5483 l2cap_chan_ready(chan);
/* Any non-success result refuses the connection */
5487 l2cap_chan_del(chan, ECONNREFUSED);
5491 l2cap_chan_unlock(chan);
5494 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler; unknown opcodes
 * are logged (and, per the elided tail, presumably rejected).
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5499 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5500 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5505 switch (cmd->code) {
5506 case L2CAP_COMMAND_REJ:
5507 l2cap_command_rej(conn, cmd, cmd_len, data);
5510 case L2CAP_CONN_REQ:
5511 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share one handler */
5514 case L2CAP_CONN_RSP:
5515 case L2CAP_CREATE_CHAN_RSP:
5516 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5519 case L2CAP_CONF_REQ:
5520 err = l2cap_config_req(conn, cmd, cmd_len, data);
5523 case L2CAP_CONF_RSP:
5524 l2cap_config_rsp(conn, cmd, cmd_len, data);
5527 case L2CAP_DISCONN_REQ:
5528 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5531 case L2CAP_DISCONN_RSP:
5532 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo requests are answered by reflecting the payload */
5535 case L2CAP_ECHO_REQ:
5536 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5539 case L2CAP_ECHO_RSP:
5542 case L2CAP_INFO_REQ:
5543 err = l2cap_information_req(conn, cmd, cmd_len, data);
5546 case L2CAP_INFO_RSP:
5547 l2cap_information_rsp(conn, cmd, cmd_len, data);
5550 case L2CAP_CREATE_CHAN_REQ:
5551 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5554 case L2CAP_MOVE_CHAN_REQ:
5555 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5558 case L2CAP_MOVE_CHAN_RSP:
5559 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5562 case L2CAP_MOVE_CHAN_CFM:
5563 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5566 case L2CAP_MOVE_CHAN_CFM_RSP:
5567 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5571 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request: find a listening channel
 * for the PSM, verify security and CID uniqueness, create the child
 * channel and answer (or defer via DEFER_SETUP).
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5579 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5580 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5583 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5584 struct l2cap_le_conn_rsp rsp;
5585 struct l2cap_chan *chan, *pchan;
5586 u16 dcid, scid, credits, mtu, mps;
5590 if (cmd_len != sizeof(*req))
5593 scid = __le16_to_cpu(req->scid);
5594 mtu = __le16_to_cpu(req->mtu);
5595 mps = __le16_to_cpu(req->mps);
/* 23 is the LE credit-based minimum for both MTU and MPS */
5600 if (mtu < 23 || mps < 23)
5603 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5606 /* Check if we have socket listening on psm */
5607 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5608 &conn->hcon->dst, LE_LINK);
5610 result = L2CAP_CR_BAD_PSM;
5615 mutex_lock(&conn->chan_lock);
5616 l2cap_chan_lock(pchan);
/* The link must satisfy the listener's security level */
5618 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5619 result = L2CAP_CR_AUTHENTICATION;
5621 goto response_unlock;
5624 /* Check if we already have channel with that dcid */
5625 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5626 result = L2CAP_CR_NO_MEM;
5628 goto response_unlock;
5631 chan = pchan->ops->new_connection(pchan);
5633 result = L2CAP_CR_NO_MEM;
5634 goto response_unlock;
5637 bacpy(&chan->src, &conn->hcon->src);
5638 bacpy(&chan->dst, &conn->hcon->dst);
5639 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5640 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5644 chan->remote_mps = mps;
5645 chan->tx_credits = __le16_to_cpu(req->credits);
5647 __l2cap_chan_add(conn, chan);
5649 credits = chan->rx_credits;
5651 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5653 chan->ident = cmd->ident;
/* With DEFER_SETUP the response is sent later by userspace accept() */
5655 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5656 l2cap_state_change(chan, BT_CONNECT2);
5657 result = L2CAP_CR_PEND;
5658 chan->ops->defer(chan);
5660 l2cap_chan_ready(chan);
5661 result = L2CAP_CR_SUCCESS;
5665 l2cap_chan_unlock(pchan);
5666 mutex_unlock(&conn->chan_lock);
5668 if (result == L2CAP_CR_PEND)
5673 rsp.mtu = cpu_to_le16(chan->imtu);
5674 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
5680 rsp.dcid = cpu_to_le16(dcid);
5681 rsp.credits = cpu_to_le16(credits);
5682 rsp.result = cpu_to_le16(result);
5684 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer's credits to the
 * channel and flush as many queued frames as the new budget allows.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5689 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5690 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5693 struct l2cap_le_credits *pkt;
5694 struct l2cap_chan *chan;
5697 if (cmd_len != sizeof(*pkt))
5700 pkt = (struct l2cap_le_credits *) data;
5701 cid = __le16_to_cpu(pkt->cid);
5702 credits = __le16_to_cpu(pkt->credits);
5704 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5706 chan = l2cap_get_chan_by_dcid(conn, cid);
5710 chan->tx_credits += credits;
/* Drain the tx queue while credits remain */
5712 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5713 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Leftover credits: let the channel owner resume sending */
5717 if (chan->tx_credits)
5718 chan->ops->resume(chan);
5720 l2cap_chan_unlock(chan);
/* Dispatch one LE signaling command.  When LE CoC support is disabled the
 * credit-based opcodes are filtered out first.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5725 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5726 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
/* With LE CoC disabled, these opcodes are not handled */
5731 if (!enable_lecoc) {
5732 switch (cmd->code) {
5733 case L2CAP_LE_CONN_REQ:
5734 case L2CAP_LE_CONN_RSP:
5735 case L2CAP_LE_CREDITS:
5736 case L2CAP_DISCONN_REQ:
5737 case L2CAP_DISCONN_RSP:
5742 switch (cmd->code) {
5743 case L2CAP_COMMAND_REJ:
5746 case L2CAP_CONN_PARAM_UPDATE_REQ:
5747 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5750 case L2CAP_CONN_PARAM_UPDATE_RSP:
5753 case L2CAP_LE_CONN_RSP:
5754 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5757 case L2CAP_LE_CONN_REQ:
5758 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5761 case L2CAP_LE_CREDITS:
5762 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5765 case L2CAP_DISCONN_REQ:
5766 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5769 case L2CAP_DISCONN_RSP:
5770 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5774 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the LE signaling channel: validate the single command header in
 * the skb, dispatch it, and reject commands the handler refused.
 * NOTE(review): listing is elided; comments cover visible statements only.
 */
5782 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5783 struct sk_buff *skb)
5785 struct hci_conn *hcon = conn->hcon;
5786 struct l2cap_cmd_hdr *cmd;
5790 if (hcon->type != LE_LINK)
5793 if (skb->len < L2CAP_CMD_HDR_SIZE)
5796 cmd = (void *) skb->data;
5797 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5799 len = le16_to_cpu(cmd->len);
5801 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Exactly one command per LE signaling PDU; ident 0 is invalid */
5803 if (len != skb->len || !cmd->ident) {
5804 BT_DBG("corrupted command");
5808 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5810 struct l2cap_cmd_rej_unk rej;
5812 BT_ERR("Wrong link type (%d)", err);
5814 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5815 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5823 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5824 struct sk_buff *skb)
5826 struct hci_conn *hcon = conn->hcon;
5827 u8 *data = skb->data;
5829 struct l2cap_cmd_hdr cmd;
5832 l2cap_raw_recv(conn, skb);
5834 if (hcon->type != ACL_LINK)
5837 while (len >= L2CAP_CMD_HDR_SIZE) {
5839 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5840 data += L2CAP_CMD_HDR_SIZE;
5841 len -= L2CAP_CMD_HDR_SIZE;
5843 cmd_len = le16_to_cpu(cmd.len);
5845 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5848 if (cmd_len > len || !cmd.ident) {
5849 BT_DBG("corrupted command");
5853 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5855 struct l2cap_cmd_rej_unk rej;
5857 BT_ERR("Wrong link type (%d)", err);
5859 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5860 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5872 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5874 u16 our_fcs, rcv_fcs;
5877 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5878 hdr_size = L2CAP_EXT_HDR_SIZE;
5880 hdr_size = L2CAP_ENH_HDR_SIZE;
5882 if (chan->fcs == L2CAP_FCS_CRC16) {
5883 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5884 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5885 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5887 if (our_fcs != rcv_fcs)
5893 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5895 struct l2cap_ctrl control;
5897 BT_DBG("chan %p", chan);
5899 memset(&control, 0, sizeof(control));
5902 control.reqseq = chan->buffer_seq;
5903 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5905 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5906 control.super = L2CAP_SUPER_RNR;
5907 l2cap_send_sframe(chan, &control);
5910 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5911 chan->unacked_frames > 0)
5912 __set_retrans_timer(chan);
5914 /* Send pending iframes */
5915 l2cap_ertm_send(chan);
5917 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5918 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5919 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5922 control.super = L2CAP_SUPER_RR;
5923 l2cap_send_sframe(chan, &control);
5927 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5928 struct sk_buff **last_frag)
5930 /* skb->len reflects data in skb as well as all fragments
5931 * skb->data_len reflects only data in fragments
5933 if (!skb_has_frag_list(skb))
5934 skb_shinfo(skb)->frag_list = new_frag;
5936 new_frag->next = NULL;
5938 (*last_frag)->next = new_frag;
5939 *last_frag = new_frag;
5941 skb->len += new_frag->len;
5942 skb->data_len += new_frag->len;
5943 skb->truesize += new_frag->truesize;
5946 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5947 struct l2cap_ctrl *control)
5951 switch (control->sar) {
5952 case L2CAP_SAR_UNSEGMENTED:
5956 err = chan->ops->recv(chan, skb);
5959 case L2CAP_SAR_START:
5963 chan->sdu_len = get_unaligned_le16(skb->data);
5964 skb_pull(skb, L2CAP_SDULEN_SIZE);
5966 if (chan->sdu_len > chan->imtu) {
5971 if (skb->len >= chan->sdu_len)
5975 chan->sdu_last_frag = skb;
5981 case L2CAP_SAR_CONTINUE:
5985 append_skb_frag(chan->sdu, skb,
5986 &chan->sdu_last_frag);
5989 if (chan->sdu->len >= chan->sdu_len)
5999 append_skb_frag(chan->sdu, skb,
6000 &chan->sdu_last_frag);
6003 if (chan->sdu->len != chan->sdu_len)
6006 err = chan->ops->recv(chan, chan->sdu);
6009 /* Reassembly complete */
6011 chan->sdu_last_frag = NULL;
6019 kfree_skb(chan->sdu);
6021 chan->sdu_last_frag = NULL;
6028 static int l2cap_resegment(struct l2cap_chan *chan)
6034 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6038 if (chan->mode != L2CAP_MODE_ERTM)
6041 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6042 l2cap_tx(chan, NULL, NULL, event);
6045 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6048 /* Pass sequential frames to l2cap_reassemble_sdu()
6049 * until a gap is encountered.
6052 BT_DBG("chan %p", chan);
6054 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6055 struct sk_buff *skb;
6056 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6057 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6059 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6064 skb_unlink(skb, &chan->srej_q);
6065 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6066 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
6071 if (skb_queue_empty(&chan->srej_q)) {
6072 chan->rx_state = L2CAP_RX_STATE_RECV;
6073 l2cap_send_ack(chan);
6079 static void l2cap_handle_srej(struct l2cap_chan *chan,
6080 struct l2cap_ctrl *control)
6082 struct sk_buff *skb;
6084 BT_DBG("chan %p, control %p", chan, control);
6086 if (control->reqseq == chan->next_tx_seq) {
6087 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6088 l2cap_send_disconn_req(chan, ECONNRESET);
6092 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6095 BT_DBG("Seq %d not available for retransmission",
6100 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
6101 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6102 l2cap_send_disconn_req(chan, ECONNRESET);
6106 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6108 if (control->poll) {
6109 l2cap_pass_to_tx(chan, control);
6111 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6112 l2cap_retransmit(chan, control);
6113 l2cap_ertm_send(chan);
6115 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6116 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6117 chan->srej_save_reqseq = control->reqseq;
6120 l2cap_pass_to_tx_fbit(chan, control);
6122 if (control->final) {
6123 if (chan->srej_save_reqseq != control->reqseq ||
6124 !test_and_clear_bit(CONN_SREJ_ACT,
6126 l2cap_retransmit(chan, control);
6128 l2cap_retransmit(chan, control);
6129 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6130 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6131 chan->srej_save_reqseq = control->reqseq;
6137 static void l2cap_handle_rej(struct l2cap_chan *chan,
6138 struct l2cap_ctrl *control)
6140 struct sk_buff *skb;
6142 BT_DBG("chan %p, control %p", chan, control);
6144 if (control->reqseq == chan->next_tx_seq) {
6145 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6146 l2cap_send_disconn_req(chan, ECONNRESET);
6150 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6152 if (chan->max_tx && skb &&
6153 bt_cb(skb)->control.retries >= chan->max_tx) {
6154 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6155 l2cap_send_disconn_req(chan, ECONNRESET);
6159 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6161 l2cap_pass_to_tx(chan, control);
6163 if (control->final) {
6164 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6165 l2cap_retransmit_all(chan, control);
6167 l2cap_retransmit_all(chan, control);
6168 l2cap_ertm_send(chan);
6169 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6170 set_bit(CONN_REJ_ACT, &chan->conn_state);
6174 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6176 BT_DBG("chan %p, txseq %d", chan, txseq);
6178 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6179 chan->expected_tx_seq);
6181 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6182 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6184 /* See notes below regarding "double poll" and
6187 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6188 BT_DBG("Invalid/Ignore - after SREJ");
6189 return L2CAP_TXSEQ_INVALID_IGNORE;
6191 BT_DBG("Invalid - in window after SREJ sent");
6192 return L2CAP_TXSEQ_INVALID;
6196 if (chan->srej_list.head == txseq) {
6197 BT_DBG("Expected SREJ");
6198 return L2CAP_TXSEQ_EXPECTED_SREJ;
6201 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6202 BT_DBG("Duplicate SREJ - txseq already stored");
6203 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6206 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6207 BT_DBG("Unexpected SREJ - not requested");
6208 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6212 if (chan->expected_tx_seq == txseq) {
6213 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6215 BT_DBG("Invalid - txseq outside tx window");
6216 return L2CAP_TXSEQ_INVALID;
6219 return L2CAP_TXSEQ_EXPECTED;
6223 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6224 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6225 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6226 return L2CAP_TXSEQ_DUPLICATE;
6229 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6230 /* A source of invalid packets is a "double poll" condition,
6231 * where delays cause us to send multiple poll packets. If
6232 * the remote stack receives and processes both polls,
6233 * sequence numbers can wrap around in such a way that a
6234 * resent frame has a sequence number that looks like new data
6235 * with a sequence gap. This would trigger an erroneous SREJ
6238 * Fortunately, this is impossible with a tx window that's
6239 * less than half of the maximum sequence number, which allows
6240 * invalid frames to be safely ignored.
6242 * With tx window sizes greater than half of the tx window
6243 * maximum, the frame is invalid and cannot be ignored. This
6244 * causes a disconnect.
6247 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6248 BT_DBG("Invalid/Ignore - txseq outside tx window");
6249 return L2CAP_TXSEQ_INVALID_IGNORE;
6251 BT_DBG("Invalid - txseq outside tx window");
6252 return L2CAP_TXSEQ_INVALID;
6255 BT_DBG("Unexpected - txseq indicates missing frames");
6256 return L2CAP_TXSEQ_UNEXPECTED;
6260 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6261 struct l2cap_ctrl *control,
6262 struct sk_buff *skb, u8 event)
6265 bool skb_in_use = false;
6267 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6271 case L2CAP_EV_RECV_IFRAME:
6272 switch (l2cap_classify_txseq(chan, control->txseq)) {
6273 case L2CAP_TXSEQ_EXPECTED:
6274 l2cap_pass_to_tx(chan, control);
6276 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6277 BT_DBG("Busy, discarding expected seq %d",
6282 chan->expected_tx_seq = __next_seq(chan,
6285 chan->buffer_seq = chan->expected_tx_seq;
6288 err = l2cap_reassemble_sdu(chan, skb, control);
6292 if (control->final) {
6293 if (!test_and_clear_bit(CONN_REJ_ACT,
6294 &chan->conn_state)) {
6296 l2cap_retransmit_all(chan, control);
6297 l2cap_ertm_send(chan);
6301 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6302 l2cap_send_ack(chan);
6304 case L2CAP_TXSEQ_UNEXPECTED:
6305 l2cap_pass_to_tx(chan, control);
6307 /* Can't issue SREJ frames in the local busy state.
6308 * Drop this frame, it will be seen as missing
6309 * when local busy is exited.
6311 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6312 BT_DBG("Busy, discarding unexpected seq %d",
6317 /* There was a gap in the sequence, so an SREJ
6318 * must be sent for each missing frame. The
6319 * current frame is stored for later use.
6321 skb_queue_tail(&chan->srej_q, skb);
6323 BT_DBG("Queued %p (queue len %d)", skb,
6324 skb_queue_len(&chan->srej_q));
6326 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6327 l2cap_seq_list_clear(&chan->srej_list);
6328 l2cap_send_srej(chan, control->txseq);
6330 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6332 case L2CAP_TXSEQ_DUPLICATE:
6333 l2cap_pass_to_tx(chan, control);
6335 case L2CAP_TXSEQ_INVALID_IGNORE:
6337 case L2CAP_TXSEQ_INVALID:
6339 l2cap_send_disconn_req(chan, ECONNRESET);
6343 case L2CAP_EV_RECV_RR:
6344 l2cap_pass_to_tx(chan, control);
6345 if (control->final) {
6346 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6348 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6349 !__chan_is_moving(chan)) {
6351 l2cap_retransmit_all(chan, control);
6354 l2cap_ertm_send(chan);
6355 } else if (control->poll) {
6356 l2cap_send_i_or_rr_or_rnr(chan);
6358 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6359 &chan->conn_state) &&
6360 chan->unacked_frames)
6361 __set_retrans_timer(chan);
6363 l2cap_ertm_send(chan);
6366 case L2CAP_EV_RECV_RNR:
6367 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6368 l2cap_pass_to_tx(chan, control);
6369 if (control && control->poll) {
6370 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6371 l2cap_send_rr_or_rnr(chan, 0);
6373 __clear_retrans_timer(chan);
6374 l2cap_seq_list_clear(&chan->retrans_list);
6376 case L2CAP_EV_RECV_REJ:
6377 l2cap_handle_rej(chan, control);
6379 case L2CAP_EV_RECV_SREJ:
6380 l2cap_handle_srej(chan, control);
6386 if (skb && !skb_in_use) {
6387 BT_DBG("Freeing %p", skb);
6394 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6395 struct l2cap_ctrl *control,
6396 struct sk_buff *skb, u8 event)
6399 u16 txseq = control->txseq;
6400 bool skb_in_use = false;
6402 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6406 case L2CAP_EV_RECV_IFRAME:
6407 switch (l2cap_classify_txseq(chan, txseq)) {
6408 case L2CAP_TXSEQ_EXPECTED:
6409 /* Keep frame for reassembly later */
6410 l2cap_pass_to_tx(chan, control);
6411 skb_queue_tail(&chan->srej_q, skb);
6413 BT_DBG("Queued %p (queue len %d)", skb,
6414 skb_queue_len(&chan->srej_q));
6416 chan->expected_tx_seq = __next_seq(chan, txseq);
6418 case L2CAP_TXSEQ_EXPECTED_SREJ:
6419 l2cap_seq_list_pop(&chan->srej_list);
6421 l2cap_pass_to_tx(chan, control);
6422 skb_queue_tail(&chan->srej_q, skb);
6424 BT_DBG("Queued %p (queue len %d)", skb,
6425 skb_queue_len(&chan->srej_q));
6427 err = l2cap_rx_queued_iframes(chan);
6432 case L2CAP_TXSEQ_UNEXPECTED:
6433 /* Got a frame that can't be reassembled yet.
6434 * Save it for later, and send SREJs to cover
6435 * the missing frames.
6437 skb_queue_tail(&chan->srej_q, skb);
6439 BT_DBG("Queued %p (queue len %d)", skb,
6440 skb_queue_len(&chan->srej_q));
6442 l2cap_pass_to_tx(chan, control);
6443 l2cap_send_srej(chan, control->txseq);
6445 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6446 /* This frame was requested with an SREJ, but
6447 * some expected retransmitted frames are
6448 * missing. Request retransmission of missing
6451 skb_queue_tail(&chan->srej_q, skb);
6453 BT_DBG("Queued %p (queue len %d)", skb,
6454 skb_queue_len(&chan->srej_q));
6456 l2cap_pass_to_tx(chan, control);
6457 l2cap_send_srej_list(chan, control->txseq);
6459 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6460 /* We've already queued this frame. Drop this copy. */
6461 l2cap_pass_to_tx(chan, control);
6463 case L2CAP_TXSEQ_DUPLICATE:
6464 /* Expecting a later sequence number, so this frame
6465 * was already received. Ignore it completely.
6468 case L2CAP_TXSEQ_INVALID_IGNORE:
6470 case L2CAP_TXSEQ_INVALID:
6472 l2cap_send_disconn_req(chan, ECONNRESET);
6476 case L2CAP_EV_RECV_RR:
6477 l2cap_pass_to_tx(chan, control);
6478 if (control->final) {
6479 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6481 if (!test_and_clear_bit(CONN_REJ_ACT,
6482 &chan->conn_state)) {
6484 l2cap_retransmit_all(chan, control);
6487 l2cap_ertm_send(chan);
6488 } else if (control->poll) {
6489 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6490 &chan->conn_state) &&
6491 chan->unacked_frames) {
6492 __set_retrans_timer(chan);
6495 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6496 l2cap_send_srej_tail(chan);
6498 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6499 &chan->conn_state) &&
6500 chan->unacked_frames)
6501 __set_retrans_timer(chan);
6503 l2cap_send_ack(chan);
6506 case L2CAP_EV_RECV_RNR:
6507 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6508 l2cap_pass_to_tx(chan, control);
6509 if (control->poll) {
6510 l2cap_send_srej_tail(chan);
6512 struct l2cap_ctrl rr_control;
6513 memset(&rr_control, 0, sizeof(rr_control));
6514 rr_control.sframe = 1;
6515 rr_control.super = L2CAP_SUPER_RR;
6516 rr_control.reqseq = chan->buffer_seq;
6517 l2cap_send_sframe(chan, &rr_control);
6521 case L2CAP_EV_RECV_REJ:
6522 l2cap_handle_rej(chan, control);
6524 case L2CAP_EV_RECV_SREJ:
6525 l2cap_handle_srej(chan, control);
6529 if (skb && !skb_in_use) {
6530 BT_DBG("Freeing %p", skb);
6537 static int l2cap_finish_move(struct l2cap_chan *chan)
6539 BT_DBG("chan %p", chan);
6541 chan->rx_state = L2CAP_RX_STATE_RECV;
6544 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6546 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6548 return l2cap_resegment(chan);
6551 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6552 struct l2cap_ctrl *control,
6553 struct sk_buff *skb, u8 event)
6557 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6563 l2cap_process_reqseq(chan, control->reqseq);
6565 if (!skb_queue_empty(&chan->tx_q))
6566 chan->tx_send_head = skb_peek(&chan->tx_q);
6568 chan->tx_send_head = NULL;
6570 /* Rewind next_tx_seq to the point expected
6573 chan->next_tx_seq = control->reqseq;
6574 chan->unacked_frames = 0;
6576 err = l2cap_finish_move(chan);
6580 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6581 l2cap_send_i_or_rr_or_rnr(chan);
6583 if (event == L2CAP_EV_RECV_IFRAME)
6586 return l2cap_rx_state_recv(chan, control, NULL, event);
6589 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6590 struct l2cap_ctrl *control,
6591 struct sk_buff *skb, u8 event)
6595 if (!control->final)
6598 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6600 chan->rx_state = L2CAP_RX_STATE_RECV;
6601 l2cap_process_reqseq(chan, control->reqseq);
6603 if (!skb_queue_empty(&chan->tx_q))
6604 chan->tx_send_head = skb_peek(&chan->tx_q);
6606 chan->tx_send_head = NULL;
6608 /* Rewind next_tx_seq to the point expected
6611 chan->next_tx_seq = control->reqseq;
6612 chan->unacked_frames = 0;
6615 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6617 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6619 err = l2cap_resegment(chan);
6622 err = l2cap_rx_state_recv(chan, control, skb, event);
6627 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6629 /* Make sure reqseq is for a packet that has been sent but not acked */
6632 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6633 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6636 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6637 struct sk_buff *skb, u8 event)
6641 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6642 control, skb, event, chan->rx_state);
6644 if (__valid_reqseq(chan, control->reqseq)) {
6645 switch (chan->rx_state) {
6646 case L2CAP_RX_STATE_RECV:
6647 err = l2cap_rx_state_recv(chan, control, skb, event);
6649 case L2CAP_RX_STATE_SREJ_SENT:
6650 err = l2cap_rx_state_srej_sent(chan, control, skb,
6653 case L2CAP_RX_STATE_WAIT_P:
6654 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6656 case L2CAP_RX_STATE_WAIT_F:
6657 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6664 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6665 control->reqseq, chan->next_tx_seq,
6666 chan->expected_ack_seq);
6667 l2cap_send_disconn_req(chan, ECONNRESET);
6673 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6674 struct sk_buff *skb)
6678 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6681 if (l2cap_classify_txseq(chan, control->txseq) ==
6682 L2CAP_TXSEQ_EXPECTED) {
6683 l2cap_pass_to_tx(chan, control);
6685 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6686 __next_seq(chan, chan->buffer_seq));
6688 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6690 l2cap_reassemble_sdu(chan, skb, control);
6693 kfree_skb(chan->sdu);
6696 chan->sdu_last_frag = NULL;
6700 BT_DBG("Freeing %p", skb);
6705 chan->last_acked_seq = control->txseq;
6706 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6711 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6713 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6717 __unpack_control(chan, skb);
6722 * We can just drop the corrupted I-frame here.
6723 * Receiver will miss it and start proper recovery
6724 * procedures and ask for retransmission.
6726 if (l2cap_check_fcs(chan, skb))
6729 if (!control->sframe && control->sar == L2CAP_SAR_START)
6730 len -= L2CAP_SDULEN_SIZE;
6732 if (chan->fcs == L2CAP_FCS_CRC16)
6733 len -= L2CAP_FCS_SIZE;
6735 if (len > chan->mps) {
6736 l2cap_send_disconn_req(chan, ECONNRESET);
6740 if (!control->sframe) {
6743 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6744 control->sar, control->reqseq, control->final,
6747 /* Validate F-bit - F=0 always valid, F=1 only
6748 * valid in TX WAIT_F
6750 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6753 if (chan->mode != L2CAP_MODE_STREAMING) {
6754 event = L2CAP_EV_RECV_IFRAME;
6755 err = l2cap_rx(chan, control, skb, event);
6757 err = l2cap_stream_rx(chan, control, skb);
6761 l2cap_send_disconn_req(chan, ECONNRESET);
6763 const u8 rx_func_to_event[4] = {
6764 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6765 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6768 /* Only I-frames are expected in streaming mode */
6769 if (chan->mode == L2CAP_MODE_STREAMING)
6772 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6773 control->reqseq, control->final, control->poll,
6777 BT_ERR("Trailing bytes: %d in sframe", len);
6778 l2cap_send_disconn_req(chan, ECONNRESET);
6782 /* Validate F and P bits */
6783 if (control->final && (control->poll ||
6784 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6787 event = rx_func_to_event[control->super];
6788 if (l2cap_rx(chan, control, skb, event))
6789 l2cap_send_disconn_req(chan, ECONNRESET);
6799 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6801 struct l2cap_conn *conn = chan->conn;
6802 struct l2cap_le_credits pkt;
6805 /* We return more credits to the sender only after the amount of
6806 * credits falls below half of the initial amount.
6808 if (chan->rx_credits >= (L2CAP_LE_MAX_CREDITS + 1) / 2)
6811 return_credits = L2CAP_LE_MAX_CREDITS - chan->rx_credits;
6813 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6815 chan->rx_credits += return_credits;
6817 pkt.cid = cpu_to_le16(chan->scid);
6818 pkt.credits = cpu_to_le16(return_credits);
6820 chan->ident = l2cap_get_ident(conn);
6822 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6825 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6829 if (!chan->rx_credits) {
6830 BT_ERR("No credits to receive LE L2CAP data");
6834 if (chan->imtu < skb->len) {
6835 BT_ERR("Too big LE L2CAP PDU");
6840 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6842 l2cap_chan_le_send_credits(chan);
6849 sdu_len = get_unaligned_le16(skb->data);
6850 skb_pull(skb, L2CAP_SDULEN_SIZE);
6852 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6853 sdu_len, skb->len, chan->imtu);
6855 if (sdu_len > chan->imtu) {
6856 BT_ERR("Too big LE L2CAP SDU length received");
6861 if (skb->len > sdu_len) {
6862 BT_ERR("Too much LE L2CAP data received");
6867 if (skb->len == sdu_len)
6868 return chan->ops->recv(chan, skb);
6871 chan->sdu_len = sdu_len;
6872 chan->sdu_last_frag = skb;
6877 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6878 chan->sdu->len, skb->len, chan->sdu_len);
6880 if (chan->sdu->len + skb->len > chan->sdu_len) {
6881 BT_ERR("Too much LE L2CAP data received");
6886 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6889 if (chan->sdu->len == chan->sdu_len) {
6890 err = chan->ops->recv(chan, chan->sdu);
6893 chan->sdu_last_frag = NULL;
6901 kfree_skb(chan->sdu);
6903 chan->sdu_last_frag = NULL;
6907 /* We can't return an error here since we took care of the skb
6908 * freeing internally. An error return would cause the caller to
6909 * do a double-free of the skb.
6914 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6915 struct sk_buff *skb)
6917 struct l2cap_chan *chan;
6919 chan = l2cap_get_chan_by_scid(conn, cid);
6921 if (cid == L2CAP_CID_A2MP) {
6922 chan = a2mp_channel_create(conn, skb);
6928 l2cap_chan_lock(chan);
6930 BT_DBG("unknown cid 0x%4.4x", cid);
6931 /* Drop packet and return */
6937 BT_DBG("chan %p, len %d", chan, skb->len);
6939 if (chan->state != BT_CONNECTED)
6942 switch (chan->mode) {
6943 case L2CAP_MODE_LE_FLOWCTL:
6944 if (l2cap_le_data_rcv(chan, skb) < 0)
6949 case L2CAP_MODE_BASIC:
6950 /* If socket recv buffers overflows we drop data here
6951 * which is *bad* because L2CAP has to be reliable.
6952 * But we don't have any other choice. L2CAP doesn't
6953 * provide flow control mechanism. */
6955 if (chan->imtu < skb->len)
6958 if (!chan->ops->recv(chan, skb))
6962 case L2CAP_MODE_ERTM:
6963 case L2CAP_MODE_STREAMING:
6964 l2cap_data_rcv(chan, skb);
6968 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6976 l2cap_chan_unlock(chan);
6979 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6980 struct sk_buff *skb)
6982 struct hci_conn *hcon = conn->hcon;
6983 struct l2cap_chan *chan;
6985 if (hcon->type != ACL_LINK)
6988 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6993 BT_DBG("chan %p, len %d", chan, skb->len);
6995 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6998 if (chan->imtu < skb->len)
7001 /* Store remote BD_ADDR and PSM for msg_name */
7002 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
7003 bt_cb(skb)->psm = psm;
7005 if (!chan->ops->recv(chan, skb))
7012 static void l2cap_att_channel(struct l2cap_conn *conn,
7013 struct sk_buff *skb)
7015 struct hci_conn *hcon = conn->hcon;
7016 struct l2cap_chan *chan;
7018 if (hcon->type != LE_LINK)
7021 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
7022 &hcon->src, &hcon->dst);
7026 BT_DBG("chan %p, len %d", chan, skb->len);
7028 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
7031 if (chan->imtu < skb->len)
7034 if (!chan->ops->recv(chan, skb))
7041 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7043 struct l2cap_hdr *lh = (void *) skb->data;
7047 skb_pull(skb, L2CAP_HDR_SIZE);
7048 cid = __le16_to_cpu(lh->cid);
7049 len = __le16_to_cpu(lh->len);
7051 if (len != skb->len) {
7056 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7059 case L2CAP_CID_SIGNALING:
7060 l2cap_sig_channel(conn, skb);
7063 case L2CAP_CID_CONN_LESS:
7064 psm = get_unaligned((__le16 *) skb->data);
7065 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7066 l2cap_conless_channel(conn, psm, skb);
7070 l2cap_att_channel(conn, skb);
7073 case L2CAP_CID_LE_SIGNALING:
7074 l2cap_le_sig_channel(conn, skb);
7078 if (smp_sig_channel(conn, skb))
7079 l2cap_conn_del(conn->hcon, EACCES);
7083 l2cap_data_channel(conn, cid, skb);
7088 /* ---- L2CAP interface with lower layer (HCI) ---- */
7090 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7092 int exact = 0, lm1 = 0, lm2 = 0;
7093 struct l2cap_chan *c;
7095 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7097 /* Find listening sockets and check their link_mode */
7098 read_lock(&chan_list_lock);
7099 list_for_each_entry(c, &chan_list, global_l) {
7100 if (c->state != BT_LISTEN)
7103 if (!bacmp(&c->src, &hdev->bdaddr)) {
7104 lm1 |= HCI_LM_ACCEPT;
7105 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7106 lm1 |= HCI_LM_MASTER;
7108 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7109 lm2 |= HCI_LM_ACCEPT;
7110 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7111 lm2 |= HCI_LM_MASTER;
7114 read_unlock(&chan_list_lock);
7116 return exact ? lm1 : lm2;
7119 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7121 struct l2cap_conn *conn;
7123 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7126 conn = l2cap_conn_add(hcon);
7128 l2cap_conn_ready(conn);
7130 l2cap_conn_del(hcon, bt_to_errno(status));
7134 int l2cap_disconn_ind(struct hci_conn *hcon)
7136 struct l2cap_conn *conn = hcon->l2cap_data;
7138 BT_DBG("hcon %p", hcon);
7141 return HCI_ERROR_REMOTE_USER_TERM;
7142 return conn->disc_reason;
7145 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7147 BT_DBG("hcon %p reason %d", hcon, reason);
7149 l2cap_conn_del(hcon, bt_to_errno(reason));
7152 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7154 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7157 if (encrypt == 0x00) {
7158 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7159 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7160 } else if (chan->sec_level == BT_SECURITY_HIGH)
7161 l2cap_chan_close(chan, ECONNREFUSED);
7163 if (chan->sec_level == BT_SECURITY_MEDIUM)
7164 __clear_chan_timer(chan);
7168 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7170 struct l2cap_conn *conn = hcon->l2cap_data;
7171 struct l2cap_chan *chan;
7176 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7178 if (hcon->type == LE_LINK) {
7179 if (!status && encrypt)
7180 smp_distribute_keys(conn, 0);
7181 cancel_delayed_work(&conn->security_timer);
7184 mutex_lock(&conn->chan_lock);
7186 list_for_each_entry(chan, &conn->chan_l, list) {
7187 l2cap_chan_lock(chan);
7189 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7190 state_to_string(chan->state));
7192 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
7193 l2cap_chan_unlock(chan);
7197 if (chan->scid == L2CAP_CID_ATT) {
7198 if (!status && encrypt) {
7199 chan->sec_level = hcon->sec_level;
7200 l2cap_chan_ready(chan);
7203 l2cap_chan_unlock(chan);
7207 if (!__l2cap_no_conn_pending(chan)) {
7208 l2cap_chan_unlock(chan);
7212 if (!status && (chan->state == BT_CONNECTED ||
7213 chan->state == BT_CONFIG)) {
7214 chan->ops->resume(chan);
7215 l2cap_check_encryption(chan, encrypt);
7216 l2cap_chan_unlock(chan);
7220 if (chan->state == BT_CONNECT) {
7222 l2cap_start_connection(chan);
7224 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7225 } else if (chan->state == BT_CONNECT2) {
7226 struct l2cap_conn_rsp rsp;
7230 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7231 res = L2CAP_CR_PEND;
7232 stat = L2CAP_CS_AUTHOR_PEND;
7233 chan->ops->defer(chan);
7235 l2cap_state_change(chan, BT_CONFIG);
7236 res = L2CAP_CR_SUCCESS;
7237 stat = L2CAP_CS_NO_INFO;
7240 l2cap_state_change(chan, BT_DISCONN);
7241 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7242 res = L2CAP_CR_SEC_BLOCK;
7243 stat = L2CAP_CS_NO_INFO;
7246 rsp.scid = cpu_to_le16(chan->dcid);
7247 rsp.dcid = cpu_to_le16(chan->scid);
7248 rsp.result = cpu_to_le16(res);
7249 rsp.status = cpu_to_le16(stat);
7250 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7253 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7254 res == L2CAP_CR_SUCCESS) {
7256 set_bit(CONF_REQ_SENT, &chan->conf_state);
7257 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7259 l2cap_build_conf_req(chan, buf),
7261 chan->num_conf_req++;
7265 l2cap_chan_unlock(chan);
7268 mutex_unlock(&conn->chan_lock);
7273 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7275 struct l2cap_conn *conn = hcon->l2cap_data;
7276 struct l2cap_hdr *hdr;
7279 /* For AMP controller do not create l2cap conn */
7280 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7284 conn = l2cap_conn_add(hcon);
7289 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7293 case ACL_START_NO_FLUSH:
7296 BT_ERR("Unexpected start frame (len %d)", skb->len);
7297 kfree_skb(conn->rx_skb);
7298 conn->rx_skb = NULL;
7300 l2cap_conn_unreliable(conn, ECOMM);
7303 /* Start fragment always begin with Basic L2CAP header */
7304 if (skb->len < L2CAP_HDR_SIZE) {
7305 BT_ERR("Frame is too short (len %d)", skb->len);
7306 l2cap_conn_unreliable(conn, ECOMM);
7310 hdr = (struct l2cap_hdr *) skb->data;
7311 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7313 if (len == skb->len) {
7314 /* Complete frame received */
7315 l2cap_recv_frame(conn, skb);
7319 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7321 if (skb->len > len) {
7322 BT_ERR("Frame is too long (len %d, expected len %d)",
7324 l2cap_conn_unreliable(conn, ECOMM);
7328 /* Allocate skb for the complete frame (with header) */
7329 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7333 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7335 conn->rx_len = len - skb->len;
7339 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7341 if (!conn->rx_len) {
7342 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7343 l2cap_conn_unreliable(conn, ECOMM);
7347 if (skb->len > conn->rx_len) {
7348 BT_ERR("Fragment is too long (len %d, expected %d)",
7349 skb->len, conn->rx_len);
7350 kfree_skb(conn->rx_skb);
7351 conn->rx_skb = NULL;
7353 l2cap_conn_unreliable(conn, ECOMM);
7357 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7359 conn->rx_len -= skb->len;
7361 if (!conn->rx_len) {
7362 /* Complete frame received. l2cap_recv_frame
7363 * takes ownership of the skb so set the global
7364 * rx_skb pointer to NULL first.
7366 struct sk_buff *rx_skb = conn->rx_skb;
7367 conn->rx_skb = NULL;
7368 l2cap_recv_frame(conn, rx_skb);
7378 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7380 struct l2cap_chan *c;
7382 read_lock(&chan_list_lock);
7384 list_for_each_entry(c, &chan_list, global_l) {
7385 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7387 c->state, __le16_to_cpu(c->psm),
7388 c->scid, c->dcid, c->imtu, c->omtu,
7389 c->sec_level, c->mode);
7392 read_unlock(&chan_list_lock);
7397 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7399 return single_open(file, l2cap_debugfs_show, inode->i_private);
7402 static const struct file_operations l2cap_debugfs_fops = {
7403 .open = l2cap_debugfs_open,
7405 .llseek = seq_lseek,
7406 .release = single_release,
7409 static struct dentry *l2cap_debugfs;
7411 int __init l2cap_init(void)
7415 err = l2cap_init_sockets();
7419 if (IS_ERR_OR_NULL(bt_debugfs))
7422 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7423 NULL, &l2cap_debugfs_fops);
7428 void l2cap_exit(void)
7430 debugfs_remove(l2cap_debugfs);
7431 l2cap_cleanup_sockets();
7434 module_param(disable_ertm, bool, 0644);
7435 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");