2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/*
 * NOTE(review): this extract is a lossy, line-numbered dump of the L2CAP
 * core; many interior lines (braces, else branches, returns) are missing.
 * Comments below describe only what the visible lines show.
 */
/* Feature mask advertised in Information Responses: fixed channels and
 * Unicast Connectionless Data are enabled by default. */
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
/* Fixed-channel bitmap: signalling and connectionless channels. */
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global list of every L2CAP channel, protected by chan_list_lock. */
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file. */
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
/* Map an HCI address type to a bdaddr type.  For LE links, public maps to
 * BDADDR_LE_PUBLIC and anything else to BDADDR_LE_RANDOM.  The non-LE
 * branch is not visible in this extract — presumably returns a BR/EDR
 * type; confirm against the full source. */
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l for a channel matching a destination CID.
 * Caller must hold conn->chan_lock (no locking visible here). */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
/* Same walk, keyed on the source CID. */
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
/* Lines 109-110 (presumably locking the found channel) are missing
 * from this extract. */
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
/* Lookup by the signalling command identifier last sent on the channel;
 * unlocked variant plus a chan_lock-holding wrapper below. */
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
/* Global lookup: any registered channel bound to this PSM and source
 * address.  Caller must hold chan_list_lock. */
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM for the given source address.  A caller-chosen
 * PSM fails if already taken; otherwise (psm == 0) a dynamic PSM is
 * scanned from the odd range 0x1001..0x10ff in steps of 2. */
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic allocation path: first free odd PSM wins. */
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
/* Register a fixed source CID on the channel (assignment line itself is
 * missing from this extract; only the locking is visible). */
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic CID on this connection. */
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition the channel state and notify the owner via the state_change
 * callback (err == 0). */
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state, 0);
/* As above but also carries an error code.  The assignment of the new
 * state (presumably between lines 235 and 239) is missing here. */
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
239 chan->ops->state_change(chan, chan->state, err);
/* Report an error without changing state. */
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
244 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer — only when the monitor timer is not
 * already pending and a retransmission timeout is configured. */
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the monitor timer; the retransmission timer is cancelled first
 * since the two are mutually exclusive. */
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of a queue for the skb carrying a given ERTM tx sequence
 * number. */
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array.  Capacity is rounded up to a
 * power of two so `seq & mask` maps any 14-bit sequence number into the
 * array. */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array. */
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove `seq` from the list.  O(1) when removing the head, otherwise a
 * walk from the head to find the predecessor. */
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
/* Pop the head — constant time by construction. */
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset to the empty state; no-op if already empty. */
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append at the tail in O(1); duplicates are ignored (a seq already in
 * the list maps to a non-CLEAR slot). */
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with a
 * reason derived from its current state, then drops the work's channel
 * reference. */
396 static void l2cap_chan_timeout(struct work_struct *work)
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
400 struct l2cap_conn *conn = chan->conn;
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
/* The default-reason branch (lines 413-414) is missing here. */
416 l2cap_chan_close(chan, reason);
418 l2cap_chan_unlock(chan);
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
423 l2cap_chan_put(chan);
/* Allocate a channel, register it on the global list and initialise its
 * lock, timer, state and refcount.  Returns NULL handling is not visible
 * in this extract. */
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list (freeing of the
 * struct itself is not visible in this extract). */
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
/* Refcount take/drop; destroy runs when the last reference is put. */
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
/* Reset negotiable parameters to their spec defaults before (re)config. */
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Initialise LE credit-based flow control: no tx credits until granted
 * by the peer, local rx credits at the maximum. */
493 void l2cap_le_flowctl_init(struct l2cap_chan *chan)
495 chan->imtu = L2CAP_DEFAULT_MTU;
496 chan->omtu = L2CAP_LE_MIN_MTU;
497 chan->mode = L2CAP_MODE_LE_FLOWCTL;
498 chan->tx_credits = 0;
499 chan->rx_credits = L2CAP_LE_MAX_CREDITS;
/* Attach a channel to a connection: assign CIDs and MTUs per channel
 * type, seed the EFS locals, take channel + hci_conn references and link
 * into conn->chan_l.  Caller holds conn->chan_lock (see l2cap_chan_add). */
502 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
504 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
505 __le16_to_cpu(chan->psm), chan->dcid);
507 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
511 switch (chan->chan_type) {
512 case L2CAP_CHAN_CONN_ORIENTED:
513 if (conn->hcon->type == LE_LINK) {
515 chan->omtu = L2CAP_DEFAULT_MTU;
516 if (chan->dcid == L2CAP_CID_ATT)
517 chan->scid = L2CAP_CID_ATT;
519 chan->scid = l2cap_alloc_cid(conn);
521 /* Alloc CID for connection-oriented socket */
522 chan->scid = l2cap_alloc_cid(conn);
523 chan->omtu = L2CAP_DEFAULT_MTU;
527 case L2CAP_CHAN_CONN_LESS:
528 /* Connectionless socket */
529 chan->scid = L2CAP_CID_CONN_LESS;
530 chan->dcid = L2CAP_CID_CONN_LESS;
531 chan->omtu = L2CAP_DEFAULT_MTU;
534 case L2CAP_CHAN_CONN_FIX_A2MP:
535 chan->scid = L2CAP_CID_A2MP;
536 chan->dcid = L2CAP_CID_A2MP;
537 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
538 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
/* Default case (raw sockets) — the `default:` label itself is missing
 * from this extract. */
542 /* Raw socket can send/recv signalling messages only */
543 chan->scid = L2CAP_CID_SIGNALING;
544 chan->dcid = L2CAP_CID_SIGNALING;
545 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort EFS defaults, applied regardless of channel type. */
548 chan->local_id = L2CAP_BESTEFFORT_ID;
549 chan->local_stype = L2CAP_SERV_BESTEFFORT;
550 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
551 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
552 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
553 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
555 l2cap_chan_hold(chan);
557 hci_conn_hold(conn->hcon);
559 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
562 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
564 mutex_lock(&conn->chan_lock);
565 __l2cap_chan_add(conn, chan);
566 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: unlink, drop the references
 * taken in __l2cap_chan_add, tear down AMP logical links, notify the
 * owner, and purge mode-specific queues/timers. */
569 void l2cap_chan_del(struct l2cap_chan *chan, int err)
571 struct l2cap_conn *conn = chan->conn;
573 __clear_chan_timer(chan);
575 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
578 struct amp_mgr *mgr = conn->hcon->amp_mgr;
579 /* Delete from channel list */
580 list_del(&chan->list);
582 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an hci_conn reference. */
586 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
587 hci_conn_drop(conn->hcon);
589 if (mgr && mgr->bredr_chan == chan)
590 mgr->bredr_chan = NULL;
593 if (chan->hs_hchan) {
594 struct hci_chan *hs_hchan = chan->hs_hchan;
596 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
597 amp_disconnect_logical_link(hs_hchan);
600 chan->ops->teardown(chan, err);
/* Nothing more to clean if configuration never completed. */
602 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Per-mode cleanup (the `switch (chan->mode)` line is missing here). */
606 case L2CAP_MODE_BASIC:
609 case L2CAP_MODE_LE_FLOWCTL:
610 skb_queue_purge(&chan->tx_q);
613 case L2CAP_MODE_ERTM:
614 __clear_retrans_timer(chan);
615 __clear_monitor_timer(chan);
616 __clear_ack_timer(chan);
618 skb_queue_purge(&chan->srej_q);
620 l2cap_seq_list_free(&chan->srej_list);
621 l2cap_seq_list_free(&chan->retrans_list);
625 case L2CAP_MODE_STREAMING:
626 skb_queue_purge(&chan->tx_q);
/* Reject an incoming LE credit-based connection: authorization-pending
 * result when DEFER_SETUP is set, otherwise bad PSM. */
633 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
635 struct l2cap_conn *conn = chan->conn;
636 struct l2cap_le_conn_rsp rsp;
639 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
640 result = L2CAP_CR_AUTHORIZATION;
642 result = L2CAP_CR_BAD_PSM;
644 l2cap_state_change(chan, BT_DISCONN);
646 rsp.dcid = cpu_to_le16(chan->scid);
647 rsp.mtu = cpu_to_le16(chan->imtu);
648 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
649 rsp.credits = cpu_to_le16(chan->rx_credits);
650 rsp.result = cpu_to_le16(result);
652 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart: security-block result when deferred, else bad PSM. */
656 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
658 struct l2cap_conn *conn = chan->conn;
659 struct l2cap_conn_rsp rsp;
662 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
663 result = L2CAP_CR_SEC_BLOCK;
665 result = L2CAP_CR_BAD_PSM;
667 l2cap_state_change(chan, BT_DISCONN);
669 rsp.scid = cpu_to_le16(chan->dcid);
670 rsp.dcid = cpu_to_le16(chan->scid);
671 rsp.result = cpu_to_le16(result);
672 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
674 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its state: send a Disconnect Request for
 * established connection-oriented channels, reject half-open incoming
 * ones, or simply tear down.  (The `case` labels of the state switch are
 * missing from this extract.) */
677 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
679 struct l2cap_conn *conn = chan->conn;
681 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
683 switch (chan->state) {
685 chan->ops->teardown(chan, 0);
690 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
691 * check for chan->psm.
693 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
694 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
695 l2cap_send_disconn_req(chan, reason);
697 l2cap_chan_del(chan, reason);
/* BT_CONNECT2 path: reject the pending incoming connection. */
701 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
702 if (conn->hcon->type == ACL_LINK)
703 l2cap_chan_connect_reject(chan);
704 else if (conn->hcon->type == LE_LINK)
705 l2cap_chan_le_connect_reject(chan);
708 l2cap_chan_del(chan, reason);
713 l2cap_chan_del(chan, reason);
717 chan->ops->teardown(chan, 0);
/* Map channel type + security level to the HCI authentication type used
 * when establishing the link.  SDP and 3DSP PSMs are special-cased to
 * demand no bonding (and downgrade LOW to the SDP security level). */
722 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
724 switch (chan->chan_type) {
/* Raw-channel branch (its `case` label is missing in this extract):
 * dedicated bonding by security level. */
726 switch (chan->sec_level) {
727 case BT_SECURITY_HIGH:
728 return HCI_AT_DEDICATED_BONDING_MITM;
729 case BT_SECURITY_MEDIUM:
730 return HCI_AT_DEDICATED_BONDING;
732 return HCI_AT_NO_BONDING;
735 case L2CAP_CHAN_CONN_LESS:
736 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
737 if (chan->sec_level == BT_SECURITY_LOW)
738 chan->sec_level = BT_SECURITY_SDP;
740 if (chan->sec_level == BT_SECURITY_HIGH)
741 return HCI_AT_NO_BONDING_MITM;
743 return HCI_AT_NO_BONDING;
745 case L2CAP_CHAN_CONN_ORIENTED:
746 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
747 if (chan->sec_level == BT_SECURITY_LOW)
748 chan->sec_level = BT_SECURITY_SDP;
750 if (chan->sec_level == BT_SECURITY_HIGH)
751 return HCI_AT_NO_BONDING_MITM;
753 return HCI_AT_NO_BONDING;
/* Fallthrough default: general bonding by security level. */
757 switch (chan->sec_level) {
758 case BT_SECURITY_HIGH:
759 return HCI_AT_GENERAL_BONDING_MITM;
760 case BT_SECURITY_MEDIUM:
761 return HCI_AT_GENERAL_BONDING;
763 return HCI_AT_NO_BONDING;
769 /* Service level security */
/* LE links delegate to SMP; BR/EDR asks HCI for link-level security. */
770 int l2cap_chan_check_security(struct l2cap_chan *chan)
772 struct l2cap_conn *conn = chan->conn;
775 if (conn->hcon->type == LE_LINK)
776 return smp_conn_security(conn->hcon, chan->sec_level);
778 auth_type = l2cap_get_auth_type(chan);
780 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier; kernel uses 1-128, wrapping
 * within that range under conn->lock. */
783 static u8 l2cap_get_ident(struct l2cap_conn *conn)
787 /* Get next available identificator.
788 * 1 - 128 are used by kernel.
789 * 129 - 199 are reserved.
790 * 200 - 254 are used by utilities like l2ping, etc.
793 spin_lock(&conn->lock);
795 if (++conn->tx_ident > 128)
800 spin_unlock(&conn->lock);
/* Build a signalling command and push it to HCI on the connection's
 * channel.  Signalling frames are sent non-flushable when the controller
 * supports it, at maximum priority and force-active power. */
805 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
808 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
811 BT_DBG("code 0x%2.2x", code);
816 if (lmp_no_flush_capable(conn->hcon->hdev))
817 flags = ACL_START_NO_FLUSH;
821 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
822 skb->priority = HCI_PRIO_MAX;
824 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any state other than
 * stable or wait-prepare). */
827 static bool __chan_is_moving(struct l2cap_chan *chan)
829 return chan->move_state != L2CAP_MOVE_STABLE &&
830 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data frame: route via the high-speed (AMP) hci_chan when one
 * is attached and no move is in flight, otherwise over the BR/EDR ACL
 * with flushability derived from the channel flags. */
833 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
835 struct hci_conn *hcon = chan->conn->hcon;
838 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
841 if (chan->hs_hcon && !__chan_is_moving(chan)) {
843 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
850 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
851 lmp_no_flush_capable(hcon->hdev))
852 flags = ACL_START_NO_FLUSH;
856 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
857 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into l2cap_ctrl: reqseq/final
 * always; poll+super for S-frames, sar+txseq for I-frames. */
860 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
862 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
863 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
865 if (enh & L2CAP_CTRL_FRAME_TYPE) {
868 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
869 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
876 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
877 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decoding for the 32-bit extended control field. */
884 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
886 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
887 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
889 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
892 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
893 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
900 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
901 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull the control field off the front of an incoming skb, choosing the
 * extended or enhanced layout by FLAG_EXT_CTRL. */
908 static inline void __unpack_control(struct l2cap_chan *chan,
911 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
912 __unpack_extended_control(get_unaligned_le32(skb->data),
913 &bt_cb(skb)->control);
914 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
916 __unpack_enhanced_control(get_unaligned_le16(skb->data),
917 &bt_cb(skb)->control);
918 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control: build the 32-bit on-air field. */
922 static u32 __pack_extended_control(struct l2cap_ctrl *control)
926 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
927 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
929 if (control->sframe) {
930 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
931 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
932 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
934 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
935 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control: build the 16-bit on-air field. */
941 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
945 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
946 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
948 if (control->sframe) {
949 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
950 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
951 packed |= L2CAP_CTRL_FRAME_TYPE;
953 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
954 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field directly after the basic L2CAP header
 * in an outgoing skb. */
960 static inline void __pack_control(struct l2cap_chan *chan,
961 struct l2cap_ctrl *control,
964 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
965 put_unaligned_le32(__pack_extended_control(control),
966 skb->data + L2CAP_HDR_SIZE);
968 put_unaligned_le16(__pack_enhanced_control(control),
969 skb->data + L2CAP_HDR_SIZE);
/* Header size for ERTM frames: extended vs enhanced control layout. */
973 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
975 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
976 return L2CAP_EXT_HDR_SIZE;
978 return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU: basic header + packed control word, with an FCS
 * trailer appended when CRC16 is negotiated. */
981 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
985 struct l2cap_hdr *lh;
986 int hlen = __ertm_hdr_size(chan);
988 if (chan->fcs == L2CAP_FCS_CRC16)
989 hlen += L2CAP_FCS_SIZE;
991 skb = bt_skb_alloc(hlen, GFP_KERNEL);
994 return ERR_PTR(-ENOMEM);
996 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
997 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
998 lh->cid = cpu_to_le16(chan->dcid);
1000 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1001 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1003 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1005 if (chan->fcs == L2CAP_FCS_CRC16) {
1006 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1007 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1010 skb->priority = HCI_PRIO_MAX;
/* Send a supervisory frame, maintaining RNR-sent state, piggy-backing the
 * F-bit when pending, and resetting the ack bookkeeping for non-SREJ
 * frames.  No-op while an AMP move is in progress. */
1014 static void l2cap_send_sframe(struct l2cap_chan *chan,
1015 struct l2cap_ctrl *control)
1017 struct sk_buff *skb;
1020 BT_DBG("chan %p, control %p", chan, control);
1022 if (!control->sframe)
1025 if (__chan_is_moving(chan))
1028 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1032 if (control->super == L2CAP_SUPER_RR)
1033 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1034 else if (control->super == L2CAP_SUPER_RNR)
1035 set_bit(CONN_RNR_SENT, &chan->conn_state);
1037 if (control->super != L2CAP_SUPER_SREJ) {
1038 chan->last_acked_seq = control->reqseq;
1039 __clear_ack_timer(chan);
1042 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1043 control->final, control->poll, control->super);
1045 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1046 control_field = __pack_extended_control(control);
1048 control_field = __pack_enhanced_control(control);
1050 skb = l2cap_create_sframe_pdu(chan, control_field);
1052 l2cap_do_send(chan, skb);
/* Send RR — or RNR when the local side is busy — acknowledging up to
 * buffer_seq, with the P-bit as requested. */
1055 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1057 struct l2cap_ctrl control;
1059 BT_DBG("chan %p, poll %d", chan, poll);
1061 memset(&control, 0, sizeof(control));
1063 control.poll = poll;
1065 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1066 control.super = L2CAP_SUPER_RNR;
1068 control.super = L2CAP_SUPER_RR;
1070 control.reqseq = chan->buffer_seq;
1071 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding on this channel. */
1074 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1076 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel should go over an AMP controller: requires
 * high-speed enabled, the peer advertising A2MP, at least one non-BR/EDR
 * AMP controller up, and a channel policy that prefers (or requires —
 * the REQUIRED branch is not visible here) AMP. */
1079 static bool __amp_capable(struct l2cap_chan *chan)
1081 struct l2cap_conn *conn = chan->conn;
1082 struct hci_dev *hdev;
1083 bool amp_available = false;
1085 if (!conn->hs_enabled)
1088 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1091 read_lock(&hci_dev_list_lock);
1092 list_for_each_entry(hdev, &hci_dev_list, list) {
1093 if (hdev->amp_type != AMP_TYPE_BREDR &&
1094 test_bit(HCI_UP, &hdev->flags)) {
1095 amp_available = true;
1099 read_unlock(&hci_dev_list_lock);
1101 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1102 return amp_available;
/* Body not visible in this extract; presumably validates the channel's
 * extended flow specification — confirm against the full source. */
1107 static bool l2cap_check_efs(struct l2cap_chan *chan)
1109 /* Check EFS parameters */
/* Send an L2CAP Connection Request for this channel's PSM/SCID and mark
 * the connect as pending. */
1113 void l2cap_send_conn_req(struct l2cap_chan *chan)
1115 struct l2cap_conn *conn = chan->conn;
1116 struct l2cap_conn_req req;
1118 req.scid = cpu_to_le16(chan->scid);
1119 req.psm = chan->psm;
1121 chan->ident = l2cap_get_ident(conn);
1123 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1125 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant: Create Channel Request carrying the target controller id. */
1128 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1130 struct l2cap_create_chan_req req;
1131 req.scid = cpu_to_le16(chan->scid);
1132 req.psm = chan->psm;
1133 req.amp_id = amp_id;
1135 chan->ident = l2cap_get_ident(chan->conn);
1137 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, mark queued
 * frames for (single) retransmission, reset SREJ/retransmit state and
 * park the state machines in their move states. */
1141 static void l2cap_move_setup(struct l2cap_chan *chan)
1143 struct sk_buff *skb;
1145 BT_DBG("chan %p", chan);
1147 if (chan->mode != L2CAP_MODE_ERTM)
1150 __clear_retrans_timer(chan);
1151 __clear_monitor_timer(chan);
1152 __clear_ack_timer(chan);
1154 chan->retry_count = 0;
1155 skb_queue_walk(&chan->tx_q, skb) {
1156 if (bt_cb(skb)->control.retries)
1157 bt_cb(skb)->control.retries = 1;
1162 chan->expected_tx_seq = chan->buffer_seq;
1164 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1165 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1166 l2cap_seq_list_clear(&chan->retrans_list);
1167 l2cap_seq_list_clear(&chan->srej_list);
1168 skb_queue_purge(&chan->srej_q);
1170 chan->tx_state = L2CAP_TX_STATE_XMIT;
1171 chan->rx_state = L2CAP_RX_STATE_MOVE;
1173 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: back to stable, then resynchronise ERTM — the
 * initiator polls the peer (WAIT_F), the responder waits for the poll
 * (WAIT_P). */
1176 static void l2cap_move_done(struct l2cap_chan *chan)
1178 u8 move_role = chan->move_role;
1179 BT_DBG("chan %p", chan);
1181 chan->move_state = L2CAP_MOVE_STABLE;
1182 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1184 if (chan->mode != L2CAP_MODE_ERTM)
1187 switch (move_role) {
1188 case L2CAP_MOVE_ROLE_INITIATOR:
1189 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1190 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1192 case L2CAP_MOVE_ROLE_RESPONDER:
1193 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Reset LE flow-control reassembly state and the tx queue. */
1198 static void l2cap_le_flowctl_start(struct l2cap_chan *chan)
1201 chan->sdu_last_frag = NULL;
1204 skb_queue_head_init(&chan->tx_q);
/* Mark the channel fully connected: clear configuration flags and the
 * channel timer, start LE flow control if applicable, and notify the
 * owner. */
1207 static void l2cap_chan_ready(struct l2cap_chan *chan)
1209 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1210 chan->conf_state = 0;
1211 __clear_chan_timer(chan);
1213 if (chan->mode == L2CAP_MODE_LE_FLOWCTL)
1214 l2cap_le_flowctl_start(chan);
1216 chan->state = BT_CONNECTED;
1218 chan->ops->ready(chan);
/* Send an LE credit-based Connection Request, at most once per channel
 * (guarded by FLAG_LE_CONN_REQ_SENT). */
1221 static void l2cap_le_connect(struct l2cap_chan *chan)
1223 struct l2cap_conn *conn = chan->conn;
1224 struct l2cap_le_conn_req req;
1226 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1229 req.psm = chan->psm;
1230 req.scid = cpu_to_le16(chan->scid);
1231 req.mtu = cpu_to_le16(chan->imtu);
1232 req.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
1233 req.credits = cpu_to_le16(chan->rx_credits);
1235 chan->ident = l2cap_get_ident(conn);
1237 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Drive an LE channel forward once SMP security is satisfied: fixed
 * channels become ready, connecting ones issue the LE connect request.
 * (The no-PSM/fixed-channel test between lines 1245 and 1249 is missing
 * from this extract.) */
1241 static void l2cap_le_start(struct l2cap_chan *chan)
1243 struct l2cap_conn *conn = chan->conn;
1245 if (!smp_conn_security(conn->hcon, chan->sec_level))
1249 l2cap_chan_ready(chan);
1253 if (chan->state == BT_CONNECT)
1254 l2cap_le_connect(chan);
/* Route connection establishment: AMP discovery when capable, LE start
 * for LE links, plain Connection Request otherwise. */
1257 static void l2cap_start_connection(struct l2cap_chan *chan)
1259 if (__amp_capable(chan)) {
1260 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1261 a2mp_discover_amp(chan);
1262 } else if (chan->conn->hcon->type == LE_LINK) {
1263 l2cap_le_start(chan);
1265 l2cap_send_conn_req(chan);
/* Entry point for starting a channel: LE goes straight to l2cap_le_start;
 * BR/EDR first ensures the feature-mask information exchange has been
 * done (sending an Information Request and arming the info timer if not)
 * and that security allows the connection. */
1269 static void l2cap_do_start(struct l2cap_chan *chan)
1271 struct l2cap_conn *conn = chan->conn;
1273 if (conn->hcon->type == LE_LINK) {
1274 l2cap_le_start(chan);
1278 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1279 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1282 if (l2cap_chan_check_security(chan) &&
1283 __l2cap_no_conn_pending(chan)) {
1284 l2cap_start_connection(chan);
1287 struct l2cap_info_req req;
1288 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1290 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1291 conn->info_ident = l2cap_get_ident(conn);
1293 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1295 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check a requested mode against both the local and the peer's feature
 * mask (ERTM/streaming always allowed locally via local_feat_mask). */
1300 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1302 u32 local_feat_mask = l2cap_feat_mask;
1304 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1307 case L2CAP_MODE_ERTM:
1308 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1309 case L2CAP_MODE_STREAMING:
1310 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for the channel (A2MP fixed channels just
 * change state locally), stopping ERTM timers first, then move to
 * BT_DISCONN with the given error. */
1316 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1318 struct l2cap_conn *conn = chan->conn;
1319 struct l2cap_disconn_req req;
1324 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1325 __clear_retrans_timer(chan);
1326 __clear_monitor_timer(chan);
1327 __clear_ack_timer(chan);
1330 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1331 l2cap_state_change(chan, BT_DISCONN);
1335 req.dcid = cpu_to_le16(chan->dcid);
1336 req.scid = cpu_to_le16(chan->scid);
1337 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1340 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1343 /* ---- L2CAP connections ---- */
/* Kick every connection-oriented channel on the connection: channels in
 * BT_CONNECT are (re)started if security and feature checks pass;
 * channels in BT_CONNECT2 (incoming, pending) get their Connection
 * Response sent — deferred, accepted, or authentication-pending — and on
 * success the first Configure Request goes out. */
1344 static void l2cap_conn_start(struct l2cap_conn *conn)
1346 struct l2cap_chan *chan, *tmp;
1348 BT_DBG("conn %p", conn);
1350 mutex_lock(&conn->chan_lock);
1352 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1353 l2cap_chan_lock(chan);
1355 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1356 l2cap_chan_unlock(chan);
1360 if (chan->state == BT_CONNECT) {
1361 if (!l2cap_chan_check_security(chan) ||
1362 !__l2cap_no_conn_pending(chan)) {
1363 l2cap_chan_unlock(chan);
/* Close channels whose configured mode the peer cannot support
 * and that are locked to that mode (state-2 device). */
1367 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1368 && test_bit(CONF_STATE2_DEVICE,
1369 &chan->conf_state)) {
1370 l2cap_chan_close(chan, ECONNRESET);
1371 l2cap_chan_unlock(chan);
1375 l2cap_start_connection(chan);
1377 } else if (chan->state == BT_CONNECT2) {
1378 struct l2cap_conn_rsp rsp;
1380 rsp.scid = cpu_to_le16(chan->dcid);
1381 rsp.dcid = cpu_to_le16(chan->scid);
1383 if (l2cap_chan_check_security(chan)) {
1384 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1385 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1386 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1387 chan->ops->defer(chan);
1390 l2cap_state_change(chan, BT_CONFIG);
1391 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1392 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1395 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1396 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1399 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1402 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1403 rsp.result != L2CAP_CR_SUCCESS) {
1404 l2cap_chan_unlock(chan);
1408 set_bit(CONF_REQ_SENT, &chan->conf_state);
1409 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1410 l2cap_build_conf_req(chan, buf), buf);
1411 chan->num_conf_req++;
1414 l2cap_chan_unlock(chan);
1417 mutex_unlock(&conn->chan_lock);
1420 /* Find socket with cid and source/destination bdaddr.
1421 * Returns closest match, locked.
/* An exact src+dst match returns immediately; otherwise a channel whose
 * src and/or dst is the BDADDR_ANY wildcard is remembered in c1 as the
 * closest-match fallback.  Holds chan_list_lock (read) while scanning.
 */
1423 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1427 struct l2cap_chan *c, *c1 = NULL;
1429 read_lock(&chan_list_lock);
1431 list_for_each_entry(c, &chan_list, global_l) {
/* state == 0 means "any state" */
1432 if (state && c->state != state)
1435 if (c->scid == cid) {
1436 int src_match, dst_match;
1437 int src_any, dst_any;
1440 src_match = !bacmp(&c->src, src);
1441 dst_match = !bacmp(&c->dst, dst);
1442 if (src_match && dst_match) {
/* Perfect match: stop here */
1443 read_unlock(&chan_list_lock);
1448 src_any = !bacmp(&c->src, BDADDR_ANY);
1449 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1450 if ((src_match && dst_any) || (src_any && dst_match) ||
1451 (src_any && dst_any))
1456 read_unlock(&chan_list_lock);
/* An LE link came up: if an ATT server socket is listening and the peer
 * is not blacklisted, spawn a new channel from the listener and attach
 * it to the connection on the fixed ATT CID.
 */
1461 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1463 struct hci_conn *hcon = conn->hcon;
1464 struct l2cap_chan *chan, *pchan;
1469 /* Check if we have socket listening on cid */
1470 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1471 &hcon->src, &hcon->dst);
1475 /* Client ATT sockets should override the server one */
1476 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1479 dst_type = bdaddr_type(hcon, hcon->dst_type);
1481 /* If device is blocked, do not create a channel for it */
1482 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1485 l2cap_chan_lock(pchan);
/* Clone a child channel from the listening (parent) channel */
1487 chan = pchan->ops->new_connection(pchan);
1491 chan->dcid = L2CAP_CID_ATT;
/* Bind the child to the concrete addresses of this link */
1493 bacpy(&chan->src, &hcon->src);
1494 bacpy(&chan->dst, &hcon->dst);
1495 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1496 chan->dst_type = dst_type;
1498 __l2cap_chan_add(conn, chan);
1501 l2cap_chan_unlock(pchan);
/* The underlying HCI link is fully up: kick per-channel setup.  For LE
 * links this also creates the ATT channel and may start SMP; for BR/EDR
 * channels it starts connect/config as appropriate for each state.
 */
1504 static void l2cap_conn_ready(struct l2cap_conn *conn)
1506 struct l2cap_chan *chan;
1507 struct hci_conn *hcon = conn->hcon;
1509 BT_DBG("conn %p", conn);
1511 /* For outgoing pairing which doesn't necessarily have an
1512 * associated socket (e.g. mgmt_pair_device).
1514 if (hcon->out && hcon->type == LE_LINK)
1515 smp_conn_security(hcon, hcon->pending_sec_level);
1517 mutex_lock(&conn->chan_lock);
1519 if (hcon->type == LE_LINK)
1520 l2cap_le_conn_ready(conn);
1522 list_for_each_entry(chan, &conn->chan_l, list) {
1524 l2cap_chan_lock(chan);
/* A2MP fixed channels manage their own lifecycle; skip them */
1526 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1527 l2cap_chan_unlock(chan);
1531 if (hcon->type == LE_LINK) {
1532 l2cap_le_start(chan);
1533 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Connectionless/raw channels are usable as soon as the link is */
1534 l2cap_chan_ready(chan);
1536 } else if (chan->state == BT_CONNECT) {
1537 l2cap_do_start(chan);
1540 l2cap_chan_unlock(chan);
1543 mutex_unlock(&conn->chan_lock);
1546 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate err to every channel on the connection that set
 * FLAG_FORCE_RELIABLE, since reliable delivery can no longer be
 * promised for this link.
 */
1547 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1549 struct l2cap_chan *chan;
1551 BT_DBG("conn %p", conn);
1553 mutex_lock(&conn->chan_lock);
1555 list_for_each_entry(chan, &conn->chan_l, list) {
1556 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1557 l2cap_chan_set_err(chan, err);
1560 mutex_unlock(&conn->chan_lock);
/* Delayed-work handler: the information request (feature mask exchange)
 * timed out.  Mark the exchange done anyway and proceed with channel
 * setup using whatever features we have.
 */
1563 static void l2cap_info_timeout(struct work_struct *work)
1565 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1568 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1569 conn->info_ident = 0;
1571 l2cap_conn_start(conn);
1576 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1577 * callback is called during registration. The ->remove callback is called
1578 * during unregistration.
1579 * An l2cap_user object is unregistered either explicitly or implicitly when
1580 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1581 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1582 * External modules must own a reference to the l2cap_conn object if they intend
1583 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1584 * any time if they don't.
/* Register an external l2cap_user on a connection.  The user's ->probe
 * callback runs during registration; a non-empty user->list means the
 * user is already registered elsewhere and is rejected.
 * Locking: serialized via the hci_dev lock (see comment below).
 */
1587 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1589 struct hci_dev *hdev = conn->hcon->hdev;
1592 /* We need to check whether l2cap_conn is registered. If it is not, we
1593 * must not register the l2cap_user. l2cap_conn_del() unregisters
1594 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1595 * relies on the parent hci_conn object to be locked. This itself relies
1596 * on the hci_dev object to be locked. So we must lock the hci device
/* list.next/prev non-NULL means the user is already on some list */
1601 if (user->list.next || user->list.prev) {
1606 /* conn->hchan is NULL after l2cap_conn_del() was called */
1612 ret = user->probe(conn, user);
1616 list_add(&user->list, &conn->users);
1620 hci_dev_unlock(hdev);
1623 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister an l2cap_user: unlink it, reset its list node
 * to the "not registered" state (both pointers NULL, matching the check
 * in l2cap_register_user()), then invoke its ->remove callback.
 */
1625 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1627 struct hci_dev *hdev = conn->hcon->hdev;
/* Already unregistered: nothing to do */
1631 if (!user->list.next || !user->list.prev)
1634 list_del(&user->list);
1635 user->list.next = NULL;
1636 user->list.prev = NULL;
1637 user->remove(conn, user);
1640 hci_dev_unlock(hdev);
1642 EXPORT_SYMBOL(l2cap_unregister_user);
/* Forcefully unregister every remaining l2cap_user on the connection,
 * invoking each user's ->remove callback.  Used from connection
 * teardown (l2cap_conn_del()).
 */
1644 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1646 struct l2cap_user *user;
1648 while (!list_empty(&conn->users)) {
1649 user = list_first_entry(&conn->users, struct l2cap_user, list);
1650 list_del(&user->list);
/* Same NULL/NULL "not registered" marker as l2cap_unregister_user() */
1651 user->list.next = NULL;
1652 user->list.prev = NULL;
1653 user->remove(conn, user);
/* Tear down an l2cap_conn: notify users, close and release every
 * channel with the given error, drop the HCI channel, cancel pending
 * timers, and finally drop the connection reference.
 * NOTE(review): relies on the caller holding the parent hci_conn/hdev
 * locks for serialization — see comment in l2cap_register_user().
 */
1657 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1659 struct l2cap_conn *conn = hcon->l2cap_data;
1660 struct l2cap_chan *chan, *l;
1665 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Discard any partially reassembled incoming frame */
1667 kfree_skb(conn->rx_skb);
1669 l2cap_unregister_all_users(conn);
1671 mutex_lock(&conn->chan_lock);
/* _safe walk: l2cap_chan_del() unlinks chan from conn->chan_l */
1674 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold a ref so chan survives until after ops->close() below */
1675 l2cap_chan_hold(chan);
1676 l2cap_chan_lock(chan);
1678 l2cap_chan_del(chan, err);
1680 l2cap_chan_unlock(chan);
1682 chan->ops->close(chan);
1683 l2cap_chan_put(chan);
1686 mutex_unlock(&conn->chan_lock);
1688 hci_chan_del(conn->hchan);
1690 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1691 cancel_delayed_work_sync(&conn->info_timer);
1693 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1694 cancel_delayed_work_sync(&conn->security_timer);
1695 smp_chan_destroy(conn);
/* Detach from the hci_conn before dropping our reference */
1698 hcon->l2cap_data = NULL;
1700 l2cap_conn_put(conn);
/* Delayed-work handler: SMP security procedure timed out on an LE
 * link.  If pairing was still pending, destroy the SMP context and
 * tear the connection down with ETIMEDOUT.
 */
1703 static void security_timeout(struct work_struct *work)
1705 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1706 security_timer.work);
1708 BT_DBG("conn %p", conn);
/* test_and_clear avoids racing a concurrent completion/teardown */
1710 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1711 smp_chan_destroy(conn);
1712 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialize the l2cap_conn for an hci_conn: create the
 * HCI channel, pick the MTU from the controller (LE vs ACL), set up
 * locks, channel/user lists and the per-link-type timer.
 * Returns the existing conn if one is already attached.
 */
1716 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1718 struct l2cap_conn *conn = hcon->l2cap_data;
1719 struct hci_chan *hchan;
1724 hchan = hci_chan_create(hcon);
1728 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan we just created */
1730 hci_chan_del(hchan);
1734 kref_init(&conn->ref);
1735 hcon->l2cap_data = conn;
/* conn holds a reference on its hci_conn for its whole lifetime */
1737 hci_conn_get(conn->hcon);
1738 conn->hchan = hchan;
1740 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1742 switch (hcon->type) {
/* LE links use the dedicated LE MTU when the controller reports one */
1744 if (hcon->hdev->le_mtu) {
1745 conn->mtu = hcon->hdev->le_mtu;
1750 conn->mtu = hcon->hdev->acl_mtu;
1754 conn->feat_mask = 0;
1756 if (hcon->type == ACL_LINK)
1757 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1758 &hcon->hdev->dev_flags);
1760 spin_lock_init(&conn->lock);
1761 mutex_init(&conn->chan_lock);
1763 INIT_LIST_HEAD(&conn->chan_l);
1764 INIT_LIST_HEAD(&conn->users);
/* LE uses the SMP security timer; BR/EDR the info-request timer */
1766 if (hcon->type == LE_LINK)
1767 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1769 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1771 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: final reference dropped — release the
 * hci_conn reference taken in l2cap_conn_add() and free the conn.
 */
1776 static void l2cap_conn_free(struct kref *ref)
1778 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1780 hci_conn_put(conn->hcon);
/* Take a reference on the connection (paired with l2cap_conn_put()). */
1784 void l2cap_conn_get(struct l2cap_conn *conn)
1786 kref_get(&conn->ref);
1788 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; frees the conn via l2cap_conn_free() on the last put. */
1790 void l2cap_conn_put(struct l2cap_conn *conn)
1792 kref_put(&conn->ref, l2cap_conn_free);
1794 EXPORT_SYMBOL(l2cap_conn_put);
1796 /* ---- Socket interface ---- */
1798 /* Find socket with psm and source / destination bdaddr.
1799 * Returns closest match.
/* Same closest-match policy as l2cap_global_chan_by_scid(): exact
 * src+dst wins immediately, BDADDR_ANY wildcards are kept in c1 as a
 * fallback.  Additionally filters by link type (BR/EDR vs LE source
 * address type).  Holds chan_list_lock (read) while scanning.
 */
1801 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1806 struct l2cap_chan *c, *c1 = NULL;
1808 read_lock(&chan_list_lock);
1810 list_for_each_entry(c, &chan_list, global_l) {
1811 if (state && c->state != state)
/* Source address type must match the link type of the lookup */
1814 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1817 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1820 if (c->psm == psm) {
1821 int src_match, dst_match;
1822 int src_any, dst_any;
1825 src_match = !bacmp(&c->src, src);
1826 dst_match = !bacmp(&c->dst, dst);
1827 if (src_match && dst_match) {
1828 read_unlock(&chan_list_lock);
1833 src_any = !bacmp(&c->src, BDADDR_ANY);
1834 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1835 if ((src_match && dst_any) || (src_any && dst_match) ||
1836 (src_any && dst_any))
1841 read_unlock(&chan_list_lock);
/* Initiate an outgoing channel connection to dst on the given PSM or
 * fixed CID.  Validates PSM/mode/state, resolves a route (hci_dev),
 * creates or reuses the ACL/LE link, attaches the channel to the conn
 * and kicks off connection establishment.
 * Returns 0 on success or a negative errno.
 */
1846 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1847 bdaddr_t *dst, u8 dst_type)
1849 struct l2cap_conn *conn;
1850 struct hci_conn *hcon;
1851 struct hci_dev *hdev;
1855 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1856 dst_type, __le16_to_cpu(psm));
/* Pick the local adapter that can reach dst */
1858 hdev = hci_get_route(dst, &chan->src);
1860 return -EHOSTUNREACH;
1864 l2cap_chan_lock(chan);
1866 /* PSM must be odd and lsb of upper byte must be 0 */
1867 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1868 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a fixed CID */
1873 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1878 switch (chan->mode) {
1879 case L2CAP_MODE_BASIC:
1880 case L2CAP_MODE_LE_FLOWCTL:
1882 case L2CAP_MODE_ERTM:
1883 case L2CAP_MODE_STREAMING:
1892 switch (chan->state) {
1896 /* Already connecting */
1901 /* Already connected */
1915 /* Set destination address and psm */
1916 bacpy(&chan->dst, dst);
1917 chan->dst_type = dst_type;
1922 auth_type = l2cap_get_auth_type(chan);
/* LE vs BR/EDR link selection is driven by the destination addr type */
1924 if (bdaddr_type_is_le(dst_type))
1925 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1926 chan->sec_level, auth_type);
1928 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1929 chan->sec_level, auth_type);
1932 err = PTR_ERR(hcon);
1936 conn = l2cap_conn_add(hcon);
1938 hci_conn_drop(hcon);
/* Refuse a fixed CID that is already in use on this connection */
1943 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1944 hci_conn_drop(hcon);
1949 /* Update source addr of the socket */
1950 bacpy(&chan->src, &hcon->src);
1951 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* l2cap_chan_add() takes conn->chan_lock; drop chan lock to respect
 * the chan_lock -> chan lock ordering, then re-take it.
 */
1953 l2cap_chan_unlock(chan);
1954 l2cap_chan_add(conn, chan);
1955 l2cap_chan_lock(chan);
1957 /* l2cap_chan_add takes its own ref so we can drop this one */
1958 hci_conn_drop(hcon);
1960 l2cap_state_change(chan, BT_CONNECT);
1961 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* If the link is already up we can skip straight to starting setup */
1963 if (hcon->state == BT_CONNECTED) {
1964 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1965 __clear_chan_timer(chan);
1966 if (l2cap_chan_check_security(chan))
1967 l2cap_state_change(chan, BT_CONNECTED);
1969 l2cap_do_start(chan);
1975 l2cap_chan_unlock(chan);
1976 hci_dev_unlock(hdev);
/* Delayed-work handler for the ERTM monitor timer: feed a MONITOR_TO
 * event into the TX state machine (which re-polls the peer or
 * disconnects after max_tx retries).
 */
1981 static void l2cap_monitor_timeout(struct work_struct *work)
1983 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1984 monitor_timer.work);
1986 BT_DBG("chan %p", chan);
1988 l2cap_chan_lock(chan);
/* Early-exit path: drop the timer's channel reference and bail */
1991 l2cap_chan_unlock(chan);
1992 l2cap_chan_put(chan);
1996 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1998 l2cap_chan_unlock(chan);
1999 l2cap_chan_put(chan);
/* Delayed-work handler for the ERTM retransmission timer: feed a
 * RETRANS_TO event into the TX state machine (which polls the peer
 * with an S-frame and moves to the WAIT_F state).
 */
2002 static void l2cap_retrans_timeout(struct work_struct *work)
2004 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2005 retrans_timer.work);
2007 BT_DBG("chan %p", chan);
2009 l2cap_chan_lock(chan);
/* Early-exit path: drop the timer's channel reference and bail */
2012 l2cap_chan_unlock(chan);
2013 l2cap_chan_put(chan);
2017 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2018 l2cap_chan_unlock(chan);
2019 l2cap_chan_put(chan);
/* Streaming mode transmit: append the segmented SDU to tx_q and send
 * every queued frame immediately — no retransmission, no acks.  Each
 * frame gets a sequential txseq and, if negotiated, a trailing FCS.
 */
2022 static void l2cap_streaming_send(struct l2cap_chan *chan,
2023 struct sk_buff_head *skbs)
2025 struct sk_buff *skb;
2026 struct l2cap_ctrl *control;
2028 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress */
2030 if (__chan_is_moving(chan))
2033 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2035 while (!skb_queue_empty(&chan->tx_q)) {
2037 skb = skb_dequeue(&chan->tx_q);
2039 bt_cb(skb)->control.retries = 1;
2040 control = &bt_cb(skb)->control;
/* Streaming mode never acks, so reqseq is always 0 */
2042 control->reqseq = 0;
2043 control->txseq = chan->next_tx_seq;
2045 __pack_control(chan, control, skb);
2047 if (chan->fcs == L2CAP_FCS_CRC16) {
2048 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2049 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2052 l2cap_do_send(chan, skb);
2054 BT_DBG("Sent txseq %u", control->txseq);
2056 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2057 chan->frames_sent++;
/* ERTM transmit: send I-frames from tx_send_head while the remote TX
 * window has room and the TX state machine is in XMIT.  Each frame is
 * stamped with txseq/reqseq (piggybacked ack), optionally FCS'd, and a
 * clone is handed to the HCI layer so the original stays queued for
 * possible retransmission.  Returns the number of frames sent.
 */
2061 static int l2cap_ertm_send(struct l2cap_chan *chan)
2063 struct sk_buff *skb, *tx_skb;
2064 struct l2cap_ctrl *control;
2067 BT_DBG("chan %p", chan);
2069 if (chan->state != BT_CONNECTED)
/* Peer signalled receiver-not-ready: hold transmission */
2072 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2075 if (__chan_is_moving(chan))
2078 while (chan->tx_send_head &&
2079 chan->unacked_frames < chan->remote_tx_win &&
2080 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2082 skb = chan->tx_send_head;
2084 bt_cb(skb)->control.retries = 1;
2085 control = &bt_cb(skb)->control;
/* Pending F-bit (poll response) rides out on this frame */
2087 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* reqseq piggybacks an ack for everything up to buffer_seq */
2090 control->reqseq = chan->buffer_seq;
2091 chan->last_acked_seq = chan->buffer_seq;
2092 control->txseq = chan->next_tx_seq;
2094 __pack_control(chan, control, skb);
2096 if (chan->fcs == L2CAP_FCS_CRC16) {
2097 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2098 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2101 /* Clone after data has been modified. Data is assumed to be
2102 read-only (for locking purposes) on cloned sk_buffs.
2104 tx_skb = skb_clone(skb, GFP_KERNEL);
2109 __set_retrans_timer(chan);
2111 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2112 chan->unacked_frames++;
2113 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, or NULL at queue end */
2116 if (skb_queue_is_last(&chan->tx_q, skb))
2117 chan->tx_send_head = NULL;
2119 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2121 l2cap_do_send(chan, tx_skb);
2122 BT_DBG("Sent txseq %u", control->txseq);
2125 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2126 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.  For
 * each one: find the frame in tx_q, enforce the max_tx retry limit
 * (disconnecting when exceeded), rebuild its control field and FCS,
 * and send a fresh clone/copy.
 */
2131 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2133 struct l2cap_ctrl control;
2134 struct sk_buff *skb;
2135 struct sk_buff *tx_skb;
2138 BT_DBG("chan %p", chan);
2140 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2143 if (__chan_is_moving(chan))
2146 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2147 seq = l2cap_seq_list_pop(&chan->retrans_list);
2149 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2151 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2156 bt_cb(skb)->control.retries++;
2157 control = bt_cb(skb)->control;
/* max_tx == 0 means "retry forever"; otherwise enforce the limit */
2159 if (chan->max_tx != 0 &&
2160 bt_cb(skb)->control.retries > chan->max_tx) {
2161 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2162 l2cap_send_disconn_req(chan, ECONNRESET);
2163 l2cap_seq_list_clear(&chan->retrans_list);
/* Refresh the piggybacked ack to the current buffer_seq */
2167 control.reqseq = chan->buffer_seq;
2168 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2173 if (skb_cloned(skb)) {
2174 /* Cloned sk_buffs are read-only, so we need a
/* writeable copy to patch the control field below */
2177 tx_skb = skb_copy(skb, GFP_KERNEL);
2179 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failed: abandon this resend pass */
2183 l2cap_seq_list_clear(&chan->retrans_list);
2187 /* Update skb contents */
2188 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2189 put_unaligned_le32(__pack_extended_control(&control),
2190 tx_skb->data + L2CAP_HDR_SIZE);
2192 put_unaligned_le16(__pack_enhanced_control(&control),
2193 tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS appears to be appended here on the resent copy;
 * confirm against the elided lines that the old FCS was trimmed first.
 */
2196 if (chan->fcs == L2CAP_FCS_CRC16) {
2197 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2198 put_unaligned_le16(fcs, skb_put(tx_skb,
2202 l2cap_do_send(chan, tx_skb);
2204 BT_DBG("Resent txseq %d", control.txseq);
2206 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit a single frame: queue the requested sequence number and
 * run the resend engine.
 */
2210 static void l2cap_retransmit(struct l2cap_chan *chan,
2211 struct l2cap_ctrl *control)
2213 BT_DBG("chan %p, control %p", chan, control);
2215 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2216 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting at control->reqseq: rebuild
 * retrans_list from the frames between reqseq and tx_send_head in
 * tx_q, then run the resend engine.
 */
2219 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2220 struct l2cap_ctrl *control)
2222 struct sk_buff *skb;
2224 BT_DBG("chan %p, control %p", chan, control);
2227 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Start from a clean list; reqseq supersedes any pending entries */
2229 l2cap_seq_list_clear(&chan->retrans_list);
2231 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2234 if (chan->unacked_frames) {
/* First walk: locate the frame with txseq == reqseq (or the head
 * of the unsent region, whichever comes first).
 */
2235 skb_queue_walk(&chan->tx_q, skb) {
2236 if (bt_cb(skb)->control.txseq == control->reqseq ||
2237 skb == chan->tx_send_head)
/* Second walk: queue every frame up to (not including) tx_send_head */
2241 skb_queue_walk_from(&chan->tx_q, skb) {
2242 if (skb == chan->tx_send_head)
2245 l2cap_seq_list_append(&chan->retrans_list,
2246 bt_cb(skb)->control.txseq);
2249 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends RNR when the local side is
 * busy; otherwise tries to piggyback the ack on pending I-frames,
 * sends an explicit RR once the unacked window is 3/4 full, or arms
 * the ack timer to batch the ack.
 */
2253 static void l2cap_send_ack(struct l2cap_chan *chan)
2255 struct l2cap_ctrl control;
/* Number of frames received but not yet acknowledged */
2256 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2257 chan->last_acked_seq);
2260 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2261 chan, chan->last_acked_seq, chan->buffer_seq);
2263 memset(&control, 0, sizeof(control));
2266 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2267 chan->rx_state == L2CAP_RX_STATE_RECV) {
2268 __clear_ack_timer(chan);
/* Local busy: tell the peer receiver-not-ready */
2269 control.super = L2CAP_SUPER_RNR;
2270 control.reqseq = chan->buffer_seq;
2271 l2cap_send_sframe(chan, &control);
2273 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2274 l2cap_ertm_send(chan);
2275 /* If any i-frames were sent, they included an ack */
2276 if (chan->buffer_seq == chan->last_acked_seq)
2280 /* Ack now if the window is 3/4ths full.
2281 * Calculate without mul or div
/* threshold = ack_win * 3 (then presumably >> 2 on an elided line) */
2283 threshold = chan->ack_win;
2284 threshold += threshold << 1;
2287 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2290 if (frames_to_ack >= threshold) {
2291 __clear_ack_timer(chan);
2292 control.super = L2CAP_SUPER_RR;
2293 control.reqseq = chan->buffer_seq;
2294 l2cap_send_sframe(chan, &control);
/* Below threshold: defer, the ack timer will flush it */
2299 __set_ack_timer(chan);
/* Copy len bytes of user iovec data into skb: the first `count` bytes
 * go into skb's linear area, the remainder is split into continuation
 * fragments (each at most conn->mtu) chained on skb's frag_list.
 * Returns 0 on success or a negative errno.
 */
2303 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2304 struct msghdr *msg, int len,
2305 int count, struct sk_buff *skb)
2307 struct l2cap_conn *conn = chan->conn;
2308 struct sk_buff **frag;
2311 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2317 /* Continuation fragments (no L2CAP header) */
2318 frag = &skb_shinfo(skb)->frag_list;
2320 struct sk_buff *tmp;
2322 count = min_t(unsigned int, conn->mtu, len);
2324 tmp = chan->ops->alloc_skb(chan, count,
2325 msg->msg_flags & MSG_DONTWAIT);
2327 return PTR_ERR(tmp);
2331 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2334 (*frag)->priority = skb->priority;
/* Keep the head skb's accounting in sync with the chained data */
2339 skb->len += (*frag)->len;
2340 skb->data_len += (*frag)->len;
2342 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the user payload from msg.  Returns the skb
 * or ERR_PTR on failure.
 */
2348 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2349 struct msghdr *msg, size_t len,
2352 struct l2cap_conn *conn = chan->conn;
2353 struct sk_buff *skb;
2354 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2355 struct l2cap_hdr *lh;
2357 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2358 __le16_to_cpu(chan->psm), len, priority);
/* Linear part is capped by the HCI MTU; rest goes into fragments */
2360 count = min_t(unsigned int, (conn->mtu - hlen), len);
2362 skb = chan->ops->alloc_skb(chan, count + hlen,
2363 msg->msg_flags & MSG_DONTWAIT);
2367 skb->priority = priority;
2369 /* Create L2CAP header */
2370 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2371 lh->cid = cpu_to_le16(chan->dcid);
/* Header length covers payload plus the PSM field */
2372 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2373 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2375 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2376 if (unlikely(err < 0)) {
2378 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload from msg.  Returns the skb or ERR_PTR on failure.
 */
2383 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2384 struct msghdr *msg, size_t len,
2387 struct l2cap_conn *conn = chan->conn;
2388 struct sk_buff *skb;
2390 struct l2cap_hdr *lh;
2392 BT_DBG("chan %p len %zu", chan, len);
2394 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2396 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2397 msg->msg_flags & MSG_DONTWAIT);
2401 skb->priority = priority;
2403 /* Create L2CAP header */
2404 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2405 lh->cid = cpu_to_le16(chan->dcid);
2406 lh->len = cpu_to_le16(len);
2408 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2409 if (unlikely(err < 0)) {
2411 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at send time), optional SDU-length field for the
 * first segment, the payload, and room reserved for the FCS.
 * Returns the skb or ERR_PTR on failure.
 */
2416 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2417 struct msghdr *msg, size_t len,
2420 struct l2cap_conn *conn = chan->conn;
2421 struct sk_buff *skb;
2422 int err, count, hlen;
2423 struct l2cap_hdr *lh;
2425 BT_DBG("chan %p len %zu", chan, len);
2428 return ERR_PTR(-ENOTCONN);
/* Header size depends on enhanced vs extended control field */
2430 hlen = __ertm_hdr_size(chan);
/* First segment of a segmented SDU carries the total SDU length */
2433 hlen += L2CAP_SDULEN_SIZE;
2435 if (chan->fcs == L2CAP_FCS_CRC16)
2436 hlen += L2CAP_FCS_SIZE;
2438 count = min_t(unsigned int, (conn->mtu - hlen), len);
2440 skb = chan->ops->alloc_skb(chan, count + hlen,
2441 msg->msg_flags & MSG_DONTWAIT);
2445 /* Create L2CAP header */
2446 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2447 lh->cid = cpu_to_le16(chan->dcid);
2448 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2450 /* Control header is populated later */
2451 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2452 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2454 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2457 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2459 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2460 if (unlikely(err < 0)) {
2462 return ERR_PTR(err);
/* Stash per-frame control metadata for the TX state machine */
2465 bt_cb(skb)->control.fcs = chan->fcs;
2466 bt_cb(skb)->control.retries = 0;
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs and queue
 * them on seg_queue with SAR markers (UNSEGMENTED, or START /
 * CONTINUE / END).  PDU size is bounded by the HCI MTU, the BR/EDR
 * payload cap and the remote MPS.  Returns 0 or a negative errno.
 */
2470 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2471 struct sk_buff_head *seg_queue,
2472 struct msghdr *msg, size_t len)
2474 struct sk_buff *skb;
2479 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2481 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2482 * so fragmented skbs are not used. The HCI layer's handling
2483 * of fragmented skbs is not compatible with ERTM's queueing.
2486 /* PDU size is derived from the HCI MTU */
2487 pdu_len = chan->conn->mtu;
2489 /* Constrain PDU size for BR/EDR connections */
2491 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2493 /* Adjust for largest possible L2CAP overhead. */
2495 pdu_len -= L2CAP_FCS_SIZE;
2497 pdu_len -= __ertm_hdr_size(chan);
2499 /* Remote device may have requested smaller PDUs */
2500 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2502 if (len <= pdu_len) {
/* Whole SDU fits in one PDU: no SAR needed */
2503 sar = L2CAP_SAR_UNSEGMENTED;
2507 sar = L2CAP_SAR_START;
/* First segment also carries the 2-byte SDU length field */
2509 pdu_len -= L2CAP_SDULEN_SIZE;
2513 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
/* On failure drop everything segmented so far */
2516 __skb_queue_purge(seg_queue);
2517 return PTR_ERR(skb);
2520 bt_cb(skb)->control.sar = sar;
2521 __skb_queue_tail(seg_queue, skb);
/* After the first segment, reclaim the SDU-length field's space */
2526 pdu_len += L2CAP_SDULEN_SIZE;
2529 if (len <= pdu_len) {
2530 sar = L2CAP_SAR_END;
2533 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based-flow-control PDU: L2CAP header, optional
 * SDU-length field for the first segment, then the user payload.
 * Returns the skb or ERR_PTR on failure.
 */
2540 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2542 size_t len, u16 sdulen)
2544 struct l2cap_conn *conn = chan->conn;
2545 struct sk_buff *skb;
2546 int err, count, hlen;
2547 struct l2cap_hdr *lh;
2549 BT_DBG("chan %p len %zu", chan, len);
2552 return ERR_PTR(-ENOTCONN);
2554 hlen = L2CAP_HDR_SIZE;
/* First segment of a segmented SDU carries the total SDU length */
2557 hlen += L2CAP_SDULEN_SIZE;
2559 count = min_t(unsigned int, (conn->mtu - hlen), len);
2561 skb = chan->ops->alloc_skb(chan, count + hlen,
2562 msg->msg_flags & MSG_DONTWAIT);
2566 /* Create L2CAP header */
2567 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2568 lh->cid = cpu_to_le16(chan->dcid);
2569 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2572 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2574 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2575 if (unlikely(err < 0)) {
2577 return ERR_PTR(err);
/* Segment an outgoing SDU into LE flow-control PDUs bounded by the
 * connection MTU and the remote MPS, queuing them on seg_queue.  The
 * first PDU carries the SDU length.  Returns 0 or a negative errno.
 */
2583 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2584 struct sk_buff_head *seg_queue,
2585 struct msghdr *msg, size_t len)
2587 struct sk_buff *skb;
2591 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2593 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2595 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* First segment loses 2 bytes of payload to the SDU-length field */
2598 pdu_len -= L2CAP_SDULEN_SIZE;
2604 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
/* On failure drop everything segmented so far */
2606 __skb_queue_purge(seg_queue);
2607 return PTR_ERR(skb);
2610 __skb_queue_tail(seg_queue, skb);
/* Subsequent segments get the SDU-length field's space back */
2616 pdu_len += L2CAP_SDULEN_SIZE;
/* Send user data on a channel, dispatching by channel type and mode:
 * connectionless G-frames, LE credit-based flow control, basic-mode
 * B-frames, or ERTM/streaming (segment first, then hand off to the TX
 * machinery).  Returns bytes queued/sent or a negative errno.
 */
2623 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2626 struct sk_buff *skb;
2628 struct sk_buff_head seg_queue;
2633 /* Connectionless channel */
2634 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2635 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2637 return PTR_ERR(skb);
2639 l2cap_do_send(chan, skb);
2643 switch (chan->mode) {
2644 case L2CAP_MODE_LE_FLOWCTL:
2645 /* Check outgoing MTU */
2646 if (len > chan->omtu)
/* No credits left: cannot transmit until the peer grants more */
2649 if (!chan->tx_credits)
2652 __skb_queue_head_init(&seg_queue);
2654 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
/* Channel may have dropped out of BT_CONNECTED while segmenting */
2656 if (chan->state != BT_CONNECTED) {
2657 __skb_queue_purge(&seg_queue);
2664 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
/* Send while credits last; leftovers stay queued on tx_q */
2666 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2667 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Out of credits: ask the socket layer to stop feeding us */
2671 if (!chan->tx_credits)
2672 chan->ops->suspend(chan);
2678 case L2CAP_MODE_BASIC:
2679 /* Check outgoing MTU */
2680 if (len > chan->omtu)
2683 /* Create a basic PDU */
2684 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2686 return PTR_ERR(skb);
2688 l2cap_do_send(chan, skb);
2692 case L2CAP_MODE_ERTM:
2693 case L2CAP_MODE_STREAMING:
2694 /* Check outgoing MTU */
2695 if (len > chan->omtu) {
2700 __skb_queue_head_init(&seg_queue);
2702 /* Do segmentation before calling in to the state machine,
2703 * since it's possible to block while waiting for memory
2706 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2708 /* The channel could have been closed while segmenting,
2709 * check that it is still connected.
2711 if (chan->state != BT_CONNECTED) {
2712 __skb_queue_purge(&seg_queue);
2719 if (chan->mode == L2CAP_MODE_ERTM)
2720 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST)
2722 l2cap_streaming_send(chan, &seg_queue);
2726 /* If the skbs were not queued for sending, they'll still be in
2727 * seg_queue and need to be purged.
2729 __skb_queue_purge(&seg_queue);
2733 BT_DBG("bad state %1.1x", chan->mode);
/* A txseq gap was detected: send an SREJ S-frame for every missing
 * sequence number between expected_tx_seq and txseq (skipping frames
 * already buffered in srej_q), recording each on srej_list.
 */
2740 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2742 struct l2cap_ctrl control;
2745 BT_DBG("chan %p, txseq %u", chan, txseq);
2747 memset(&control, 0, sizeof(control));
2749 control.super = L2CAP_SUPER_SREJ;
2751 for (seq = chan->expected_tx_seq; seq != txseq;
2752 seq = __next_seq(chan, seq)) {
/* Only request frames we have not already received out of order */
2753 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2754 control.reqseq = seq;
2755 l2cap_send_sframe(chan, &control);
2756 l2cap_seq_list_append(&chan->srej_list, seq);
/* Next in-sequence frame is the one after the frame that just arrived */
2760 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) missing
 * frame, if the SREJ list is non-empty.
 */
2763 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2765 struct l2cap_ctrl control;
2767 BT_DBG("chan %p", chan);
2769 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2772 memset(&control, 0, sizeof(control));
2774 control.super = L2CAP_SUPER_SREJ;
2775 control.reqseq = chan->srej_list.tail;
2776 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding missing frame except txseq,
 * rotating each popped entry back onto the list.  The saved initial
 * head bounds the loop to a single pass.
 */
2779 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2781 struct l2cap_ctrl control;
2785 BT_DBG("chan %p, txseq %u", chan, txseq);
2787 memset(&control, 0, sizeof(control));
2789 control.super = L2CAP_SUPER_SREJ;
2791 /* Capture initial list head to allow only one pass through the list. */
2792 initial_head = chan->srej_list.head;
2795 seq = l2cap_seq_list_pop(&chan->srej_list);
/* txseq just arrived, so it no longer needs to be requested */
2796 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2799 control.reqseq = seq;
2800 l2cap_send_sframe(chan, &control);
/* Put it back: still missing, still tracked */
2801 l2cap_seq_list_append(&chan->srej_list, seq);
2802 } while (chan->srej_list.head != initial_head);
/* Process an incoming ack (reqseq): free every transmitted frame from
 * expected_ack_seq up to (not including) reqseq, update the ack state
 * and stop the retransmission timer once nothing is outstanding.
 */
2805 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2807 struct sk_buff *acked_skb;
2810 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or duplicate ack: nothing to do */
2812 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2815 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2816 chan->expected_ack_seq, chan->unacked_frames);
2818 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2819 ackseq = __next_seq(chan, ackseq)) {
2821 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2823 skb_unlink(acked_skb, &chan->tx_q);
2824 kfree_skb(acked_skb);
2825 chan->unacked_frames--;
2829 chan->expected_ack_seq = reqseq;
2831 if (chan->unacked_frames == 0)
2832 __clear_retrans_timer(chan);
2834 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to the
 * last delivered frame, drop all SREJ bookkeeping and any out-of-order
 * buffered frames, and return the receiver to plain RECV.
 */
2837 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2839 BT_DBG("chan %p", chan);
2841 chan->expected_tx_seq = chan->buffer_seq;
2842 l2cap_seq_list_clear(&chan->srej_list);
2843 skb_queue_purge(&chan->srej_q);
2844 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: handle data requests, local-busy
 * transitions, incoming acks, explicit polls and the retransmission
 * timeout.  Poll events send a P-bit S-frame and move to WAIT_F.
 */
2847 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2848 struct l2cap_ctrl *control,
2849 struct sk_buff_head *skbs, u8 event)
2851 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2855 case L2CAP_EV_DATA_REQUEST:
/* Point tx_send_head at the first new frame if nothing is pending */
2856 if (chan->tx_send_head == NULL)
2857 chan->tx_send_head = skb_peek(skbs);
2859 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2860 l2cap_ertm_send(chan);
2862 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2863 BT_DBG("Enter LOCAL_BUSY");
2864 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2866 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2867 /* The SREJ_SENT state must be aborted if we are to
2868 * enter the LOCAL_BUSY state.
2870 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() emits the RNR now that LOCAL_BUSY is set */
2873 l2cap_send_ack(chan);
2876 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2877 BT_DBG("Exit LOCAL_BUSY");
2878 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2880 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2881 struct l2cap_ctrl local_control;
/* We sent RNR earlier: poll the peer with RR(P=1) to resume */
2883 memset(&local_control, 0, sizeof(local_control));
2884 local_control.sframe = 1;
2885 local_control.super = L2CAP_SUPER_RR;
2886 local_control.poll = 1;
2887 local_control.reqseq = chan->buffer_seq;
2888 l2cap_send_sframe(chan, &local_control);
2890 chan->retry_count = 1;
2891 __set_monitor_timer(chan);
2892 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2895 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2896 l2cap_process_reqseq(chan, control->reqseq);
2898 case L2CAP_EV_EXPLICIT_POLL:
2899 l2cap_send_rr_or_rnr(chan, 1);
2900 chan->retry_count = 1;
2901 __set_monitor_timer(chan);
2902 __clear_ack_timer(chan);
2903 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2905 case L2CAP_EV_RETRANS_TO:
/* Retransmission timer fired: poll the peer and await its F-bit */
2906 l2cap_send_rr_or_rnr(chan, 1);
2907 chan->retry_count = 1;
2908 __set_monitor_timer(chan);
2909 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2911 case L2CAP_EV_RECV_FBIT:
2912 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state: a poll (P=1) is outstanding and
 * we are waiting for the peer's F-bit.  New data is queued but not
 * sent; receiving the F-bit returns to XMIT; the monitor timeout
 * re-polls up to max_tx times before disconnecting.
 */
2919 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2920 struct l2cap_ctrl *control,
2921 struct sk_buff_head *skbs, u8 event)
2923 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2927 case L2CAP_EV_DATA_REQUEST:
2928 if (chan->tx_send_head == NULL)
2929 chan->tx_send_head = skb_peek(skbs);
2930 /* Queue data, but don't send. */
2931 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2933 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2934 BT_DBG("Enter LOCAL_BUSY");
2935 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2937 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2938 /* The SREJ_SENT state must be aborted if we are to
2939 * enter the LOCAL_BUSY state.
2941 l2cap_abort_rx_srej_sent(chan);
2944 l2cap_send_ack(chan);
2947 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2948 BT_DBG("Exit LOCAL_BUSY");
2949 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2951 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2952 struct l2cap_ctrl local_control;
/* We sent RNR earlier: poll the peer with RR(P=1) to resume */
2953 memset(&local_control, 0, sizeof(local_control));
2954 local_control.sframe = 1;
2955 local_control.super = L2CAP_SUPER_RR;
2956 local_control.poll = 1;
2957 local_control.reqseq = chan->buffer_seq;
2958 l2cap_send_sframe(chan, &local_control);
2960 chan->retry_count = 1;
2961 __set_monitor_timer(chan);
2962 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2965 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2966 l2cap_process_reqseq(chan, control->reqseq);
2970 case L2CAP_EV_RECV_FBIT:
/* The awaited F-bit arrived: back to XMIT, resume normal timers */
2971 if (control && control->final) {
2972 __clear_monitor_timer(chan);
2973 if (chan->unacked_frames > 0)
2974 __set_retrans_timer(chan);
2975 chan->retry_count = 0;
2976 chan->tx_state = L2CAP_TX_STATE_XMIT;
2977 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2980 case L2CAP_EV_EXPLICIT_POLL:
/* A poll is already outstanding: ignore */
2983 case L2CAP_EV_MONITOR_TO:
2984 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2985 l2cap_send_rr_or_rnr(chan, 1);
2986 __set_monitor_timer(chan);
2987 chan->retry_count++;
/* Peer unresponsive beyond max_tx polls: give up */
2989 l2cap_send_disconn_req(chan, ECONNABORTED);
2997 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2998 struct sk_buff_head *skbs, u8 event)
3000 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3001 chan, control, skbs, event, chan->tx_state);
3003 switch (chan->tx_state) {
3004 case L2CAP_TX_STATE_XMIT:
3005 l2cap_tx_state_xmit(chan, control, skbs, event);
3007 case L2CAP_TX_STATE_WAIT_F:
3008 l2cap_tx_state_wait_f(chan, control, skbs, event);
3016 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3017 struct l2cap_ctrl *control)
3019 BT_DBG("chan %p, control %p", chan, control);
3020 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3023 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3024 struct l2cap_ctrl *control)
3026 BT_DBG("chan %p, control %p", chan, control);
3027 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3030 /* Copy frame to all raw sockets on that connection */
3031 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3033 struct sk_buff *nskb;
3034 struct l2cap_chan *chan;
3036 BT_DBG("conn %p", conn);
3038 mutex_lock(&conn->chan_lock);
3040 list_for_each_entry(chan, &conn->chan_l, list) {
3041 if (chan->chan_type != L2CAP_CHAN_RAW)
3044 /* Don't send frame to the channel it came from */
3045 if (bt_cb(skb)->chan == chan)
3048 nskb = skb_clone(skb, GFP_KERNEL);
3051 if (chan->ops->recv(chan, nskb))
3055 mutex_unlock(&conn->chan_lock);
3058 /* ---- L2CAP signalling commands ---- */
3059 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3060 u8 ident, u16 dlen, void *data)
3062 struct sk_buff *skb, **frag;
3063 struct l2cap_cmd_hdr *cmd;
3064 struct l2cap_hdr *lh;
3067 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3068 conn, code, ident, dlen);
3070 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3073 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3074 count = min_t(unsigned int, conn->mtu, len);
3076 skb = bt_skb_alloc(count, GFP_KERNEL);
3080 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
3081 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3083 if (conn->hcon->type == LE_LINK)
3084 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3086 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
3088 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
3091 cmd->len = cpu_to_le16(dlen);
3094 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3095 memcpy(skb_put(skb, count), data, count);
3101 /* Continuation fragments (no L2CAP header) */
3102 frag = &skb_shinfo(skb)->frag_list;
3104 count = min_t(unsigned int, conn->mtu, len);
3106 *frag = bt_skb_alloc(count, GFP_KERNEL);
3110 memcpy(skb_put(*frag, count), data, count);
3115 frag = &(*frag)->next;
3125 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3128 struct l2cap_conf_opt *opt = *ptr;
3131 len = L2CAP_CONF_OPT_SIZE + opt->len;
3139 *val = *((u8 *) opt->val);
3143 *val = get_unaligned_le16(opt->val);
3147 *val = get_unaligned_le32(opt->val);
3151 *val = (unsigned long) opt->val;
3155 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3159 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3161 struct l2cap_conf_opt *opt = *ptr;
3163 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3170 *((u8 *) opt->val) = val;
3174 put_unaligned_le16(val, opt->val);
3178 put_unaligned_le32(val, opt->val);
3182 memcpy(opt->val, (void *) val, len);
3186 *ptr += L2CAP_CONF_OPT_SIZE + len;
3189 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3191 struct l2cap_conf_efs efs;
3193 switch (chan->mode) {
3194 case L2CAP_MODE_ERTM:
3195 efs.id = chan->local_id;
3196 efs.stype = chan->local_stype;
3197 efs.msdu = cpu_to_le16(chan->local_msdu);
3198 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3199 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3200 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3203 case L2CAP_MODE_STREAMING:
3205 efs.stype = L2CAP_SERV_BESTEFFORT;
3206 efs.msdu = cpu_to_le16(chan->local_msdu);
3207 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3216 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3217 (unsigned long) &efs);
3220 static void l2cap_ack_timeout(struct work_struct *work)
3222 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3226 BT_DBG("chan %p", chan);
3228 l2cap_chan_lock(chan);
3230 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3231 chan->last_acked_seq);
3234 l2cap_send_rr_or_rnr(chan, 0);
3236 l2cap_chan_unlock(chan);
3237 l2cap_chan_put(chan);
3240 int l2cap_ertm_init(struct l2cap_chan *chan)
3244 chan->next_tx_seq = 0;
3245 chan->expected_tx_seq = 0;
3246 chan->expected_ack_seq = 0;
3247 chan->unacked_frames = 0;
3248 chan->buffer_seq = 0;
3249 chan->frames_sent = 0;
3250 chan->last_acked_seq = 0;
3252 chan->sdu_last_frag = NULL;
3255 skb_queue_head_init(&chan->tx_q);
3257 chan->local_amp_id = AMP_ID_BREDR;
3258 chan->move_id = AMP_ID_BREDR;
3259 chan->move_state = L2CAP_MOVE_STABLE;
3260 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3262 if (chan->mode != L2CAP_MODE_ERTM)
3265 chan->rx_state = L2CAP_RX_STATE_RECV;
3266 chan->tx_state = L2CAP_TX_STATE_XMIT;
3268 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3269 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3270 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3272 skb_queue_head_init(&chan->srej_q);
3274 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3278 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3280 l2cap_seq_list_free(&chan->srej_list);
3285 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3288 case L2CAP_MODE_STREAMING:
3289 case L2CAP_MODE_ERTM:
3290 if (l2cap_mode_supported(mode, remote_feat_mask))
3294 return L2CAP_MODE_BASIC;
3298 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3300 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3303 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3305 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3308 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3309 struct l2cap_conf_rfc *rfc)
3311 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3312 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3314 /* Class 1 devices have must have ERTM timeouts
3315 * exceeding the Link Supervision Timeout. The
3316 * default Link Supervision Timeout for AMP
3317 * controllers is 10 seconds.
3319 * Class 1 devices use 0xffffffff for their
3320 * best-effort flush timeout, so the clamping logic
3321 * will result in a timeout that meets the above
3322 * requirement. ERTM timeouts are 16-bit values, so
3323 * the maximum timeout is 65.535 seconds.
3326 /* Convert timeout to milliseconds and round */
3327 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3329 /* This is the recommended formula for class 2 devices
3330 * that start ERTM timers when packets are sent to the
3333 ertm_to = 3 * ertm_to + 500;
3335 if (ertm_to > 0xffff)
3338 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3339 rfc->monitor_timeout = rfc->retrans_timeout;
3341 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3342 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3346 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3348 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3349 __l2cap_ews_supported(chan->conn)) {
3350 /* use extended control field */
3351 set_bit(FLAG_EXT_CTRL, &chan->flags);
3352 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3354 chan->tx_win = min_t(u16, chan->tx_win,
3355 L2CAP_DEFAULT_TX_WINDOW);
3356 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3358 chan->ack_win = chan->tx_win;
/* Build an outgoing Configure Request into @data for this channel:
 * MTU option (if non-default), then mode-specific RFC / EFS / EWS /
 * FCS options. Returns the number of bytes written (ptr - data).
 *
 * NOTE(review): this chunk was extracted with structural lines elided
 * (braces, break statements, some declarations such as the option
 * pointer/size locals) — visible as gaps in the embedded line numbers.
 * Code lines below are byte-identical to the extracted original.
 */
3361 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3363 struct l2cap_conf_req *req = data;
3364 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3365 void *ptr = req->data;
3368 BT_DBG("chan %p", chan);
/* Only on the first request: possibly downgrade the mode and latch
 * EFS support before any options are emitted. */
3370 if (chan->num_conf_req || chan->num_conf_rsp)
3373 switch (chan->mode) {
3374 case L2CAP_MODE_STREAMING:
3375 case L2CAP_MODE_ERTM:
3376 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3379 if (__l2cap_efs_supported(chan->conn))
3380 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3384 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* MTU option is only sent when it differs from the default */
3389 if (chan->imtu != L2CAP_DEFAULT_MTU)
3390 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3392 switch (chan->mode) {
3393 case L2CAP_MODE_BASIC:
/* In basic mode an explicit RFC option is added only when the peer
 * advertises ERTM or streaming support. */
3394 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3395 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3398 rfc.mode = L2CAP_MODE_BASIC;
3400 rfc.max_transmit = 0;
3401 rfc.retrans_timeout = 0;
3402 rfc.monitor_timeout = 0;
3403 rfc.max_pdu_size = 0;
3405 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3406 (unsigned long) &rfc);
3409 case L2CAP_MODE_ERTM:
3410 rfc.mode = L2CAP_MODE_ERTM;
3411 rfc.max_transmit = chan->max_tx;
3413 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is capped by the connection MTU minus ERTM overhead */
3415 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3416 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3418 rfc.max_pdu_size = cpu_to_le16(size);
3420 l2cap_txwin_setup(chan);
3422 rfc.txwin_size = min_t(u16, chan->tx_win,
3423 L2CAP_DEFAULT_TX_WINDOW);
3425 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3426 (unsigned long) &rfc);
3428 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3429 l2cap_add_opt_efs(&ptr, chan);
3431 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3432 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to disable FCS when the peer supports the FCS option and
 * either side has already asked for no FCS. */
3435 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3436 if (chan->fcs == L2CAP_FCS_NONE ||
3437 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3438 chan->fcs = L2CAP_FCS_NONE;
3439 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3444 case L2CAP_MODE_STREAMING:
3445 l2cap_txwin_setup(chan);
3446 rfc.mode = L2CAP_MODE_STREAMING;
3448 rfc.max_transmit = 0;
3449 rfc.retrans_timeout = 0;
3450 rfc.monitor_timeout = 0;
3452 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3453 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3455 rfc.max_pdu_size = cpu_to_le16(size);
3457 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3458 (unsigned long) &rfc);
3460 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3461 l2cap_add_opt_efs(&ptr, chan);
3463 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3464 if (chan->fcs == L2CAP_FCS_NONE ||
3465 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3466 chan->fcs = L2CAP_FCS_NONE;
3467 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3473 req->dcid = cpu_to_le16(chan->dcid);
3474 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (chan->conf_req /
 * conf_len) and build our Configure Response into @data. Unknown
 * non-hint options are echoed back with result UNKNOWN; mode, MTU,
 * RFC, FCS, EFS and EWS options are validated and the channel's
 * remote-side parameters are recorded. Returns the response length,
 * or -ECONNREFUSED when negotiation cannot proceed.
 *
 * NOTE(review): extraction elided structural lines (braces, breaks,
 * `switch (type)` headers, some declarations) — see the gaps in the
 * embedded line numbers. Code lines below are byte-identical.
 */
3479 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3481 struct l2cap_conf_rsp *rsp = data;
3482 void *ptr = rsp->data;
3483 void *req = chan->conf_req;
3484 int len = chan->conf_len;
3485 int type, hint, olen;
3487 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3488 struct l2cap_conf_efs efs;
3490 u16 mtu = L2CAP_DEFAULT_MTU;
3491 u16 result = L2CAP_CONF_SUCCESS;
3494 BT_DBG("chan %p", chan);
/* First pass: consume every option in the request buffer */
3496 while (len >= L2CAP_CONF_OPT_SIZE) {
3497 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hint unknowns must be rejected */
3499 hint = type & L2CAP_CONF_HINT;
3500 type &= L2CAP_CONF_MASK;
3503 case L2CAP_CONF_MTU:
3507 case L2CAP_CONF_FLUSH_TO:
3508 chan->flush_to = val;
3511 case L2CAP_CONF_QOS:
3514 case L2CAP_CONF_RFC:
3515 if (olen == sizeof(rfc))
3516 memcpy(&rfc, (void *) val, olen);
3519 case L2CAP_CONF_FCS:
3520 if (val == L2CAP_FCS_NONE)
3521 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3524 case L2CAP_CONF_EFS:
3526 if (olen == sizeof(efs))
3527 memcpy(&efs, (void *) val, olen);
3530 case L2CAP_CONF_EWS:
/* Extended window requires high-speed support on this connection */
3531 if (!chan->conn->hs_enabled)
3532 return -ECONNREFUSED;
3534 set_bit(FLAG_EXT_CTRL, &chan->flags);
3535 set_bit(CONF_EWS_RECV, &chan->conf_state);
3536 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3537 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type in the UNKNOWN response */
3544 result = L2CAP_CONF_UNKNOWN;
3545 *((u8 *) ptr++) = type;
/* Mode reconciliation happens only on the first exchange */
3550 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3553 switch (chan->mode) {
3554 case L2CAP_MODE_STREAMING:
3555 case L2CAP_MODE_ERTM:
3556 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3557 chan->mode = l2cap_select_mode(rfc.mode,
3558 chan->conn->feat_mask);
3563 if (__l2cap_efs_supported(chan->conn))
3564 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3566 return -ECONNREFUSED;
3569 if (chan->mode != rfc.mode)
3570 return -ECONNREFUSED;
3576 if (chan->mode != rfc.mode) {
3577 result = L2CAP_CONF_UNACCEPT;
3578 rfc.mode = chan->mode;
/* Only one corrective round-trip is allowed for a mode mismatch */
3580 if (chan->num_conf_rsp == 1)
3581 return -ECONNREFUSED;
3583 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3584 (unsigned long) &rfc);
3587 if (result == L2CAP_CONF_SUCCESS) {
3588 /* Configure output options and let the other side know
3589 * which ones we don't like. */
3591 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3592 result = L2CAP_CONF_UNACCEPT;
3595 set_bit(CONF_MTU_DONE, &chan->conf_state);
3597 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must be compatible with our local service type */
3600 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3601 efs.stype != L2CAP_SERV_NOTRAFIC &&
3602 efs.stype != chan->local_stype) {
3604 result = L2CAP_CONF_UNACCEPT;
3606 if (chan->num_conf_req >= 1)
3607 return -ECONNREFUSED;
3609 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3611 (unsigned long) &efs);
3613 /* Send PENDING Conf Rsp */
3614 result = L2CAP_CONF_PENDING;
3615 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3620 case L2CAP_MODE_BASIC:
3621 chan->fcs = L2CAP_FCS_NONE;
3622 set_bit(CONF_MODE_DONE, &chan->conf_state);
3625 case L2CAP_MODE_ERTM:
3626 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3627 chan->remote_tx_win = rfc.txwin_size;
3629 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3631 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what our connection MTU can carry */
3633 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3634 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3635 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3636 rfc.max_pdu_size = cpu_to_le16(size);
3637 chan->remote_mps = size;
3639 __l2cap_set_ertm_timeouts(chan, &rfc);
3641 set_bit(CONF_MODE_DONE, &chan->conf_state);
3643 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3644 sizeof(rfc), (unsigned long) &rfc);
3646 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3647 chan->remote_id = efs.id;
3648 chan->remote_stype = efs.stype;
3649 chan->remote_msdu = le16_to_cpu(efs.msdu);
3650 chan->remote_flush_to =
3651 le32_to_cpu(efs.flush_to);
3652 chan->remote_acc_lat =
3653 le32_to_cpu(efs.acc_lat);
3654 chan->remote_sdu_itime =
3655 le32_to_cpu(efs.sdu_itime);
3656 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3658 (unsigned long) &efs);
3662 case L2CAP_MODE_STREAMING:
3663 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3664 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3665 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3666 rfc.max_pdu_size = cpu_to_le16(size);
3667 chan->remote_mps = size;
3669 set_bit(CONF_MODE_DONE, &chan->conf_state);
3671 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3672 (unsigned long) &rfc);
/* Unsupported mode: reject with an RFC naming our own mode */
3677 result = L2CAP_CONF_UNACCEPT;
3679 memset(&rfc, 0, sizeof(rfc));
3680 rfc.mode = chan->mode;
3683 if (result == L2CAP_CONF_SUCCESS)
3684 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3686 rsp->scid = cpu_to_le16(chan->dcid);
3687 rsp->result = cpu_to_le16(result);
3688 rsp->flags = __constant_cpu_to_le16(0);
/* Parse the peer's Configure Response (@rsp/@len) and build a fresh
 * Configure Request into @data reflecting the values we are willing
 * to accept. *result may be updated (e.g. to UNACCEPT on a too-small
 * MTU). On success/pending, the negotiated ERTM or streaming
 * parameters are committed to the channel. Returns the new request
 * length or -ECONNREFUSED on an irreconcilable response.
 *
 * NOTE(review): extraction elided structural lines (braces, breaks,
 * the `switch (type)` header and some declarations) — see the gaps
 * in the embedded line numbers. Code lines are byte-identical.
 */
3693 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3694 void *data, u16 *result)
3696 struct l2cap_conf_req *req = data;
3697 void *ptr = req->data;
3700 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3701 struct l2cap_conf_efs efs;
3703 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3705 while (len >= L2CAP_CONF_OPT_SIZE) {
3706 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3709 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: unacceptable */
3710 if (val < L2CAP_DEFAULT_MIN_MTU) {
3711 *result = L2CAP_CONF_UNACCEPT;
3712 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3715 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3718 case L2CAP_CONF_FLUSH_TO:
3719 chan->flush_to = val;
3720 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3724 case L2CAP_CONF_RFC:
3725 if (olen == sizeof(rfc))
3726 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot change mode after the fact */
3728 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3729 rfc.mode != chan->mode)
3730 return -ECONNREFUSED;
3734 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3735 sizeof(rfc), (unsigned long) &rfc);
3738 case L2CAP_CONF_EWS:
3739 chan->ack_win = min_t(u16, val, chan->ack_win);
3740 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3744 case L2CAP_CONF_EFS:
3745 if (olen == sizeof(efs))
3746 memcpy(&efs, (void *)val, olen);
3748 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3749 efs.stype != L2CAP_SERV_NOTRAFIC &&
3750 efs.stype != chan->local_stype)
3751 return -ECONNREFUSED;
3753 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3754 (unsigned long) &efs);
3757 case L2CAP_CONF_FCS:
3758 if (*result == L2CAP_CONF_PENDING)
3759 if (val == L2CAP_FCS_NONE)
3760 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be talked into another mode */
3766 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3767 return -ECONNREFUSED;
3769 chan->mode = rfc.mode;
3771 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3773 case L2CAP_MODE_ERTM:
3774 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3775 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3776 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3777 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3778 chan->ack_win = min_t(u16, chan->ack_win,
3781 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3782 chan->local_msdu = le16_to_cpu(efs.msdu);
3783 chan->local_sdu_itime =
3784 le32_to_cpu(efs.sdu_itime);
3785 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3786 chan->local_flush_to =
3787 le32_to_cpu(efs.flush_to);
3791 case L2CAP_MODE_STREAMING:
3792 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3796 req->dcid = cpu_to_le16(chan->dcid);
3797 req->flags = __constant_cpu_to_le16(0);
3802 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3803 u16 result, u16 flags)
3805 struct l2cap_conf_rsp *rsp = data;
3806 void *ptr = rsp->data;
3808 BT_DBG("chan %p", chan);
3810 rsp->scid = cpu_to_le16(chan->dcid);
3811 rsp->result = cpu_to_le16(result);
3812 rsp->flags = cpu_to_le16(flags);
3817 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3819 struct l2cap_le_conn_rsp rsp;
3820 struct l2cap_conn *conn = chan->conn;
3822 BT_DBG("chan %p", chan);
3824 rsp.dcid = cpu_to_le16(chan->scid);
3825 rsp.mtu = cpu_to_le16(chan->imtu);
3826 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
3827 rsp.credits = cpu_to_le16(chan->rx_credits);
3828 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3830 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3834 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3836 struct l2cap_conn_rsp rsp;
3837 struct l2cap_conn *conn = chan->conn;
3841 rsp.scid = cpu_to_le16(chan->dcid);
3842 rsp.dcid = cpu_to_le16(chan->scid);
3843 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3844 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3847 rsp_code = L2CAP_CREATE_CHAN_RSP;
3849 rsp_code = L2CAP_CONN_RSP;
3851 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3853 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3855 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3858 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3859 l2cap_build_conf_req(chan, buf), buf);
3860 chan->num_conf_req++;
/* Extract the final RFC (and extended window) parameters from a
 * successful Configure Response and commit them to the channel.
 * Sane defaults are preloaded so a misbehaving remote that omits the
 * options still leaves the channel usable. No-op for basic mode.
 *
 * NOTE(review): extraction elided structural lines (braces, breaks,
 * the `switch (type)`/`switch (rfc.mode)` headers, declarations of
 * type/olen/val and the EWS assignment body) — see gaps in the
 * embedded line numbers. Code lines below are byte-identical.
 */
3863 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3867 /* Use sane default values in case a misbehaving remote device
3868 * did not send an RFC or extended window size option.
3870 u16 txwin_ext = chan->ack_win;
3871 struct l2cap_conf_rfc rfc = {
3873 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3874 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3875 .max_pdu_size = cpu_to_le16(chan->imtu),
3876 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3879 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3881 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3884 while (len >= L2CAP_CONF_OPT_SIZE) {
3885 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3888 case L2CAP_CONF_RFC:
3889 if (olen == sizeof(rfc))
3890 memcpy(&rfc, (void *)val, olen);
3892 case L2CAP_CONF_EWS:
3899 case L2CAP_MODE_ERTM:
3900 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3901 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3902 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control uses the EWS value; otherwise the RFC window */
3903 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3904 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3906 chan->ack_win = min_t(u16, chan->ack_win,
3909 case L2CAP_MODE_STREAMING:
3910 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3914 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3915 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3918 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3920 if (cmd_len < sizeof(*rej))
3923 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3926 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3927 cmd->ident == conn->info_ident) {
3928 cancel_delayed_work(&conn->info_timer);
3930 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3931 conn->info_ident = 0;
3933 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, check link security, create and register the new channel,
 * pick a result/status (success, pending, or an error), send the
 * Connection Response, trigger feature discovery if still needed, and
 * possibly send the first Configure Request. Returns the new channel
 * or NULL.
 *
 * NOTE(review): extraction elided structural lines (braces, gotos,
 * labels such as response/sendresp, dcid assignment, security checks)
 * — see the gaps in the embedded line numbers. Code lines below are
 * byte-identical to the extracted original.
 */
3939 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3940 struct l2cap_cmd_hdr *cmd,
3941 u8 *data, u8 rsp_code, u8 amp_id)
3943 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3944 struct l2cap_conn_rsp rsp;
3945 struct l2cap_chan *chan = NULL, *pchan;
3946 int result, status = L2CAP_CS_NO_INFO;
3948 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3949 __le16 psm = req->psm;
3951 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3953 /* Check if we have socket listening on psm */
3954 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3955 &conn->hcon->dst, ACL_LINK);
3957 result = L2CAP_CR_BAD_PSM;
3961 mutex_lock(&conn->chan_lock);
3962 l2cap_chan_lock(pchan);
3964 /* Check if the ACL is secure enough (if not SDP) */
3965 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3966 !hci_conn_check_link_mode(conn->hcon)) {
3967 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3968 result = L2CAP_CR_SEC_BLOCK;
3972 result = L2CAP_CR_NO_MEM;
3974 /* Check if we already have channel with that dcid */
3975 if (__l2cap_get_chan_by_dcid(conn, scid))
3978 chan = pchan->ops->new_connection(pchan);
3982 /* For certain devices (ex: HID mouse), support for authentication,
3983 * pairing and bonding is optional. For such devices, inorder to avoid
3984 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3985 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3987 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3989 bacpy(&chan->src, &conn->hcon->src);
3990 bacpy(&chan->dst, &conn->hcon->dst);
3991 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3992 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3995 chan->local_amp_id = amp_id;
3997 __l2cap_chan_add(conn, chan);
4001 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4003 chan->ident = cmd->ident;
/* Only decide the final result once feature discovery has finished */
4005 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4006 if (l2cap_chan_check_security(chan)) {
4007 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4008 l2cap_state_change(chan, BT_CONNECT2);
4009 result = L2CAP_CR_PEND;
4010 status = L2CAP_CS_AUTHOR_PEND;
4011 chan->ops->defer(chan);
4013 /* Force pending result for AMP controllers.
4014 * The connection will succeed after the
4015 * physical link is up.
4017 if (amp_id == AMP_ID_BREDR) {
4018 l2cap_state_change(chan, BT_CONFIG);
4019 result = L2CAP_CR_SUCCESS;
4021 l2cap_state_change(chan, BT_CONNECT2);
4022 result = L2CAP_CR_PEND;
4024 status = L2CAP_CS_NO_INFO;
4027 l2cap_state_change(chan, BT_CONNECT2);
4028 result = L2CAP_CR_PEND;
4029 status = L2CAP_CS_AUTHEN_PEND;
4032 l2cap_state_change(chan, BT_CONNECT2);
4033 result = L2CAP_CR_PEND;
4034 status = L2CAP_CS_NO_INFO;
4038 l2cap_chan_unlock(pchan);
4039 mutex_unlock(&conn->chan_lock);
4042 rsp.scid = cpu_to_le16(scid);
4043 rsp.dcid = cpu_to_le16(dcid);
4044 rsp.result = cpu_to_le16(result);
4045 rsp.status = cpu_to_le16(status);
4046 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off feature-mask discovery if this is the first channel */
4048 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4049 struct l2cap_info_req info;
4050 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4052 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4053 conn->info_ident = l2cap_get_ident(conn);
4055 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4057 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4058 sizeof(info), &info);
/* Immediately start configuration for successful connections */
4061 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4062 result == L2CAP_CR_SUCCESS) {
4064 set_bit(CONF_REQ_SENT, &chan->conf_state);
4065 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4066 l2cap_build_conf_req(chan, buf), buf);
4067 chan->num_conf_req++;
4073 static int l2cap_connect_req(struct l2cap_conn *conn,
4074 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4076 struct hci_dev *hdev = conn->hcon->hdev;
4077 struct hci_conn *hcon = conn->hcon;
4079 if (cmd_len < sizeof(struct l2cap_conn_req))
4083 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
4084 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4085 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
4086 hcon->dst_type, 0, NULL, 0,
4088 hci_dev_unlock(hdev);
4090 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection/Create Channel Response: look up the channel by
 * our scid (or by ident while still unassigned), then act on the
 * result — SUCCESS moves to BT_CONFIG and sends a Configure Request,
 * PEND keeps the connect pending, anything else tears the channel
 * down with ECONNREFUSED.
 *
 * NOTE(review): extraction elided structural lines (braces, breaks,
 * the error/unlock paths, dcid assignment and req buffer declaration)
 * — see the gaps in the embedded line numbers. Code lines below are
 * byte-identical to the extracted original.
 */
4094 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4095 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4098 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4099 u16 scid, dcid, result, status;
4100 struct l2cap_chan *chan;
4104 if (cmd_len < sizeof(*rsp))
4107 scid = __le16_to_cpu(rsp->scid);
4108 dcid = __le16_to_cpu(rsp->dcid);
4109 result = __le16_to_cpu(rsp->result);
4110 status = __le16_to_cpu(rsp->status);
4112 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4113 dcid, scid, result, status);
4115 mutex_lock(&conn->chan_lock);
/* scid of 0 means the response is matched by command ident instead */
4118 chan = __l2cap_get_chan_by_scid(conn, scid);
4124 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4133 l2cap_chan_lock(chan);
4136 case L2CAP_CR_SUCCESS:
4137 l2cap_state_change(chan, BT_CONFIG);
4140 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Do not send a second Configure Request if one is outstanding */
4142 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4145 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4146 l2cap_build_conf_req(chan, req), req);
4147 chan->num_conf_req++;
4151 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4155 l2cap_chan_del(chan, ECONNREFUSED);
4159 l2cap_chan_unlock(chan);
4162 mutex_unlock(&conn->chan_lock);
4167 static inline void set_default_fcs(struct l2cap_chan *chan)
4169 /* FCS is enabled only in ERTM or streaming mode, if one or both
4172 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4173 chan->fcs = L2CAP_FCS_NONE;
4174 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4175 chan->fcs = L2CAP_FCS_CRC16;
4178 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4179 u8 ident, u16 flags)
4181 struct l2cap_conn *conn = chan->conn;
4183 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4186 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4187 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4189 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4190 l2cap_build_conf_rsp(chan, data,
4191 L2CAP_CONF_SUCCESS, flags), data);
4194 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4197 struct l2cap_cmd_rej_cid rej;
4199 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4200 rej.scid = __cpu_to_le16(scid);
4201 rej.dcid = __cpu_to_le16(dcid);
4203 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configure Request: locate the channel by dcid,
 * accumulate (possibly continued) option data into chan->conf_req,
 * and once complete, parse it and send our Configure Response. When
 * both directions are configured, finish ERTM init and mark the
 * channel ready. Handles the EFS "both sides pending" exchange.
 *
 * NOTE(review): extraction elided structural lines (braces, gotos,
 * the unlock/return paths, rsp/buf declarations, conf_len reset) —
 * see the gaps in the embedded line numbers. Code lines below are
 * byte-identical to the extracted original.
 */
4206 static inline int l2cap_config_req(struct l2cap_conn *conn,
4207 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4210 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4213 struct l2cap_chan *chan;
4216 if (cmd_len < sizeof(*req))
4219 dcid = __le16_to_cpu(req->dcid);
4220 flags = __le16_to_cpu(req->flags);
4222 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4224 chan = l2cap_get_chan_by_scid(conn, dcid);
4226 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Configuration is only legal in BT_CONFIG/BT_CONNECT2 states */
4230 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4231 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4236 /* Reject if config buffer is too small. */
4237 len = cmd_len - sizeof(*req);
4238 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4239 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4240 l2cap_build_conf_rsp(chan, rsp,
4241 L2CAP_CONF_REJECT, flags), rsp);
/* Store fragment; wait for more if the continuation flag is set */
4246 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4247 chan->conf_len += len;
4249 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4250 /* Incomplete config. Send empty response. */
4251 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4252 l2cap_build_conf_rsp(chan, rsp,
4253 L2CAP_CONF_SUCCESS, flags), rsp);
4257 /* Complete config. */
4258 len = l2cap_parse_conf_req(chan, rsp);
4260 l2cap_send_disconn_req(chan, ECONNRESET);
4264 chan->ident = cmd->ident;
4265 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4266 chan->num_conf_rsp++;
4268 /* Reset config buffer. */
4271 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions done: finalize FCS/ERTM and go ready */
4274 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4275 set_default_fcs(chan);
4277 if (chan->mode == L2CAP_MODE_ERTM ||
4278 chan->mode == L2CAP_MODE_STREAMING)
4279 err = l2cap_ertm_init(chan);
4282 l2cap_send_disconn_req(chan, -err);
4284 l2cap_chan_ready(chan);
4289 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4291 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4292 l2cap_build_conf_req(chan, buf), buf);
4293 chan->num_conf_req++;
4296 /* Got Conf Rsp PENDING from remote side and asume we sent
4297 Conf Rsp PENDING in the code above */
4298 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4299 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4301 /* check compatibility */
4303 /* Send rsp for BR/EDR channel */
4305 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4307 chan->ident = cmd->ident;
4311 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response: locate the channel by scid
 * and act on the result — SUCCESS commits RFC parameters, PENDING
 * triggers the EFS pending exchange (or AMP logical link creation),
 * UNACCEPT re-negotiates with a fresh Configure Request up to the
 * retry limit, anything else disconnects. When both directions are
 * done, finalize FCS/ERTM and mark the channel ready.
 *
 * NOTE(review): extraction elided structural lines (braces, breaks,
 * gotos and labels, req/buf declarations, the err variable) — see
 * the gaps in the embedded line numbers. Code lines below are
 * byte-identical to the extracted original.
 */
4315 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4316 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4319 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4320 u16 scid, flags, result;
4321 struct l2cap_chan *chan;
4322 int len = cmd_len - sizeof(*rsp);
4325 if (cmd_len < sizeof(*rsp))
4328 scid = __le16_to_cpu(rsp->scid);
4329 flags = __le16_to_cpu(rsp->flags);
4330 result = __le16_to_cpu(rsp->result);
4332 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4335 chan = l2cap_get_chan_by_scid(conn, scid);
4340 case L2CAP_CONF_SUCCESS:
4341 l2cap_conf_rfc_get(chan, rsp->data, len);
4342 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4345 case L2CAP_CONF_PENDING:
4346 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4348 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4351 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4354 l2cap_send_disconn_req(chan, ECONNRESET);
/* On an AMP channel, defer the rsp until the logical link is up */
4358 if (!chan->hs_hcon) {
4359 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4362 if (l2cap_check_efs(chan)) {
4363 amp_create_logical_link(chan);
4364 chan->ident = cmd->ident;
4370 case L2CAP_CONF_UNACCEPT:
4371 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Avoid overrunning the request buffer when re-building */
4374 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4375 l2cap_send_disconn_req(chan, ECONNRESET);
4379 /* throw out any old stored conf requests */
4380 result = L2CAP_CONF_SUCCESS;
4381 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4384 l2cap_send_disconn_req(chan, ECONNRESET);
4388 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4389 L2CAP_CONF_REQ, len, req);
4390 chan->num_conf_req++;
4391 if (result != L2CAP_CONF_SUCCESS)
/* Retry limit exceeded or hard failure: tear the channel down */
4397 l2cap_chan_set_err(chan, ECONNRESET);
4399 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4400 l2cap_send_disconn_req(chan, ECONNRESET);
4404 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4407 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4409 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4410 set_default_fcs(chan);
4412 if (chan->mode == L2CAP_MODE_ERTM ||
4413 chan->mode == L2CAP_MODE_STREAMING)
4414 err = l2cap_ertm_init(chan);
4417 l2cap_send_disconn_req(chan, -err);
4419 l2cap_chan_ready(chan);
4423 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnect Request: acknowledge with a
 * Disconnect Response, then tear the channel down.
 */
4427 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4428 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4431 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4432 struct l2cap_disconn_rsp rsp;
4434 struct l2cap_chan *chan;
/* Fixed-size PDU: anything else is malformed. */
4436 if (cmd_len != sizeof(*req))
4439 scid = __le16_to_cpu(req->scid);
4440 dcid = __le16_to_cpu(req->dcid);
4442 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4444 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid; unknown CID gets a Command Reject. */
4446 chan = __l2cap_get_chan_by_scid(conn, dcid);
4448 mutex_unlock(&conn->chan_lock);
4449 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4453 l2cap_chan_lock(chan);
4455 rsp.dcid = cpu_to_le16(chan->scid);
4456 rsp.scid = cpu_to_le16(chan->dcid);
4457 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4459 chan->ops->set_shutdown(chan);
/* Hold a reference across del/close so the channel cannot be freed
 * while ops->close() runs outside the channel lock. */
4461 l2cap_chan_hold(chan);
4462 l2cap_chan_del(chan, ECONNRESET);
4464 l2cap_chan_unlock(chan);
4466 chan->ops->close(chan);
4467 l2cap_chan_put(chan);
4469 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnect Response: the peer has confirmed
 * our disconnect, so remove and close the channel (error 0 = clean).
 */
4474 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4475 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4478 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4480 struct l2cap_chan *chan;
4482 if (cmd_len != sizeof(*rsp))
4485 scid = __le16_to_cpu(rsp->scid);
4486 dcid = __le16_to_cpu(rsp->dcid);
4488 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4490 mutex_lock(&conn->chan_lock);
4492 chan = __l2cap_get_chan_by_scid(conn, scid);
4494 mutex_unlock(&conn->chan_lock);
4498 l2cap_chan_lock(chan);
/* Same hold/put pattern as the request path: keep the channel alive
 * until ops->close() has finished. */
4500 l2cap_chan_hold(chan);
4501 l2cap_chan_del(chan, 0);
4503 l2cap_chan_unlock(chan);
4505 chan->ops->close(chan);
4506 l2cap_chan_put(chan);
4508 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request. Supported queries are
 * the feature mask and the fixed-channel map; anything else gets a
 * NOTSUPP response.
 */
4513 static inline int l2cap_information_req(struct l2cap_conn *conn,
4514 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4517 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4520 if (cmd_len != sizeof(*req))
4523 type = __le16_to_cpu(req->type);
4525 BT_DBG("type 0x%4.4x", type);
4527 if (type == L2CAP_IT_FEAT_MASK) {
4529 u32 feat_mask = l2cap_feat_mask;
4530 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4531 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4532 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
/* Base mask plus ERTM/streaming; extended flow spec and extended
 * window size are only advertised when high speed is enabled. */
4534 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4536 if (conn->hs_enabled)
4537 feat_mask |= L2CAP_FEAT_EXT_FLOW
4538 | L2CAP_FEAT_EXT_WINDOW;
4540 put_unaligned_le32(feat_mask, rsp->data);
4541 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4543 } else if (type == L2CAP_IT_FIXED_CHAN) {
4545 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* NOTE(review): this mutates the file-global l2cap_fixed_chan[]
 * based on per-connection hs_enabled — visible here as written. */
4547 if (conn->hs_enabled)
4548 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4550 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4552 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4553 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4554 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4555 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4558 struct l2cap_info_rsp rsp;
4559 rsp.type = cpu_to_le16(type);
4560 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4561 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response. On a feature-mask
 * reply we may chain a fixed-channel query; once the exchange is
 * finished, pending channels are started via l2cap_conn_start().
 */
4568 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4569 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4572 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4575 if (cmd_len < sizeof(*rsp))
4578 type = __le16_to_cpu(rsp->type);
4579 result = __le16_to_cpu(rsp->result);
4581 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4583 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4584 if (cmd->ident != conn->info_ident ||
4585 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4588 cancel_delayed_work(&conn->info_timer);
/* Peer refused: treat the exchange as done and start channels. */
4590 if (result != L2CAP_IR_SUCCESS) {
4591 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4592 conn->info_ident = 0;
4594 l2cap_conn_start(conn);
4600 case L2CAP_IT_FEAT_MASK:
4601 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, ask for its map next;
 * otherwise the info exchange is complete. */
4603 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4604 struct l2cap_info_req req;
4605 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4607 conn->info_ident = l2cap_get_ident(conn);
4609 l2cap_send_cmd(conn, conn->info_ident,
4610 L2CAP_INFO_REQ, sizeof(req), &req);
4612 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4613 conn->info_ident = 0;
4615 l2cap_conn_start(conn);
4619 case L2CAP_IT_FIXED_CHAN:
4620 conn->fixed_chan_mask = rsp->data[0];
4621 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4622 conn->info_ident = 0;
4624 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP). amp_id 0 means
 * a plain BR/EDR connect; otherwise the requested AMP controller is
 * validated and the channel is bound to its logical link. A bad AMP id
 * is answered with L2CAP_CR_BAD_AMP.
 */
4631 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4632 struct l2cap_cmd_hdr *cmd,
4633 u16 cmd_len, void *data)
4635 struct l2cap_create_chan_req *req = data;
4636 struct l2cap_create_chan_rsp rsp;
4637 struct l2cap_chan *chan;
4638 struct hci_dev *hdev;
4641 if (cmd_len != sizeof(*req))
/* High speed must be enabled to accept Create Channel at all. */
4644 if (!conn->hs_enabled)
4647 psm = le16_to_cpu(req->psm);
4648 scid = le16_to_cpu(req->scid);
4650 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4652 /* For controller id 0 make BR/EDR connection */
4653 if (req->amp_id == AMP_ID_BREDR) {
4654 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4659 /* Validate AMP controller id */
4660 hdev = hci_dev_get(req->amp_id);
4664 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4669 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4672 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4673 struct hci_conn *hs_hcon;
4675 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No AMP link to the peer: reject with invalid CID. */
4679 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4684 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4686 mgr->bredr_chan = chan;
4687 chan->hs_hcon = hs_hcon;
/* AMP links carry their own CRC; disable L2CAP FCS. */
4688 chan->fcs = L2CAP_FCS_NONE;
4689 conn->mtu = hdev->block_mtu;
/* Error path: report a bad AMP controller id to the requester. */
4698 rsp.scid = cpu_to_le16(scid);
4699 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4700 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4702 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for this channel towards dest_amp_id and
 * arm the move timer. The fresh ident is stored so the response can be
 * matched back to the channel.
 */
4708 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4710 struct l2cap_move_chan_req req;
4713 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4715 ident = l2cap_get_ident(chan->conn);
4716 chan->ident = ident;
4718 req.icid = cpu_to_le16(chan->scid);
4719 req.dest_amp_id = dest_amp_id;
4721 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4724 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with the given result, reusing the ident
 * stored from the peer's request.
 */
4727 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4729 struct l2cap_move_chan_rsp rsp;
4731 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4733 rsp.icid = cpu_to_le16(chan->dcid);
4734 rsp.result = cpu_to_le16(result);
4736 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm with the given result under a fresh ident
 * and arm the move timer while waiting for the confirm response.
 */
4740 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4742 struct l2cap_move_chan_cfm cfm;
4744 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4746 chan->ident = l2cap_get_ident(chan->conn);
4748 cfm.icid = cpu_to_le16(chan->scid);
4749 cfm.result = cpu_to_le16(result);
4751 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4754 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare icid — used when
 * no matching channel could be found but the spec still requires a
 * confirm on the wire.
 */
4757 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4759 struct l2cap_move_chan_cfm cfm;
4761 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4763 cfm.icid = cpu_to_le16(icid);
4764 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4766 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirm Response for icid, echoing the confirm's
 * ident.
 */
4770 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4773 struct l2cap_move_chan_cfm_rsp rsp;
4775 BT_DBG("icid 0x%4.4x", icid);
4777 rsp.icid = cpu_to_le16(icid);
4778 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link. The
 * actual link release is still a placeholder per the comment below.
 */
4781 static void __release_logical_link(struct l2cap_chan *chan)
4783 chan->hs_hchan = NULL;
4784 chan->hs_hcon = NULL;
4786 /* Placeholder - release the logical link */
/* React to a failed logical-link setup. During channel creation the
 * channel is simply disconnected; during a move, the cleanup depends on
 * whether we were the move initiator or responder.
 */
4789 static void l2cap_logical_fail(struct l2cap_chan *chan)
4791 /* Logical link setup failed */
4792 if (chan->state != BT_CONNECTED) {
4793 /* Create channel failure, disconnect */
4794 l2cap_send_disconn_req(chan, ECONNRESET);
4798 switch (chan->move_role) {
4799 case L2CAP_MOVE_ROLE_RESPONDER:
/* Tell the initiator we cannot accept the move. */
4800 l2cap_move_done(chan);
4801 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4803 case L2CAP_MOVE_ROLE_INITIATOR:
4804 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4805 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4806 /* Remote has only sent pending or
4807 * success responses, clean up
4809 l2cap_move_done(chan);
4812 /* Other amp move states imply that the move
4813 * has already aborted
4815 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach the
 * hci_chan, send the deferred EFS config response, and if config is
 * fully done bring the channel up (with ERTM init as needed).
 */
4820 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4821 struct hci_chan *hchan)
4823 struct l2cap_conf_rsp rsp;
4825 chan->hs_hchan = hchan;
4826 chan->hs_hcon->l2cap_data = chan->conn;
/* chan->ident still holds the config request's ident. */
4828 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4830 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4833 set_default_fcs(chan);
4835 err = l2cap_ertm_init(chan);
4837 l2cap_send_disconn_req(chan, -err);
4839 l2cap_chan_ready(chan);
/* Complete a channel move once the AMP logical link is up, advancing
 * the move state machine according to our role; an unexpected state
 * releases the link and settles the channel back to STABLE.
 */
4843 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4844 struct hci_chan *hchan)
4846 chan->hs_hcon = hchan->conn;
4847 chan->hs_hcon->l2cap_data = chan->conn;
4849 BT_DBG("move_state %d", chan->move_state);
4851 switch (chan->move_state) {
4852 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4853 /* Move confirm will be sent after a success
4854 * response is received
4856 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4858 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* If locally busy, defer; otherwise confirm (initiator) or
 * respond with success (responder). */
4859 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4860 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4861 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4862 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4863 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4864 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4865 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4866 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4870 /* Move was not in expected state, free the channel */
4871 __release_logical_link(chan);
4873 chan->move_state = L2CAP_MOVE_STABLE;
4877 /* Call with chan locked */
/* Logical-link confirmation entry point: on failure, clean up and drop
 * the link references; on success, finish either channel creation (when
 * not yet connected and actually on an AMP) or a channel move.
 */
4878 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4881 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4884 l2cap_logical_fail(chan);
4885 __release_logical_link(chan);
4889 if (chan->state != BT_CONNECTED) {
4890 /* Ignore logical link if channel is on BR/EDR */
4891 if (chan->local_amp_id != AMP_ID_BREDR)
4892 l2cap_logical_finish_create(chan, hchan);
4894 l2cap_logical_finish_move(chan, hchan);
/* Start moving the channel to the preferred controller. From BR/EDR we
 * first need the physical link prepared; from an AMP we can send the
 * Move Channel Request right away (destination 0 = back to BR/EDR).
 */
4898 void l2cap_move_start(struct l2cap_chan *chan)
4900 BT_DBG("chan %p", chan);
4902 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Only move off BR/EDR if policy prefers AMP. */
4903 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4905 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4906 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4907 /* Placeholder - start physical link setup */
4909 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4910 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4912 l2cap_move_setup(chan);
4913 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after the physical link outcome is known.
 * Outgoing (BT_CONNECT): send a Create Channel Request on success, or
 * fall back to a plain BR/EDR connect. Incoming: answer the pending
 * Create Channel Request and, on success, start configuration.
 */
4917 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4918 u8 local_amp_id, u8 remote_amp_id)
4920 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4921 local_amp_id, remote_amp_id);
/* AMP links provide their own CRC; no L2CAP FCS. */
4923 chan->fcs = L2CAP_FCS_NONE;
4925 /* Outgoing channel on AMP */
4926 if (chan->state == BT_CONNECT) {
4927 if (result == L2CAP_CR_SUCCESS) {
4928 chan->local_amp_id = local_amp_id;
4929 l2cap_send_create_chan_req(chan, remote_amp_id);
4931 /* Revert to BR/EDR connect */
4932 l2cap_send_conn_req(chan);
4938 /* Incoming channel on AMP */
4939 if (__l2cap_no_conn_pending(chan)) {
4940 struct l2cap_conn_rsp rsp;
4942 rsp.scid = cpu_to_le16(chan->dcid);
4943 rsp.dcid = cpu_to_le16(chan->scid);
4945 if (result == L2CAP_CR_SUCCESS) {
4946 /* Send successful response */
4947 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4948 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4950 /* Send negative response */
4951 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4952 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4955 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* Accepted: move to CONFIG and kick off option negotiation. */
4958 if (result == L2CAP_CR_SUCCESS) {
4959 l2cap_state_change(chan, BT_CONFIG);
4960 set_bit(CONF_REQ_SENT, &chan->conf_state);
4961 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4963 l2cap_build_conf_req(chan, buf), buf);
4964 chan->num_conf_req++;
/* Initiator side: record the move target, enter WAIT_RSP and send the
 * Move Channel Request to the peer.
 */
4969 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4972 l2cap_move_setup(chan);
4973 chan->move_id = local_amp_id;
4974 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4976 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move once the logical link status is known.
 * NOTE(review): hchan is initialized to NULL and the lookup is still a
 * placeholder, so as written the connected-branch depends on code not
 * yet present.
 */
4979 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4981 struct hci_chan *hchan = NULL;
4983 /* Placeholder - get hci_chan for logical link */
4986 if (hchan->state == BT_CONNECTED) {
4987 /* Logical link is ready to go */
4988 chan->hs_hcon = hchan->conn;
4989 chan->hs_hcon->l2cap_data = chan->conn;
4990 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4991 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4993 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4995 /* Wait for logical link to be ready */
4996 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4999 /* Logical link not available */
5000 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move: as responder send a failure response
 * (BAD_ID for -EINVAL, otherwise NOT_ALLOWED), reset the move state
 * machine and resume ERTM transmission.
 */
5004 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5006 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5008 if (result == -EINVAL)
5009 rsp_result = L2CAP_MR_BAD_ID;
5011 rsp_result = L2CAP_MR_NOT_ALLOWED;
5013 l2cap_send_move_chan_rsp(chan, rsp_result);
5016 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5017 chan->move_state = L2CAP_MOVE_STABLE;
5019 /* Restart data transmission */
5020 l2cap_ertm_send(chan);
5023 /* Invoke with locked chan */
/* Physical-link confirmation: dispatch by channel state — creation if
 * not yet connected, cancel on failure, otherwise continue the move per
 * our role. Disconnecting/closed channels are ignored.
 */
5024 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5026 u8 local_amp_id = chan->local_amp_id;
5027 u8 remote_amp_id = chan->remote_amp_id;
5029 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5030 chan, result, local_amp_id, remote_amp_id);
5032 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
5033 l2cap_chan_unlock(chan);
5037 if (chan->state != BT_CONNECTED) {
5038 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5039 } else if (result != L2CAP_MR_SUCCESS) {
5040 l2cap_do_move_cancel(chan, result);
5042 switch (chan->move_role) {
5043 case L2CAP_MOVE_ROLE_INITIATOR:
5044 l2cap_do_move_initiate(chan, local_amp_id,
5047 case L2CAP_MOVE_ROLE_RESPONDER:
5048 l2cap_do_move_respond(chan, result);
/* No recognized role: cancel the move. */
5051 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request: validate the channel and the
 * destination controller, detect move collisions, then either accept
 * (SUCCESS/PEND) or reject (NOT_ALLOWED/SAME_ID/BAD_ID/COLLISION) via a
 * Move Channel Response.
 */
5057 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5058 struct l2cap_cmd_hdr *cmd,
5059 u16 cmd_len, void *data)
5061 struct l2cap_move_chan_req *req = data;
5062 struct l2cap_move_chan_rsp rsp;
5063 struct l2cap_chan *chan;
5065 u16 result = L2CAP_MR_NOT_ALLOWED;
5067 if (cmd_len != sizeof(*req))
5070 icid = le16_to_cpu(req->icid);
5072 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5074 if (!conn->hs_enabled)
/* Unknown icid: answer NOT_ALLOWED without a channel. */
5077 chan = l2cap_get_chan_by_dcid(conn, icid);
5079 rsp.icid = cpu_to_le16(icid);
5080 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5081 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5086 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels that are not pinned to
 * BR/EDR by policy may be moved. */
5088 if (chan->scid < L2CAP_CID_DYN_START ||
5089 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5090 (chan->mode != L2CAP_MODE_ERTM &&
5091 chan->mode != L2CAP_MODE_STREAMING)) {
5092 result = L2CAP_MR_NOT_ALLOWED;
5093 goto send_move_response;
5096 if (chan->local_amp_id == req->dest_amp_id) {
5097 result = L2CAP_MR_SAME_ID;
5098 goto send_move_response;
5101 if (req->dest_amp_id != AMP_ID_BREDR) {
5102 struct hci_dev *hdev;
5103 hdev = hci_dev_get(req->dest_amp_id);
/* Destination must be an AMP controller that is up. */
5104 if (!hdev || hdev->dev_type != HCI_AMP ||
5105 !test_bit(HCI_UP, &hdev->flags)) {
5109 result = L2CAP_MR_BAD_ID;
5110 goto send_move_response;
5115 /* Detect a move collision. Only send a collision response
5116 * if this side has "lost", otherwise proceed with the move.
5117 * The winner has the larger bd_addr.
5119 if ((__chan_is_moving(chan) ||
5120 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5121 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5122 result = L2CAP_MR_COLLISION;
5123 goto send_move_response;
5126 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5127 l2cap_move_setup(chan);
5128 chan->move_id = req->dest_amp_id;
5131 if (req->dest_amp_id == AMP_ID_BREDR) {
5132 /* Moving to BR/EDR */
5133 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5134 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5135 result = L2CAP_MR_PEND;
5137 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5138 result = L2CAP_MR_SUCCESS;
/* Moving to an AMP: physical link prep happens first. */
5141 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5142 /* Placeholder - uncomment when amp functions are available */
5143 /*amp_accept_physical(chan, req->dest_amp_id);*/
5144 result = L2CAP_MR_PEND;
5148 l2cap_send_move_chan_rsp(chan, result);
5150 l2cap_chan_unlock(chan);
/* Advance the initiator's move state machine after a SUCCESS or PEND
 * Move Channel Response, sending the Move Channel Confirm when both the
 * response and the logical link (where needed) are ready.
 */
5155 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5157 struct l2cap_chan *chan;
5158 struct hci_chan *hchan = NULL;
5160 chan = l2cap_get_chan_by_scid(conn, icid);
/* No channel: still confirm (unconfirmed) per spec. */
5162 l2cap_send_move_chan_cfm_icid(conn, icid);
5166 __clear_chan_timer(chan);
/* PEND extends the move timeout (ERTX). */
5167 if (result == L2CAP_MR_PEND)
5168 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5170 switch (chan->move_state) {
5171 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5172 /* Move confirm will be sent when logical link
5175 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5177 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5178 if (result == L2CAP_MR_PEND) {
5180 } else if (test_bit(CONN_LOCAL_BUSY,
5181 &chan->conn_state)) {
5182 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5184 /* Logical link is up or moving to BR/EDR,
5187 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5188 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5191 case L2CAP_MOVE_WAIT_RSP:
5193 if (result == L2CAP_MR_SUCCESS) {
5194 /* Remote is ready, send confirm immediately
5195 * after logical link is ready
5197 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5199 /* Both logical link and move success
5200 * are required to confirm
5202 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5205 /* Placeholder - get hci_chan for logical link */
/* NOTE(review): hchan comes from the placeholder above and is
 * NULL as written; the paths below rely on the real lookup. */
5207 /* Logical link not available */
5208 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5212 /* If the logical link is not yet connected, do not
5213 * send confirmation.
5215 if (hchan->state != BT_CONNECTED)
5218 /* Logical link is already ready to go */
5220 chan->hs_hcon = hchan->conn;
5221 chan->hs_hcon->l2cap_data = chan->conn;
5223 if (result == L2CAP_MR_SUCCESS) {
5224 /* Can confirm now */
5225 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5227 /* Now only need move success
5230 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5233 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5236 /* Any other amp move state means the move failed. */
5237 chan->move_id = chan->local_amp_id;
5238 l2cap_move_done(chan);
5239 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5242 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response. On COLLISION the initiator
 * becomes the responder; otherwise the move is cancelled and an
 * UNCONFIRMED confirm is sent.
 */
5245 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5248 struct l2cap_chan *chan;
5250 chan = l2cap_get_chan_by_ident(conn, ident);
5252 /* Could not locate channel, icid is best guess */
5253 l2cap_send_move_chan_cfm_icid(conn, icid);
5257 __clear_chan_timer(chan);
5259 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5260 if (result == L2CAP_MR_COLLISION) {
5261 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5263 /* Cleanup - cancel move */
5264 chan->move_id = chan->local_amp_id;
5265 l2cap_move_done(chan);
5269 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5271 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Response: SUCCESS/PEND continues the
 * move, any other result fails it.
 */
5274 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5275 struct l2cap_cmd_hdr *cmd,
5276 u16 cmd_len, void *data)
5278 struct l2cap_move_chan_rsp *rsp = data;
5281 if (cmd_len != sizeof(*rsp))
5284 icid = le16_to_cpu(rsp->icid);
5285 result = le16_to_cpu(rsp->result);
5287 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5289 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5290 l2cap_move_continue(conn, icid, result);
5292 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm (responder side): commit or
 * roll back the controller switch, finish the move, and always answer
 * with a Move Channel Confirm Response.
 */
5297 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5298 struct l2cap_cmd_hdr *cmd,
5299 u16 cmd_len, void *data)
5301 struct l2cap_move_chan_cfm *cfm = data;
5302 struct l2cap_chan *chan;
5305 if (cmd_len != sizeof(*cfm))
5308 icid = le16_to_cpu(cfm->icid);
5309 result = le16_to_cpu(cfm->result);
5311 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5313 chan = l2cap_get_chan_by_dcid(conn, icid);
5315 /* Spec requires a response even if the icid was not found */
5316 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5320 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5321 if (result == L2CAP_MC_CONFIRMED) {
/* Confirmed: adopt the new controller; drop the logical
 * link if we ended up back on BR/EDR. */
5322 chan->local_amp_id = chan->move_id;
5323 if (chan->local_amp_id == AMP_ID_BREDR)
5324 __release_logical_link(chan);
/* Unconfirmed: revert move_id to the current controller. */
5326 chan->move_id = chan->local_amp_id;
5329 l2cap_move_done(chan);
5332 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5334 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirm Response (initiator side):
 * commit the controller switch and complete the move.
 */
5339 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5340 struct l2cap_cmd_hdr *cmd,
5341 u16 cmd_len, void *data)
5343 struct l2cap_move_chan_cfm_rsp *rsp = data;
5344 struct l2cap_chan *chan;
5347 if (cmd_len != sizeof(*rsp))
5350 icid = le16_to_cpu(rsp->icid);
5352 BT_DBG("icid 0x%4.4x", icid);
5354 chan = l2cap_get_chan_by_scid(conn, icid);
5358 __clear_chan_timer(chan);
5360 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5361 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR with a leftover logical link: release it. */
5363 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5364 __release_logical_link(chan);
5366 l2cap_move_done(chan);
5369 l2cap_chan_unlock(chan);
/* Validate LE connection parameters against the ranges in the Bluetooth
 * spec: interval 6..3200 with min <= max, supervision timeout 10..3200,
 * timeout strictly greater than the effective interval, and latency
 * within both the absolute cap (499) and the timeout-derived maximum.
 */
5374 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5379 if (min > max || min < 6 || max > 3200)
5382 if (to_multiplier < 10 || to_multiplier > 3200)
5385 if (max >= to_multiplier * 8)
5388 max_latency = (to_multiplier * 8 / max) - 1;
5389 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed parameters, answer accept/reject, and apply the
 * update to the controller when accepted.
 */
5395 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5396 struct l2cap_cmd_hdr *cmd,
5397 u16 cmd_len, u8 *data)
5399 struct hci_conn *hcon = conn->hcon;
5400 struct l2cap_conn_param_update_req *req;
5401 struct l2cap_conn_param_update_rsp rsp;
5402 u16 min, max, latency, to_multiplier;
/* Only the master may act on this request. */
5405 if (!(hcon->link_mode & HCI_LM_MASTER))
5408 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5411 req = (struct l2cap_conn_param_update_req *) data;
5412 min = __le16_to_cpu(req->min);
5413 max = __le16_to_cpu(req->max);
5414 latency = __le16_to_cpu(req->latency);
5415 to_multiplier = __le16_to_cpu(req->to_multiplier);
5417 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5418 min, max, latency, to_multiplier);
5420 memset(&rsp, 0, sizeof(rsp));
5422 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5424 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5426 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5428 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters to the LE link. */
5432 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an LE Credit Based Connection Response: on success store the
 * peer's MTU/MPS/credits and mark the channel ready; otherwise delete
 * the channel with ECONNREFUSED.
 */
5437 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5438 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5441 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5442 u16 dcid, mtu, mps, credits, result;
5443 struct l2cap_chan *chan;
5446 if (cmd_len < sizeof(*rsp))
5449 dcid = __le16_to_cpu(rsp->dcid);
5450 mtu = __le16_to_cpu(rsp->mtu);
5451 mps = __le16_to_cpu(rsp->mps);
5452 credits = __le16_to_cpu(rsp->credits);
5453 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum legal LE MTU/MPS; smaller values on a
 * successful result are a protocol violation. */
5455 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5458 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5459 dcid, mtu, mps, credits, result);
5461 mutex_lock(&conn->chan_lock);
/* Responses are matched by the ident we sent, not by CID. */
5463 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5471 l2cap_chan_lock(chan);
5474 case L2CAP_CR_SUCCESS:
5478 chan->remote_mps = mps;
5479 chan->tx_credits = credits;
5480 l2cap_chan_ready(chan);
5484 l2cap_chan_del(chan, ECONNREFUSED);
5488 l2cap_chan_unlock(chan);
5491 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler. Echo requests
 * are answered inline; unknown opcodes are logged as errors.
 */
5496 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5497 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5502 switch (cmd->code) {
5503 case L2CAP_COMMAND_REJ:
5504 l2cap_command_rej(conn, cmd, cmd_len, data);
5507 case L2CAP_CONN_REQ:
5508 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share one handler. */
5511 case L2CAP_CONN_RSP:
5512 case L2CAP_CREATE_CHAN_RSP:
5513 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5516 case L2CAP_CONF_REQ:
5517 err = l2cap_config_req(conn, cmd, cmd_len, data);
5520 case L2CAP_CONF_RSP:
5521 l2cap_config_rsp(conn, cmd, cmd_len, data);
5524 case L2CAP_DISCONN_REQ:
5525 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5528 case L2CAP_DISCONN_RSP:
5529 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo: reflect the payload straight back. */
5532 case L2CAP_ECHO_REQ:
5533 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5536 case L2CAP_ECHO_RSP:
5539 case L2CAP_INFO_REQ:
5540 err = l2cap_information_req(conn, cmd, cmd_len, data);
5543 case L2CAP_INFO_RSP:
5544 l2cap_information_rsp(conn, cmd, cmd_len, data);
5547 case L2CAP_CREATE_CHAN_REQ:
5548 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5551 case L2CAP_MOVE_CHAN_REQ:
5552 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5555 case L2CAP_MOVE_CHAN_RSP:
5556 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5559 case L2CAP_MOVE_CHAN_CFM:
5560 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5563 case L2CAP_MOVE_CHAN_CFM_RSP:
5564 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5568 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request: find a listening
 * channel for the PSM, check security and duplicate CIDs, create the
 * new channel, and reply with our MTU/MPS/credits or an error result
 * (BAD_PSM / AUTHENTICATION / NO_MEM / PEND for deferred setup).
 */
5576 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5577 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5580 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5581 struct l2cap_le_conn_rsp rsp;
5582 struct l2cap_chan *chan, *pchan;
5583 u16 dcid, scid, credits, mtu, mps;
5587 if (cmd_len != sizeof(*req))
5590 scid = __le16_to_cpu(req->scid);
5591 mtu = __le16_to_cpu(req->mtu);
5592 mps = __le16_to_cpu(req->mps);
/* Spec minimum for LE MTU and MPS is 23. */
5597 if (mtu < 23 || mps < 23)
5600 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5603 /* Check if we have socket listening on psm */
5604 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5605 &conn->hcon->dst, LE_LINK);
5607 result = L2CAP_CR_BAD_PSM;
5612 mutex_lock(&conn->chan_lock);
5613 l2cap_chan_lock(pchan);
5615 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5616 result = L2CAP_CR_AUTHENTICATION;
5618 goto response_unlock;
5621 /* Check if we already have channel with that dcid */
5622 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5623 result = L2CAP_CR_NO_MEM;
5625 goto response_unlock;
5628 chan = pchan->ops->new_connection(pchan);
5630 result = L2CAP_CR_NO_MEM;
5631 goto response_unlock;
5634 bacpy(&chan->src, &conn->hcon->src);
5635 bacpy(&chan->dst, &conn->hcon->dst);
5636 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5637 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5641 chan->remote_mps = mps;
5642 chan->tx_credits = __le16_to_cpu(req->credits);
5644 __l2cap_chan_add(conn, chan);
5646 credits = chan->rx_credits;
5648 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5650 chan->ident = cmd->ident;
/* Deferred setup: answer PEND and let the owner accept later. */
5652 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5653 l2cap_state_change(chan, BT_CONNECT2);
5654 result = L2CAP_CR_PEND;
5655 chan->ops->defer(chan);
5657 l2cap_chan_ready(chan);
5658 result = L2CAP_CR_SUCCESS;
5662 l2cap_chan_unlock(pchan);
5663 mutex_unlock(&conn->chan_lock);
/* PEND responses are sent later from the accept path. */
5665 if (result == L2CAP_CR_PEND)
5670 rsp.mtu = cpu_to_le16(chan->imtu);
5671 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
5677 rsp.dcid = cpu_to_le16(dcid);
5678 rsp.credits = cpu_to_le16(credits);
5679 rsp.result = cpu_to_le16(result);
5681 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the granted credits and
 * drain as much of the tx queue as the new credit count allows, then
 * resume the owner if credits remain.
 */
5686 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5687 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5690 struct l2cap_le_credits *pkt;
5691 struct l2cap_chan *chan;
5694 if (cmd_len != sizeof(*pkt))
5697 pkt = (struct l2cap_le_credits *) data;
5698 cid = __le16_to_cpu(pkt->cid);
5699 credits = __le16_to_cpu(pkt->credits);
5701 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5703 chan = l2cap_get_chan_by_dcid(conn, cid);
5707 chan->tx_credits += credits;
/* Flush queued outbound frames while credits last. */
5709 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5710 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5714 if (chan->tx_credits)
5715 chan->ops->resume(chan);
5717 l2cap_chan_unlock(chan);
/* Dispatch one LE signaling command to its handler. When LE CoC support
 * is disabled (enable_lecoc), the credit-based opcodes are filtered out
 * up front. Unknown opcodes are logged as errors.
 */
5722 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5723 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5728 if (!enable_lecoc) {
5729 switch (cmd->code) {
5730 case L2CAP_LE_CONN_REQ:
5731 case L2CAP_LE_CONN_RSP:
5732 case L2CAP_LE_CREDITS:
5733 case L2CAP_DISCONN_REQ:
5734 case L2CAP_DISCONN_RSP:
5739 switch (cmd->code) {
5740 case L2CAP_COMMAND_REJ:
5743 case L2CAP_CONN_PARAM_UPDATE_REQ:
5744 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5747 case L2CAP_CONN_PARAM_UPDATE_RSP:
5750 case L2CAP_LE_CONN_RSP:
5751 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5754 case L2CAP_LE_CONN_REQ:
5755 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5758 case L2CAP_LE_CREDITS:
5759 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5762 case L2CAP_DISCONN_REQ:
5763 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5766 case L2CAP_DISCONN_RSP:
5767 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5771 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb from the LE signaling channel: validate the header,
 * dispatch the single command it carries, and send a Command Reject on
 * handler failure.
 */
5779 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5780 struct sk_buff *skb)
5782 struct hci_conn *hcon = conn->hcon;
5783 struct l2cap_cmd_hdr *cmd;
5787 if (hcon->type != LE_LINK)
5790 if (skb->len < L2CAP_CMD_HDR_SIZE)
5793 cmd = (void *) skb->data;
5794 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5796 len = le16_to_cpu(cmd->len);
5798 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Length must match payload exactly and ident must be non-zero. */
5800 if (len != skb->len || !cmd->ident) {
5801 BT_DBG("corrupted command");
5805 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5807 struct l2cap_cmd_rej_unk rej;
5809 BT_ERR("Wrong link type (%d)", err);
5811 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5812 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Handle an skb received on the BR/EDR signaling channel.  Unlike LE,
 * a single PDU may contain several commands; iterate over them,
 * dispatching each to l2cap_bredr_sig_cmd() and rejecting failures
 * with a Command Reject (NOT_UNDERSTOOD).
 */
5820 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5821 struct sk_buff *skb)
5823 struct hci_conn *hcon = conn->hcon;
5824 u8 *data = skb->data;
5826 struct l2cap_cmd_hdr cmd;
/* Deliver a copy to raw sockets (e.g. for monitoring) first. */
5829 l2cap_raw_recv(conn, skb);
5831 if (hcon->type != ACL_LINK)
/* Walk the command list; each iteration consumes one header + payload. */
5834 while (len >= L2CAP_CMD_HDR_SIZE) {
5836 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5837 data += L2CAP_CMD_HDR_SIZE;
5838 len -= L2CAP_CMD_HDR_SIZE;
5840 cmd_len = le16_to_cpu(cmd.len);
5842 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Truncated payload or invalid ident 0: stop parsing this PDU. */
5845 if (cmd_len > len || !cmd.ident) {
5846 BT_DBG("corrupted command");
5850 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5852 struct l2cap_cmd_rej_unk rej;
/* NOTE(review): same misleading "Wrong link type" text as the LE path. */
5854 BT_ERR("Wrong link type (%d)", err);
5856 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5857 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * Returns 0 when the FCS matches (or FCS is not in use); nonzero
 * return indicates a corrupted frame (caller drops it).
 */
5869 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5871 u16 our_fcs, rcv_fcs;
/* The CRC also covers the L2CAP header, whose size depends on whether
 * extended control fields are in use. */
5874 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5875 hdr_size = L2CAP_EXT_HDR_SIZE;
5877 hdr_size = L2CAP_ENH_HDR_SIZE;
5879 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the 2-byte FCS off; after the trim, skb->data + skb->len points
 * exactly at the (still readable) FCS bytes. */
5880 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5881 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* Recompute over header + payload; skb->data - hdr_size assumes the
 * header bytes still precede the payload in the linear buffer. */
5882 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5884 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the remote: send the F-bit back in an RNR if
 * we are locally busy, otherwise piggyback it on pending I-frames, and
 * fall back to an RR S-frame if no I-frame carried it.
 */
5890 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5892 struct l2cap_ctrl control;
5894 BT_DBG("chan %p", chan);
5896 memset(&control, 0, sizeof(control));
5899 control.reqseq = chan->buffer_seq;
/* Mark that the F-bit must be sent; cleared by whatever frame carries it. */
5900 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5902 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5903 control.super = L2CAP_SUPER_RNR;
5904 l2cap_send_sframe(chan, &control);
/* Leaving remote-busy with unacked frames outstanding: rearm the
 * retransmission timer. */
5907 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5908 chan->unacked_frames > 0)
5909 __set_retrans_timer(chan);
5911 /* Send pending iframes */
5912 l2cap_ertm_send(chan);
5914 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5915 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5916 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5919 control.super = L2CAP_SUPER_RR;
5920 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list and keep *last_frag pointing at
 * the tail so subsequent appends are O(1).  Updates skb's aggregate
 * len/data_len/truesize accounting.
 */
5924 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5925 struct sk_buff **last_frag)
5927 /* skb->len reflects data in skb as well as all fragments
5928 * skb->data_len reflects only data in fragments
/* First fragment: start the frag_list; otherwise link after the tail. */
5930 if (!skb_has_frag_list(skb))
5931 skb_shinfo(skb)->frag_list = new_frag;
5933 new_frag->next = NULL;
5935 (*last_frag)->next = new_frag;
5936 *last_frag = new_frag;
5938 skb->len += new_frag->len;
5939 skb->data_len += new_frag->len;
5940 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from ERTM/streaming I-frames
 * according to the SAR bits and deliver it via chan->ops->recv().
 * On error paths the partially built SDU is freed and state reset.
 */
5943 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5944 struct l2cap_ctrl *control)
5948 switch (control->sar) {
5949 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: deliver directly. */
5953 err = chan->ops->recv(chan, skb);
5956 case L2CAP_SAR_START:
/* First segment carries a 16-bit total SDU length prefix. */
5960 chan->sdu_len = get_unaligned_le16(skb->data);
5961 skb_pull(skb, L2CAP_SDULEN_SIZE);
5963 if (chan->sdu_len > chan->imtu) {
/* A start segment must not already contain the full SDU. */
5968 if (skb->len >= chan->sdu_len)
5972 chan->sdu_last_frag = skb;
5978 case L2CAP_SAR_CONTINUE:
5982 append_skb_frag(chan->sdu, skb,
5983 &chan->sdu_last_frag);
/* A continuation must not reach the announced SDU length. */
5986 if (chan->sdu->len >= chan->sdu_len)
/* SAR_END (elided label): append and require exact length match. */
5996 append_skb_frag(chan->sdu, skb,
5997 &chan->sdu_last_frag);
6000 if (chan->sdu->len != chan->sdu_len)
6003 err = chan->ops->recv(chan, chan->sdu);
6006 /* Reassembly complete */
6008 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
6016 kfree_skb(chan->sdu);
6018 chan->sdu_last_frag = NULL;
/* Re-segment outgoing data after an AMP channel move changed the MTU.
 * NOTE(review): body elided in this listing — presumably a placeholder
 * returning 0; confirm against the full source.
 */
6025 static int l2cap_resegment(struct l2cap_chan *chan)
/* Notify the ERTM TX state machine of a local-busy transition (receive
 * buffers full / freed).  No-op for non-ERTM channels.
 */
6031 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6035 if (chan->mode != L2CAP_MODE_ERTM)
6038 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6039 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver buffered I-frames in sequence order to
 * l2cap_reassemble_sdu() until a gap (missing txseq) is hit.  When the
 * queue empties, leave SREJ_SENT state and ack the peer.
 */
6042 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6045 /* Pass sequential frames to l2cap_reassemble_sdu()
6046 * until a gap is encountered.
6049 BT_DBG("chan %p", chan);
/* Stop draining while locally busy — receive buffers are full. */
6051 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6052 struct sk_buff *skb;
6053 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6054 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6056 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6061 skb_unlink(skb, &chan->srej_q);
6062 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6063 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
6068 if (skb_queue_empty(&chan->srej_q)) {
6069 chan->rx_state = L2CAP_RX_STATE_RECV;
6070 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, then retransmit the
 * single requested I-frame, honoring P/F bits and the SREJ_ACT bookkeeping
 * that avoids retransmitting the same frame twice for one SREJ exchange.
 * Protocol violations disconnect the channel (ECONNRESET).
 */
6076 static void l2cap_handle_srej(struct l2cap_chan *chan,
6077 struct l2cap_ctrl *control)
6079 struct sk_buff *skb;
6081 BT_DBG("chan %p, control %p", chan, control);
/* SREJ for the next unsent sequence number is a protocol violation. */
6083 if (control->reqseq == chan->next_tx_seq) {
6084 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6085 l2cap_send_disconn_req(chan, ECONNRESET);
6089 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6092 BT_DBG("Seq %d not available for retransmission",
/* Give up once the frame has hit its retry limit. */
6097 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
6098 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6099 l2cap_send_disconn_req(chan, ECONNRESET);
6103 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6105 if (control->poll) {
6106 l2cap_pass_to_tx(chan, control);
/* Poll set: retransmit with the F-bit and flush pending I-frames. */
6108 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6109 l2cap_retransmit(chan, control);
6110 l2cap_ertm_send(chan);
6112 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6113 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6114 chan->srej_save_reqseq = control->reqseq;
6117 l2cap_pass_to_tx_fbit(chan, control);
6119 if (control->final) {
/* F-bit: only retransmit if this isn't the SREJ we already acted on. */
6120 if (chan->srej_save_reqseq != control->reqseq ||
6121 !test_and_clear_bit(CONN_SREJ_ACT,
6123 l2cap_retransmit(chan, control);
6125 l2cap_retransmit(chan, control);
6126 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6127 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6128 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq and retry budget, then
 * retransmit everything from reqseq onward (REJ_ACT guards against a
 * duplicate retransmission when the F-bit confirms an earlier REJ).
 */
6134 static void l2cap_handle_rej(struct l2cap_chan *chan,
6135 struct l2cap_ctrl *control)
6137 struct sk_buff *skb;
6139 BT_DBG("chan %p, control %p", chan, control);
/* REJ of the next unsent sequence number is a protocol violation. */
6141 if (control->reqseq == chan->next_tx_seq) {
6142 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6143 l2cap_send_disconn_req(chan, ECONNRESET);
6147 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6149 if (chan->max_tx && skb &&
6150 bt_cb(skb)->control.retries >= chan->max_tx) {
6151 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6152 l2cap_send_disconn_req(chan, ECONNRESET);
6156 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6158 l2cap_pass_to_tx(chan, control);
6160 if (control->final) {
/* F=1: skip the retransmit if REJ_ACT shows we already did it. */
6161 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6162 l2cap_retransmit_all(chan, control);
6164 l2cap_retransmit_all(chan, control);
6165 l2cap_ertm_send(chan);
6166 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6167 set_bit(CONN_REJ_ACT, &chan->conn_state);
6171 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6173 BT_DBG("chan %p, txseq %d", chan, txseq);
6175 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6176 chan->expected_tx_seq);
6178 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6179 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6181 /* See notes below regarding "double poll" and
6184 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6185 BT_DBG("Invalid/Ignore - after SREJ");
6186 return L2CAP_TXSEQ_INVALID_IGNORE;
6188 BT_DBG("Invalid - in window after SREJ sent");
6189 return L2CAP_TXSEQ_INVALID;
6193 if (chan->srej_list.head == txseq) {
6194 BT_DBG("Expected SREJ");
6195 return L2CAP_TXSEQ_EXPECTED_SREJ;
6198 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6199 BT_DBG("Duplicate SREJ - txseq already stored");
6200 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6203 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6204 BT_DBG("Unexpected SREJ - not requested");
6205 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6209 if (chan->expected_tx_seq == txseq) {
6210 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6212 BT_DBG("Invalid - txseq outside tx window");
6213 return L2CAP_TXSEQ_INVALID;
6216 return L2CAP_TXSEQ_EXPECTED;
6220 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6221 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6222 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6223 return L2CAP_TXSEQ_DUPLICATE;
6226 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6227 /* A source of invalid packets is a "double poll" condition,
6228 * where delays cause us to send multiple poll packets. If
6229 * the remote stack receives and processes both polls,
6230 * sequence numbers can wrap around in such a way that a
6231 * resent frame has a sequence number that looks like new data
6232 * with a sequence gap. This would trigger an erroneous SREJ
6235 * Fortunately, this is impossible with a tx window that's
6236 * less than half of the maximum sequence number, which allows
6237 * invalid frames to be safely ignored.
6239 * With tx window sizes greater than half of the tx window
6240 * maximum, the frame is invalid and cannot be ignored. This
6241 * causes a disconnect.
6244 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6245 BT_DBG("Invalid/Ignore - txseq outside tx window");
6246 return L2CAP_TXSEQ_INVALID_IGNORE;
6248 BT_DBG("Invalid - txseq outside tx window");
6249 return L2CAP_TXSEQ_INVALID;
6252 BT_DBG("Unexpected - txseq indicates missing frames");
6253 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, RECV state: process an incoming I-frame or
 * S-frame event.  Expected I-frames are reassembled; a sequence gap
 * switches to SREJ_SENT after queueing the frame and sending SREJs.
 * Frames not consumed (skb_in_use stays false) are freed at the end.
 */
6257 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6258 struct l2cap_ctrl *control,
6259 struct sk_buff *skb, u8 event)
6262 bool skb_in_use = false;
6264 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6268 case L2CAP_EV_RECV_IFRAME:
6269 switch (l2cap_classify_txseq(chan, control->txseq)) {
6270 case L2CAP_TXSEQ_EXPECTED:
6271 l2cap_pass_to_tx(chan, control);
/* Local busy: drop the frame; peer will retransmit on recovery. */
6273 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6274 BT_DBG("Busy, discarding expected seq %d",
6279 chan->expected_tx_seq = __next_seq(chan,
6282 chan->buffer_seq = chan->expected_tx_seq;
6285 err = l2cap_reassemble_sdu(chan, skb, control);
6289 if (control->final) {
/* F=1 without a pending REJ_ACT: retransmit everything. */
6290 if (!test_and_clear_bit(CONN_REJ_ACT,
6291 &chan->conn_state)) {
6293 l2cap_retransmit_all(chan, control);
6294 l2cap_ertm_send(chan);
6298 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6299 l2cap_send_ack(chan);
6301 case L2CAP_TXSEQ_UNEXPECTED:
6302 l2cap_pass_to_tx(chan, control);
6304 /* Can't issue SREJ frames in the local busy state.
6305 * Drop this frame, it will be seen as missing
6306 * when local busy is exited.
6308 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6309 BT_DBG("Busy, discarding unexpected seq %d",
6314 /* There was a gap in the sequence, so an SREJ
6315 * must be sent for each missing frame. The
6316 * current frame is stored for later use.
6318 skb_queue_tail(&chan->srej_q, skb);
6320 BT_DBG("Queued %p (queue len %d)", skb,
6321 skb_queue_len(&chan->srej_q));
6323 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6324 l2cap_seq_list_clear(&chan->srej_list);
6325 l2cap_send_srej(chan, control->txseq);
6327 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6329 case L2CAP_TXSEQ_DUPLICATE:
6330 l2cap_pass_to_tx(chan, control);
6332 case L2CAP_TXSEQ_INVALID_IGNORE:
6334 case L2CAP_TXSEQ_INVALID:
6336 l2cap_send_disconn_req(chan, ECONNRESET);
6340 case L2CAP_EV_RECV_RR:
6341 l2cap_pass_to_tx(chan, control);
6342 if (control->final) {
6343 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6345 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6346 !__chan_is_moving(chan)) {
6348 l2cap_retransmit_all(chan, control);
6351 l2cap_ertm_send(chan);
6352 } else if (control->poll) {
6353 l2cap_send_i_or_rr_or_rnr(chan);
6355 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6356 &chan->conn_state) &&
6357 chan->unacked_frames)
6358 __set_retrans_timer(chan);
6360 l2cap_ertm_send(chan);
6363 case L2CAP_EV_RECV_RNR:
6364 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6365 l2cap_pass_to_tx(chan, control);
6366 if (control && control->poll) {
6367 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6368 l2cap_send_rr_or_rnr(chan, 0);
/* Remote is busy: stop retransmitting until it recovers. */
6370 __clear_retrans_timer(chan);
6371 l2cap_seq_list_clear(&chan->retrans_list);
6373 case L2CAP_EV_RECV_REJ:
6374 l2cap_handle_rej(chan, control);
6376 case L2CAP_EV_RECV_SREJ:
6377 l2cap_handle_srej(chan, control);
/* Free any skb that no branch took ownership of. */
6383 if (skb && !skb_in_use) {
6384 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state: we have outstanding SREJs and
 * buffer out-of-order I-frames in srej_q.  The arrival of the frame at
 * the head of the SREJ list lets l2cap_rx_queued_iframes() drain the
 * queue (which returns the channel to RECV once empty).
 */
6391 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6392 struct l2cap_ctrl *control,
6393 struct sk_buff *skb, u8 event)
6396 u16 txseq = control->txseq;
6397 bool skb_in_use = false;
6399 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6403 case L2CAP_EV_RECV_IFRAME:
6404 switch (l2cap_classify_txseq(chan, txseq)) {
6405 case L2CAP_TXSEQ_EXPECTED:
6406 /* Keep frame for reassembly later */
6407 l2cap_pass_to_tx(chan, control);
6408 skb_queue_tail(&chan->srej_q, skb);
6410 BT_DBG("Queued %p (queue len %d)", skb,
6411 skb_queue_len(&chan->srej_q));
6413 chan->expected_tx_seq = __next_seq(chan, txseq);
6415 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The retransmission we were waiting for: pop it off the list
 * and try to drain the queue in order. */
6416 l2cap_seq_list_pop(&chan->srej_list);
6418 l2cap_pass_to_tx(chan, control);
6419 skb_queue_tail(&chan->srej_q, skb);
6421 BT_DBG("Queued %p (queue len %d)", skb,
6422 skb_queue_len(&chan->srej_q));
6424 err = l2cap_rx_queued_iframes(chan);
6429 case L2CAP_TXSEQ_UNEXPECTED:
6430 /* Got a frame that can't be reassembled yet.
6431 * Save it for later, and send SREJs to cover
6432 * the missing frames.
6434 skb_queue_tail(&chan->srej_q, skb);
6436 BT_DBG("Queued %p (queue len %d)", skb,
6437 skb_queue_len(&chan->srej_q));
6439 l2cap_pass_to_tx(chan, control);
6440 l2cap_send_srej(chan, control->txseq);
6442 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6443 /* This frame was requested with an SREJ, but
6444 * some expected retransmitted frames are
6445 * missing. Request retransmission of missing
6448 skb_queue_tail(&chan->srej_q, skb);
6450 BT_DBG("Queued %p (queue len %d)", skb,
6451 skb_queue_len(&chan->srej_q));
6453 l2cap_pass_to_tx(chan, control);
6454 l2cap_send_srej_list(chan, control->txseq);
6456 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6457 /* We've already queued this frame. Drop this copy. */
6458 l2cap_pass_to_tx(chan, control);
6460 case L2CAP_TXSEQ_DUPLICATE:
6461 /* Expecting a later sequence number, so this frame
6462 * was already received. Ignore it completely.
6465 case L2CAP_TXSEQ_INVALID_IGNORE:
6467 case L2CAP_TXSEQ_INVALID:
6469 l2cap_send_disconn_req(chan, ECONNRESET);
6473 case L2CAP_EV_RECV_RR:
6474 l2cap_pass_to_tx(chan, control);
6475 if (control->final) {
6476 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6478 if (!test_and_clear_bit(CONN_REJ_ACT,
6479 &chan->conn_state)) {
6481 l2cap_retransmit_all(chan, control);
6484 l2cap_ertm_send(chan);
6485 } else if (control->poll) {
6486 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6487 &chan->conn_state) &&
6488 chan->unacked_frames) {
6489 __set_retrans_timer(chan);
/* Answer the poll by re-sending the SREJ at the list tail with F=1. */
6492 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6493 l2cap_send_srej_tail(chan);
6495 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6496 &chan->conn_state) &&
6497 chan->unacked_frames)
6498 __set_retrans_timer(chan);
6500 l2cap_send_ack(chan);
6503 case L2CAP_EV_RECV_RNR:
6504 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6505 l2cap_pass_to_tx(chan, control);
6506 if (control->poll) {
6507 l2cap_send_srej_tail(chan);
6509 struct l2cap_ctrl rr_control;
6510 memset(&rr_control, 0, sizeof(rr_control));
6511 rr_control.sframe = 1;
6512 rr_control.super = L2CAP_SUPER_RR;
6513 rr_control.reqseq = chan->buffer_seq;
6514 l2cap_send_sframe(chan, &rr_control);
6518 case L2CAP_EV_RECV_REJ:
6519 l2cap_handle_rej(chan, control);
6521 case L2CAP_EV_RECV_SREJ:
6522 l2cap_handle_srej(chan, control);
/* Free any skb that no branch took ownership of. */
6526 if (skb && !skb_in_use) {
6527 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return RX to RECV, switch the
 * connection MTU to the new controller's (block MTU for high-speed,
 * ACL MTU otherwise), and re-segment queued outgoing data.
 */
6534 static int l2cap_finish_move(struct l2cap_chan *chan)
6536 BT_DBG("chan %p", chan);
6538 chan->rx_state = L2CAP_RX_STATE_RECV;
6541 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6543 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6545 return l2cap_resegment(chan);
/* RX state WAIT_P (channel move, waiting for a poll from the peer):
 * on the poll, rewind the TX side to the acknowledged point, finish
 * the move, respond with the F-bit, then reprocess the event through
 * the normal RECV handler.
 */
6548 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6549 struct l2cap_ctrl *control,
6550 struct sk_buff *skb, u8 event)
6554 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6560 l2cap_process_reqseq(chan, control->reqseq);
6562 if (!skb_queue_empty(&chan->tx_q))
6563 chan->tx_send_head = skb_peek(&chan->tx_q);
6565 chan->tx_send_head = NULL;
6567 /* Rewind next_tx_seq to the point expected
6570 chan->next_tx_seq = control->reqseq;
6571 chan->unacked_frames = 0;
6573 err = l2cap_finish_move(chan);
6577 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6578 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frame data still needs normal RECV processing; S-frames are done. */
6580 if (event == L2CAP_EV_RECV_IFRAME)
6583 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state WAIT_F (channel move, waiting for an F-bit response): on
 * F=1, rewind TX to the acknowledged point, adopt the new controller's
 * MTU, re-segment, and process the frame via the RECV handler.
 */
6586 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6587 struct l2cap_ctrl *control,
6588 struct sk_buff *skb, u8 event)
/* Ignore everything until the final (F=1) frame arrives. */
6592 if (!control->final)
6595 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6597 chan->rx_state = L2CAP_RX_STATE_RECV;
6598 l2cap_process_reqseq(chan, control->reqseq);
6600 if (!skb_queue_empty(&chan->tx_q))
6601 chan->tx_send_head = skb_peek(&chan->tx_q);
6603 chan->tx_send_head = NULL;
6605 /* Rewind next_tx_seq to the point expected
6608 chan->next_tx_seq = control->reqseq;
6609 chan->unacked_frames = 0;
6612 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6614 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6616 err = l2cap_resegment(chan);
6619 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Check that reqseq acknowledges a frame that was actually sent and is
 * still unacked, using modular sequence arithmetic.
 */
6624 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6626 /* Make sure reqseq is for a packet that has been sent but not acked */
6629 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6630 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* ERTM RX entry point: validate reqseq, then dispatch the event to the
 * handler for the channel's current RX state.  An invalid reqseq is a
 * protocol violation and disconnects the channel.
 */
6633 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6634 struct sk_buff *skb, u8 event)
6638 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6639 control, skb, event, chan->rx_state);
6641 if (__valid_reqseq(chan, control->reqseq)) {
6642 switch (chan->rx_state) {
6643 case L2CAP_RX_STATE_RECV:
6644 err = l2cap_rx_state_recv(chan, control, skb, event);
6646 case L2CAP_RX_STATE_SREJ_SENT:
6647 err = l2cap_rx_state_srej_sent(chan, control, skb,
6650 case L2CAP_RX_STATE_WAIT_P:
6651 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6653 case L2CAP_RX_STATE_WAIT_F:
6654 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6661 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6662 control->reqseq, chan->next_tx_seq,
6663 chan->expected_ack_seq);
6664 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode RX: deliver only in-sequence frames; anything else is
 * silently dropped (streaming mode has no retransmission).  A gap also
 * discards any partially reassembled SDU.
 */
6670 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6671 struct sk_buff *skb)
6675 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6678 if (l2cap_classify_txseq(chan, control->txseq) ==
6679 L2CAP_TXSEQ_EXPECTED) {
6680 l2cap_pass_to_tx(chan, control);
6682 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6683 __next_seq(chan, chan->buffer_seq));
6685 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6687 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence: drop the partial SDU and the frame itself. */
6690 kfree_skb(chan->sdu);
6693 chan->sdu_last_frag = NULL;
6697 BT_DBG("Freeing %p", skb);
/* Track the latest txseq so classification stays in sync. */
6702 chan->last_acked_seq = control->txseq;
6703 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Receive path for ERTM/streaming channels: unpack the control field,
 * verify FCS, validate frame length and F/P bit combinations, then
 * route I-frames and S-frames into the appropriate state machine.
 */
6708 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6710 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6714 __unpack_control(chan, skb);
6719 * We can just drop the corrupted I-frame here.
6720 * Receiver will miss it and start proper recovery
6721 * procedures and ask for retransmission.
6723 if (l2cap_check_fcs(chan, skb))
/* Account for the SDU-length prefix and FCS when checking against MPS. */
6726 if (!control->sframe && control->sar == L2CAP_SAR_START)
6727 len -= L2CAP_SDULEN_SIZE;
6729 if (chan->fcs == L2CAP_FCS_CRC16)
6730 len -= L2CAP_FCS_SIZE;
6732 if (len > chan->mps) {
6733 l2cap_send_disconn_req(chan, ECONNRESET);
6737 if (!control->sframe) {
6740 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6741 control->sar, control->reqseq, control->final,
6744 /* Validate F-bit - F=0 always valid, F=1 only
6745 * valid in TX WAIT_F
6747 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6750 if (chan->mode != L2CAP_MODE_STREAMING) {
6751 event = L2CAP_EV_RECV_IFRAME;
6752 err = l2cap_rx(chan, control, skb, event);
6754 err = l2cap_stream_rx(chan, control, skb);
6758 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit supervisory function to a state-machine event. */
6760 const u8 rx_func_to_event[4] = {
6761 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6762 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6765 /* Only I-frames are expected in streaming mode */
6766 if (chan->mode == L2CAP_MODE_STREAMING)
6769 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6770 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing data is a protocol error. */
6774 BT_ERR("Trailing bytes: %d in sframe", len);
6775 l2cap_send_disconn_req(chan, ECONNRESET);
6779 /* Validate F and P bits */
6780 if (control->final && (control->poll ||
6781 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6784 event = rx_func_to_event[control->super];
6785 if (l2cap_rx(chan, control, skb, event))
6786 l2cap_send_disconn_req(chan, ECONNRESET);
/* Replenish the peer's LE flow-control credits: once our rx_credits
 * drop below half the initial allotment, top them back up to the max
 * and tell the peer via an LE Flow Control Credit PDU.
 */
6796 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6798 struct l2cap_conn *conn = chan->conn;
6799 struct l2cap_le_credits pkt;
6802 /* We return more credits to the sender only after the amount of
6803 * credits falls below half of the initial amount.
6805 if (chan->rx_credits >= (L2CAP_LE_MAX_CREDITS + 1) / 2)
6808 return_credits = L2CAP_LE_MAX_CREDITS - chan->rx_credits;
6810 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6812 chan->rx_credits += return_credits;
6814 pkt.cid = cpu_to_le16(chan->scid);
6815 pkt.credits = cpu_to_le16(return_credits);
6817 chan->ident = l2cap_get_ident(conn);
6819 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive path for LE credit-based flow control channels: consume a
 * credit, validate sizes, and reassemble SDUs (first fragment carries a
 * 16-bit SDU length).  Errors free any partial SDU but deliberately do
 * not propagate (see the comment at the end) to avoid a double free.
 */
6822 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Peer sent data without credit: protocol violation. */
6826 if (!chan->rx_credits) {
6827 BT_ERR("No credits to receive LE L2CAP data");
6831 if (chan->imtu < skb->len) {
6832 BT_ERR("Too big LE L2CAP PDU");
6837 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6839 l2cap_chan_le_send_credits(chan);
/* First fragment of a new SDU: parse the SDU-length prefix. */
6846 sdu_len = get_unaligned_le16(skb->data);
6847 skb_pull(skb, L2CAP_SDULEN_SIZE);
6849 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6850 sdu_len, skb->len, chan->imtu);
6852 if (sdu_len > chan->imtu) {
6853 BT_ERR("Too big LE L2CAP SDU length received");
6858 if (skb->len > sdu_len) {
6859 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU fit in one PDU: deliver immediately. */
6864 if (skb->len == sdu_len)
6865 return chan->ops->recv(chan, skb);
6868 chan->sdu_len = sdu_len;
6869 chan->sdu_last_frag = skb;
/* Continuation fragment: append and deliver once complete. */
6874 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6875 chan->sdu->len, skb->len, chan->sdu_len);
6877 if (chan->sdu->len + skb->len > chan->sdu_len) {
6878 BT_ERR("Too much LE L2CAP data received");
6883 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6886 if (chan->sdu->len == chan->sdu_len) {
6887 err = chan->ops->recv(chan, chan->sdu);
6890 chan->sdu_last_frag = NULL;
6898 kfree_skb(chan->sdu);
6900 chan->sdu_last_frag = NULL;
6904 /* We can't return an error here since we took care of the skb
6905 * freeing internally. An error return would cause the caller to
6906 * do a double-free of the skb.
/* Deliver a data frame to the channel identified by scid, dispatching
 * by channel mode (LE flow control, basic, ERTM/streaming).  Unknown
 * CIDs (except A2MP creation) drop the packet.
 */
6911 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6912 struct sk_buff *skb)
6914 struct l2cap_chan *chan;
6916 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP fixed channel may be created on first use. */
6918 if (cid == L2CAP_CID_A2MP) {
6919 chan = a2mp_channel_create(conn, skb);
6925 l2cap_chan_lock(chan);
6927 BT_DBG("unknown cid 0x%4.4x", cid);
6928 /* Drop packet and return */
6934 BT_DBG("chan %p, len %d", chan, skb->len);
6936 if (chan->state != BT_CONNECTED)
6939 switch (chan->mode) {
6940 case L2CAP_MODE_LE_FLOWCTL:
6941 if (l2cap_le_data_rcv(chan, skb) < 0)
6946 case L2CAP_MODE_BASIC:
6947 /* If socket recv buffers overflows we drop data here
6948 * which is *bad* because L2CAP has to be reliable.
6949 * But we don't have any other choice. L2CAP doesn't
6950 * provide flow control mechanism. */
6952 if (chan->imtu < skb->len)
6955 if (!chan->ops->recv(chan, skb))
6959 case L2CAP_MODE_ERTM:
6960 case L2CAP_MODE_STREAMING:
6961 l2cap_data_rcv(chan, skb);
6965 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6973 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to a channel listening on
 * the given PSM; stashes the remote address and PSM in the skb control
 * block for recvmsg's msg_name.
 */
6976 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6977 struct sk_buff *skb)
6979 struct hci_conn *hcon = conn->hcon;
6980 struct l2cap_chan *chan;
6982 if (hcon->type != ACL_LINK)
6985 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6990 BT_DBG("chan %p, len %d", chan, skb->len);
6992 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6995 if (chan->imtu < skb->len)
6998 /* Store remote BD_ADDR and PSM for msg_name */
6999 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
7000 bt_cb(skb)->psm = psm;
7002 if (!chan->ops->recv(chan, skb))
/* Deliver a frame on the LE ATT fixed channel to the global ATT channel
 * matching this connection's addresses; blacklisted peers are dropped.
 */
7009 static void l2cap_att_channel(struct l2cap_conn *conn,
7010 struct sk_buff *skb)
7012 struct hci_conn *hcon = conn->hcon;
7013 struct l2cap_chan *chan;
7015 if (hcon->type != LE_LINK)
7018 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
7019 &hcon->src, &hcon->dst);
7023 BT_DBG("chan %p, len %d", chan, skb->len);
/* Reject traffic from blacklisted remote devices. */
7025 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
7028 if (chan->imtu < skb->len)
7031 if (!chan->ops->recv(chan, skb))
/* Top-level demultiplexer for a complete L2CAP frame: parse the basic
 * header and route by CID to the signaling, connectionless, ATT, LE
 * signaling, SMP, or per-channel data handlers.  Takes ownership of skb.
 */
7038 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7040 struct l2cap_hdr *lh = (void *) skb->data;
7044 skb_pull(skb, L2CAP_HDR_SIZE);
7045 cid = __le16_to_cpu(lh->cid);
7046 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
7048 if (len != skb->len) {
7053 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7056 case L2CAP_CID_SIGNALING:
7057 l2cap_sig_channel(conn, skb);
7060 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM right after the header. */
7061 psm = get_unaligned((__le16 *) skb->data);
7062 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7063 l2cap_conless_channel(conn, psm, skb);
7067 l2cap_att_channel(conn, skb);
7070 case L2CAP_CID_LE_SIGNALING:
7071 l2cap_le_sig_channel(conn, skb);
/* SMP CID (elided label): a failed SMP exchange kills the link. */
7075 if (smp_sig_channel(conn, skb))
7076 l2cap_conn_del(conn->hcon, EACCES);
7080 l2cap_data_channel(conn, cid, skb);
7085 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * bdaddr?  Scan listening channels; an exact source-address match
 * (lm1) takes precedence over BDADDR_ANY listeners (lm2).  Returns a
 * bitmask of HCI_LM_ACCEPT / HCI_LM_MASTER.
 */
7087 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7089 int exact = 0, lm1 = 0, lm2 = 0;
7090 struct l2cap_chan *c;
7092 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7094 /* Find listening sockets and check their link_mode */
7095 read_lock(&chan_list_lock);
7096 list_for_each_entry(c, &chan_list, global_l) {
7097 if (c->state != BT_LISTEN)
7100 if (!bacmp(&c->src, &hdev->bdaddr)) {
7101 lm1 |= HCI_LM_ACCEPT;
7102 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7103 lm1 |= HCI_LM_MASTER;
7105 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7106 lm2 |= HCI_LM_ACCEPT;
7107 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7108 lm2 |= HCI_LM_MASTER;
7111 read_unlock(&chan_list_lock);
7113 return exact ? lm1 : lm2;
/* HCI callback: connection attempt completed.  On success, create the
 * l2cap_conn and mark it ready; on failure, tear it down with the
 * mapped errno.
 */
7116 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7118 struct l2cap_conn *conn;
7120 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7123 conn = l2cap_conn_add(hcon);
7125 l2cap_conn_ready(conn);
7127 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason to use when disconnecting this link;
 * falls back to REMOTE_USER_TERM when no l2cap_conn exists.
 */
7131 int l2cap_disconn_ind(struct hci_conn *hcon)
7133 struct l2cap_conn *conn = hcon->l2cap_data;
7135 BT_DBG("hcon %p", hcon);
7138 return HCI_ERROR_REMOTE_USER_TERM;
7139 return conn->disc_reason;
/* HCI callback: the link went down — tear down the l2cap_conn with the
 * HCI reason mapped to an errno.
 */
7142 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7144 BT_DBG("hcon %p reason %d", hcon, reason);
7146 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a timeout (MEDIUM security) or closes the
 * channel outright (HIGH security); regaining it clears the timer.
 */
7149 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7151 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7154 if (encrypt == 0x00) {
7155 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7156 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7157 } else if (chan->sec_level == BT_SECURITY_HIGH)
7158 l2cap_chan_close(chan, ECONNREFUSED);
7160 if (chan->sec_level == BT_SECURITY_MEDIUM)
7161 __clear_chan_timer(chan);
/* HCI callback: security (authentication/encryption) procedure
 * completed.  For LE, distribute SMP keys; for BR/EDR, walk every
 * channel on the connection and advance its state machine — resume
 * connected channels, continue a pending connect, or answer a held
 * CONNECT2 with success/pending/security-block.
 */
7165 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7167 struct l2cap_conn *conn = hcon->l2cap_data;
7168 struct l2cap_chan *chan;
7173 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7175 if (hcon->type == LE_LINK) {
7176 if (!status && encrypt)
7177 smp_distribute_keys(conn, 0);
7178 cancel_delayed_work(&conn->security_timer);
7181 mutex_lock(&conn->chan_lock);
7183 list_for_each_entry(chan, &conn->chan_l, list) {
7184 l2cap_chan_lock(chan);
7186 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7187 state_to_string(chan->state));
/* A2MP fixed channels are not subject to this state machine. */
7189 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
7190 l2cap_chan_unlock(chan);
7194 if (chan->scid == L2CAP_CID_ATT) {
7195 if (!status && encrypt) {
7196 chan->sec_level = hcon->sec_level;
7197 l2cap_chan_ready(chan);
7200 l2cap_chan_unlock(chan);
7204 if (!__l2cap_no_conn_pending(chan)) {
7205 l2cap_chan_unlock(chan);
7209 if (!status && (chan->state == BT_CONNECTED ||
7210 chan->state == BT_CONFIG)) {
7211 chan->ops->resume(chan);
7212 l2cap_check_encryption(chan, encrypt);
7213 l2cap_chan_unlock(chan);
7217 if (chan->state == BT_CONNECT) {
7219 l2cap_start_connection(chan);
7221 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7222 } else if (chan->state == BT_CONNECT2) {
7223 struct l2cap_conn_rsp rsp;
/* Security passed: either defer to userspace (AUTHOR_PEND) or
 * proceed to CONFIG; security failure blocks the connection. */
7227 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7228 res = L2CAP_CR_PEND;
7229 stat = L2CAP_CS_AUTHOR_PEND;
7230 chan->ops->defer(chan);
7232 l2cap_state_change(chan, BT_CONFIG);
7233 res = L2CAP_CR_SUCCESS;
7234 stat = L2CAP_CS_NO_INFO;
7237 l2cap_state_change(chan, BT_DISCONN);
7238 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7239 res = L2CAP_CR_SEC_BLOCK;
7240 stat = L2CAP_CS_NO_INFO;
7243 rsp.scid = cpu_to_le16(chan->dcid);
7244 rsp.dcid = cpu_to_le16(chan->scid);
7245 rsp.result = cpu_to_le16(res);
7246 rsp.status = cpu_to_le16(stat);
7247 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7250 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7251 res == L2CAP_CR_SUCCESS) {
7253 set_bit(CONF_REQ_SENT, &chan->conf_state);
7254 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7256 l2cap_build_conf_req(chan, buf),
7258 chan->num_conf_req++;
7262 l2cap_chan_unlock(chan);
7265 mutex_unlock(&conn->chan_lock);
/* HCI entry point for ACL data: reassemble fragmented L2CAP frames
 * across start/continuation ACL packets into conn->rx_skb and hand
 * complete frames to l2cap_recv_frame().  Length mismatches mark the
 * connection unreliable (ECOMM).
 */
7270 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7272 struct l2cap_conn *conn = hcon->l2cap_data;
7273 struct l2cap_hdr *hdr;
7276 /* For AMP controller do not create l2cap conn */
7277 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7281 conn = l2cap_conn_add(hcon);
7286 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7290 case ACL_START_NO_FLUSH:
/* A new start frame while a reassembly is pending: discard the old one. */
7293 BT_ERR("Unexpected start frame (len %d)", skb->len);
7294 kfree_skb(conn->rx_skb);
7295 conn->rx_skb = NULL;
7297 l2cap_conn_unreliable(conn, ECOMM);
7300 /* Start fragment always begin with Basic L2CAP header */
7301 if (skb->len < L2CAP_HDR_SIZE) {
7302 BT_ERR("Frame is too short (len %d)", skb->len);
7303 l2cap_conn_unreliable(conn, ECOMM);
7307 hdr = (struct l2cap_hdr *) skb->data;
7308 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7310 if (len == skb->len) {
7311 /* Complete frame received */
7312 l2cap_recv_frame(conn, skb);
7316 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7318 if (skb->len > len) {
7319 BT_ERR("Frame is too long (len %d, expected len %d)",
7321 l2cap_conn_unreliable(conn, ECOMM);
7325 /* Allocate skb for the complete frame (with header) */
7326 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7330 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7332 conn->rx_len = len - skb->len;
/* Continuation fragment path (elided case label). */
7336 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7338 if (!conn->rx_len) {
7339 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7340 l2cap_conn_unreliable(conn, ECOMM);
7344 if (skb->len > conn->rx_len) {
7345 BT_ERR("Fragment is too long (len %d, expected %d)",
7346 skb->len, conn->rx_len);
7347 kfree_skb(conn->rx_skb);
7348 conn->rx_skb = NULL;
7350 l2cap_conn_unreliable(conn, ECOMM);
7354 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7356 conn->rx_len -= skb->len;
7358 if (!conn->rx_len) {
7359 /* Complete frame received. l2cap_recv_frame
7360 * takes ownership of the skb so set the global
7361 * rx_skb pointer to NULL first.
7363 struct sk_buff *rx_skb = conn->rx_skb;
7364 conn->rx_skb = NULL;
7365 l2cap_recv_frame(conn, rx_skb);
/* seq_file show callback: dump one line per global L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode).
 */
7375 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7377 struct l2cap_chan *c;
7379 read_lock(&chan_list_lock);
7381 list_for_each_entry(c, &chan_list, global_l) {
7382 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7384 c->state, __le16_to_cpu(c->psm),
7385 c->scid, c->dcid, c->imtu, c->omtu,
7386 c->sec_level, c->mode);
7389 read_unlock(&chan_list_lock);
/* debugfs open: bind the seq_file single-show to l2cap_debugfs_show. */
7394 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7396 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/bluetooth/l2cap entry. */
7399 static const struct file_operations l2cap_debugfs_fops = {
7400 .open = l2cap_debugfs_open,
7402 .llseek = seq_lseek,
7403 .release = single_release,
/* Dentry of the l2cap debugfs file, for removal at module exit. */
7406 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the debugfs
 * entry (skipped when the bluetooth debugfs root is unavailable).
 */
7408 int __init l2cap_init(void)
7412 err = l2cap_init_sockets();
7416 if (IS_ERR_OR_NULL(bt_debugfs))
7419 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7420 NULL, &l2cap_debugfs_fops);
/* Module exit: remove the debugfs entry and unregister sockets. */
7425 void l2cap_exit(void)
7427 debugfs_remove(l2cap_debugfs);
7428 l2cap_cleanup_sockets();
/* Runtime-writable module parameter to turn off ERTM support. */
7431 module_param(disable_ertm, bool, 0644);
7432 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");