2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/* Feature mask advertised to peers in L2CAP information responses. */
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
/* Supported fixed-channel bitmap; byte 0 marks the signalling and
 * connectionless channels as supported. */
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global list of every L2CAP channel in the system, protected by
 * chan_list_lock (read-mostly, hence an rwlock). */
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in this file.
 * NOTE(review): this view is elided; the trailing parameter line of
 * l2cap_send_cmd() is not visible here.
 */
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
/* Map an HCI-level address type to the bdaddr type exposed by L2CAP:
 * on LE links, ADDR_LE_DEV_PUBLIC becomes BDADDR_LE_PUBLIC, anything
 * else BDADDR_LE_RANDOM.
 * NOTE(review): source view is elided; the non-LE return path is not
 * visible here.
 */
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
/* Look up a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock.
 * NOTE(review): loop body (match test and return) is elided from this view.
 */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on this connection by source CID.
 * Caller must hold conn->chan_lock.
 * NOTE(review): loop body (match test and return) is elided from this view.
 */
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
/* Locking wrapper around __l2cap_get_chan_by_scid(): takes conn->chan_lock
 * for the lookup and (per the comment above) returns the channel locked.
 */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
/* NOTE(review): the per-channel locking between lookup and unlock is
 * elided from this view. */
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
/* Locking wrapper around __l2cap_get_chan_by_dcid(): same pattern as
 * l2cap_get_chan_by_scid() but keyed on the destination CID.
 */
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
/* Find the channel whose pending signalling request used this ident.
 * Caller must hold conn->chan_lock.
 */
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
/* Search the global channel list for one bound to the given source PSM
 * and source address. Caller must hold chan_list_lock.
 */
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM. With an explicit psm, fail if another channel
 * already owns it on this source address; with psm == 0, auto-allocate an
 * odd dynamic PSM in the 0x1001..0x10ff range.
 * NOTE(review): error-path returns are elided from this view.
 */
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd values; step by 2 to stay odd. */
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
/* Assign a fixed source CID to a channel under chan_list_lock.
 * NOTE(review): the assignment itself is elided from this view.
 */
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic CID on this connection by linear scan
 * of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END).
 */
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition a channel to a new state, logging the old -> new names and
 * notifying the channel owner via the state_change op (err == 0).
 */
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state, 0);
/* Notify the owner of a state change that carries an error code.
 * NOTE(review): elided view — only the callback invocation is visible;
 * it passes chan->state, presumably after the state was updated on a
 * missing line. Verify against the full source.
 */
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
239 chan->ops->state_change(chan, chan->state, err);
/* Report an error on the channel without changing its state: the
 * state_change op is invoked with the current state and err.
 */
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
244 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, but only when no monitor timer is
 * pending (monitor supersedes retransmission) and a timeout is configured.
 */
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
/* Switch from the retransmission timer to the monitor timer: the retrans
 * timer is always cleared, and the monitor timer armed if configured.
 */
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
/* Scan an skb queue for the frame carrying ERTM TxSeq == seq.
 * NOTE(review): the return statements are elided from this view.
 */
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
/* Initialize a seq_list: allocate a power-of-two array so sequence
 * numbers (up to 14 bits) can be masked into the smaller index space,
 * and mark every slot plus head/tail as CLEAR (empty list).
 */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* mask doubles as both index mask and "capacity - 1". */
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array of a seq_list; kfree(NULL) is a no-op, so
 * this is safe on a never-initialized list.
 */
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
/* O(1) membership test: a slot is in the list iff its entry is not
 * L2CAP_SEQ_LIST_CLEAR.
 */
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove a sequence number from the singly-linked seq_list.
 * Removing the head is O(1); removing an interior element walks the list
 * to find the predecessor. Returns L2CAP_SEQ_LIST_CLEAR when the list is
 * empty or the number is not found.
 * NOTE(review): elided view — the final "return seq" and some closing
 * braces are not visible here.
 */
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* If the old head pointed at the TAIL sentinel, the list is now empty. */
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
/* Pop the head of the list; delegates to remove(), which handles the
 * empty-list case and makes head removal O(1).
 */
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: short-circuit if already empty, otherwise reset every
 * slot and the head/tail markers to CLEAR.
 */
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append a sequence number at the tail in O(1). A seq already present
 * (slot not CLEAR) is not re-added; an empty list makes seq the new head,
 * otherwise the old tail is linked to it. The new tail slot holds the
 * TAIL sentinel.
 */
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer. Picks a close reason from
 * the current state (connected/config or a securing connect attempt map
 * to ECONNREFUSED), closes the channel under conn->chan_lock + the
 * channel lock, notifies the owner, and drops the work's reference.
 * NOTE(review): elided view — the default reason assignment (likely
 * ETIMEDOUT) is on a line not visible here.
 */
396 static void l2cap_chan_timeout(struct work_struct *work)
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
400 struct l2cap_conn *conn = chan->conn;
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
416 l2cap_chan_close(chan, reason);
418 l2cap_chan_unlock(chan);
/* Owner callback runs after the channel lock is released. */
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
/* Drop the reference held on behalf of this queued work. */
423 l2cap_chan_put(chan);
/* Allocate and initialize a new channel: zeroed state, its own mutex,
 * registration on the global chan_list, the timeout work item, BT_OPEN
 * state and an initial kref. CONF_NOT_COMPLETE stays set until
 * l2cap_chan_ready().
 */
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
/* kref release callback: unlink the channel from the global list (under
 * chan_list_lock) before the memory is freed.
 */
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
/* Take a reference on the channel.
 * NOTE(review): the kref_get() call is elided from this view.
 */
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference; the last put triggers l2cap_chan_destroy(). */
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
/* Load spec-default channel parameters: CRC16 FCS, default max-transmit
 * and TX window values, low security, and force-active ACL scheduling.
 */
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Set up LE credit-based flow control defaults: local MTU, minimum
 * outgoing MTU until negotiated, no TX credits until granted by the
 * peer, and a full initial RX credit grant.
 */
493 void l2cap_le_flowctl_init(struct l2cap_chan *chan)
495 chan->imtu = L2CAP_DEFAULT_MTU;
496 chan->omtu = L2CAP_LE_MIN_MTU;
497 chan->mode = L2CAP_MODE_LE_FLOWCTL;
498 chan->tx_credits = 0;
499 chan->rx_credits = L2CAP_LE_MAX_CREDITS;
/* Attach a channel to a connection: assign source/destination CIDs and
 * MTUs according to the channel type (connection-oriented, connectionless,
 * fixed A2MP, or raw/signalling), seed the EFS locals with best-effort
 * defaults, then take references on the channel and the HCI connection
 * and link the channel into conn->chan_l.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()).
 */
502 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
504 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
505 __le16_to_cpu(chan->psm), chan->dcid);
/* Default disconnect reason until something more specific happens. */
507 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
511 switch (chan->chan_type) {
512 case L2CAP_CHAN_CONN_ORIENTED:
513 if (conn->hcon->type == LE_LINK) {
515 chan->omtu = L2CAP_DEFAULT_MTU;
/* ATT uses the fixed ATT CID on both ends. */
516 if (chan->dcid == L2CAP_CID_ATT)
517 chan->scid = L2CAP_CID_ATT;
519 chan->scid = l2cap_alloc_cid(conn);
521 /* Alloc CID for connection-oriented socket */
522 chan->scid = l2cap_alloc_cid(conn);
523 chan->omtu = L2CAP_DEFAULT_MTU;
527 case L2CAP_CHAN_CONN_LESS:
528 /* Connectionless socket */
529 chan->scid = L2CAP_CID_CONN_LESS;
530 chan->dcid = L2CAP_CID_CONN_LESS;
531 chan->omtu = L2CAP_DEFAULT_MTU;
534 case L2CAP_CHAN_CONN_FIX_A2MP:
535 chan->scid = L2CAP_CID_A2MP;
536 chan->dcid = L2CAP_CID_A2MP;
537 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
538 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
542 /* Raw socket can send/recv signalling messages only */
543 chan->scid = L2CAP_CID_SIGNALING;
544 chan->dcid = L2CAP_CID_SIGNALING;
545 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort EFS defaults for the local side. */
548 chan->local_id = L2CAP_BESTEFFORT_ID;
549 chan->local_stype = L2CAP_SERV_BESTEFFORT;
550 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
551 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
552 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
553 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
555 l2cap_chan_hold(chan);
557 hci_conn_hold(conn->hcon);
559 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
562 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
564 mutex_lock(&conn->chan_lock);
565 __l2cap_chan_add(conn, chan);
566 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection and tear down its state: stop the
 * channel timer, unlink from conn->chan_l and drop references, release
 * the HCI connection (unless fixed A2MP), clear any AMP manager / HS
 * logical-link linkage, notify the owner via teardown(err), and purge
 * per-mode queues and timers (ERTM/streaming).
 * Caller context: see l2cap_chan_close() / l2cap_chan_timeout().
 */
569 void l2cap_chan_del(struct l2cap_chan *chan, int err)
571 struct l2cap_conn *conn = chan->conn;
573 __clear_chan_timer(chan);
575 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
578 struct amp_mgr *mgr = conn->hcon->amp_mgr;
579 /* Delete from channel list */
580 list_del(&chan->list);
582 l2cap_chan_put(chan);
/* Fixed A2MP channels do not hold their own hcon reference. */
586 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
587 hci_conn_drop(conn->hcon);
589 if (mgr && mgr->bredr_chan == chan)
590 mgr->bredr_chan = NULL;
593 if (chan->hs_hchan) {
594 struct hci_chan *hs_hchan = chan->hs_hchan;
596 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
597 amp_disconnect_logical_link(hs_hchan);
600 chan->ops->teardown(chan, err);
602 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
606 case L2CAP_MODE_BASIC:
609 case L2CAP_MODE_LE_FLOWCTL:
610 skb_queue_purge(&chan->tx_q);
613 case L2CAP_MODE_ERTM:
614 __clear_retrans_timer(chan);
615 __clear_monitor_timer(chan);
616 __clear_ack_timer(chan);
618 skb_queue_purge(&chan->srej_q);
620 l2cap_seq_list_free(&chan->srej_list);
621 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough into STREAMING cleanup in the full source — here elided. */
625 case L2CAP_MODE_STREAMING:
626 skb_queue_purge(&chan->tx_q);
/* Reject a pending LE connection request: result is AUTHORIZATION when
 * setup was deferred, BAD_PSM otherwise. Moves the channel to BT_DISCONN
 * and answers the peer's request (chan->ident) with an LE connect response.
 */
633 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
635 struct l2cap_conn *conn = chan->conn;
636 struct l2cap_le_conn_rsp rsp;
639 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
640 result = L2CAP_CR_AUTHORIZATION;
642 result = L2CAP_CR_BAD_PSM;
644 l2cap_state_change(chan, BT_DISCONN);
646 rsp.dcid = cpu_to_le16(chan->scid);
647 rsp.mtu = cpu_to_le16(chan->imtu);
648 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
649 rsp.credits = cpu_to_le16(chan->rx_credits);
650 rsp.result = cpu_to_le16(result);
652 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Reject a pending BR/EDR connection request: SEC_BLOCK when setup was
 * deferred, BAD_PSM otherwise. Moves the channel to BT_DISCONN and sends
 * the connect response for the peer's outstanding ident.
 */
656 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
658 struct l2cap_conn *conn = chan->conn;
659 struct l2cap_conn_rsp rsp;
662 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
663 result = L2CAP_CR_SEC_BLOCK;
665 result = L2CAP_CR_BAD_PSM;
667 l2cap_state_change(chan, BT_DISCONN);
/* Note the swap: our dcid is the peer's scid and vice versa. */
669 rsp.scid = cpu_to_le16(chan->dcid);
670 rsp.dcid = cpu_to_le16(chan->scid);
671 rsp.result = cpu_to_le16(result);
672 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
674 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its current state: connected PSM-based
 * channels send a disconnect request under a timer; half-open incoming
 * channels (BT_CONNECT2) get a connect-reject appropriate to the link
 * type; other states are deleted directly or simply torn down.
 * NOTE(review): the case labels of the outer switch are elided here;
 * groupings are inferred from the visible bodies — verify against the
 * full source.
 */
677 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
679 struct l2cap_conn *conn = chan->conn;
681 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
683 switch (chan->state) {
685 chan->ops->teardown(chan, 0);
690 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
691 * check for chan->psm.
693 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
694 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
695 l2cap_send_disconn_req(chan, reason);
697 l2cap_chan_del(chan, reason);
701 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
702 if (conn->hcon->type == ACL_LINK)
703 l2cap_chan_connect_reject(chan);
704 else if (conn->hcon->type == LE_LINK)
705 l2cap_chan_le_connect_reject(chan);
708 l2cap_chan_del(chan, reason);
713 l2cap_chan_del(chan, reason);
717 chan->ops->teardown(chan, 0);
/* Translate channel type + security level into an HCI authentication
 * requirement. Raw channels map security levels to dedicated bonding;
 * connectionless 3DSP and connection-oriented SDP are special-cased to
 * downgrade LOW to the SDP security level; everything else uses general
 * bonding (with MITM for HIGH).
 * NOTE(review): the leading case label before the first inner switch is
 * elided in this view.
 */
722 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
724 switch (chan->chan_type) {
726 switch (chan->sec_level) {
727 case BT_SECURITY_HIGH:
728 return HCI_AT_DEDICATED_BONDING_MITM;
729 case BT_SECURITY_MEDIUM:
730 return HCI_AT_DEDICATED_BONDING;
732 return HCI_AT_NO_BONDING;
735 case L2CAP_CHAN_CONN_LESS:
736 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
737 if (chan->sec_level == BT_SECURITY_LOW)
738 chan->sec_level = BT_SECURITY_SDP;
740 if (chan->sec_level == BT_SECURITY_HIGH)
741 return HCI_AT_NO_BONDING_MITM;
743 return HCI_AT_NO_BONDING;
745 case L2CAP_CHAN_CONN_ORIENTED:
746 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
747 if (chan->sec_level == BT_SECURITY_LOW)
748 chan->sec_level = BT_SECURITY_SDP;
750 if (chan->sec_level == BT_SECURITY_HIGH)
751 return HCI_AT_NO_BONDING_MITM;
753 return HCI_AT_NO_BONDING;
757 switch (chan->sec_level) {
758 case BT_SECURITY_HIGH:
759 return HCI_AT_GENERAL_BONDING_MITM;
760 case BT_SECURITY_MEDIUM:
761 return HCI_AT_GENERAL_BONDING;
763 return HCI_AT_NO_BONDING;
769 /* Service level security */
/* Enforce the channel's service-level security: LE links go through SMP,
 * BR/EDR links through HCI authentication with the type derived from
 * l2cap_get_auth_type().
 */
770 int l2cap_chan_check_security(struct l2cap_chan *chan)
772 struct l2cap_conn *conn = chan->conn;
775 if (conn->hcon->type == LE_LINK)
776 return smp_conn_security(conn->hcon, chan->sec_level);
778 auth_type = l2cap_get_auth_type(chan);
780 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier under conn->lock,
 * wrapping within the kernel-reserved 1..128 range.
 * NOTE(review): the wrap-around assignment and return are elided here.
 */
783 static u8 l2cap_get_ident(struct l2cap_conn *conn)
787 /* Get next available identificator.
788 * 1 - 128 are used by kernel.
789 * 129 - 199 are reserved.
790 * 200 - 254 are used by utilities like l2ping, etc.
793 spin_lock(&conn->lock);
795 if (++conn->tx_ident > 128)
800 spin_unlock(&conn->lock);
/* Build and transmit a signalling command on the connection's HCI
 * channel. Uses the no-flush ACL start flag when the controller supports
 * it, forces active mode, and sends at maximum priority.
 */
805 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
808 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
811 BT_DBG("code 0x%2.2x", code);
816 if (lmp_no_flush_capable(conn->hcon->hdev))
817 flags = ACL_START_NO_FLUSH;
821 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
822 skb->priority = HCI_PRIO_MAX;
824 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress, i.e. the move state is
 * anything other than STABLE or WAIT_PREPARE.
 */
827 static bool __chan_is_moving(struct l2cap_chan *chan)
829 return chan->move_state != L2CAP_MOVE_STABLE &&
830 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data frame for the channel. If a high-speed (AMP) channel
 * exists and no move is in progress, send over the HS logical link;
 * otherwise send over the BR/EDR ACL, choosing the no-flush start flag
 * for non-flushable channels on capable controllers.
 */
833 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
835 struct hci_conn *hcon = chan->conn->hcon;
838 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
841 if (chan->hs_hcon && !__chan_is_moving(chan)) {
843 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
850 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
851 lmp_no_flush_capable(hcon->hdev))
852 flags = ACL_START_NO_FLUSH;
856 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
857 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit ERTM enhanced control field into l2cap_ctrl: reqseq
 * and final are common; S-frames (FRAME_TYPE bit set) carry poll and
 * super, I-frames carry sar and txseq.
 */
860 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
862 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
863 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
865 if (enh & L2CAP_CTRL_FRAME_TYPE) {
868 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
869 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
876 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
877 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit ERTM extended control field; same layout logic as the
 * enhanced variant but with wider sequence-number fields.
 */
884 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
886 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
887 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
889 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
892 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
893 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
900 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
901 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull the control field off the front of an incoming skb, using the
 * extended (32-bit) or enhanced (16-bit) decoder depending on the
 * channel's FLAG_EXT_CTRL setting.
 */
908 static inline void __unpack_control(struct l2cap_chan *chan,
911 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
912 __unpack_extended_control(get_unaligned_le32(skb->data),
913 &bt_cb(skb)->control);
914 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
916 __unpack_enhanced_control(get_unaligned_le16(skb->data),
917 &bt_cb(skb)->control);
918 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode an l2cap_ctrl into the 32-bit extended control field: reqseq
 * and final always, then either the S-frame fields (poll, super, frame
 * type bit) or the I-frame fields (sar, txseq).
 */
922 static u32 __pack_extended_control(struct l2cap_ctrl *control)
926 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
927 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
929 if (control->sframe) {
930 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
931 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
932 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
934 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
935 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode an l2cap_ctrl into the 16-bit enhanced control field; mirror of
 * __pack_extended_control() with the narrower layout.
 */
941 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
945 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
946 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
948 if (control->sframe) {
949 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
950 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
951 packed |= L2CAP_CTRL_FRAME_TYPE;
953 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
954 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an outgoing skb just after the
 * basic L2CAP header, in the width selected by FLAG_EXT_CTRL.
 */
960 static inline void __pack_control(struct l2cap_chan *chan,
961 struct l2cap_ctrl *control,
964 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
965 put_unaligned_le32(__pack_extended_control(control),
966 skb->data + L2CAP_HDR_SIZE);
968 put_unaligned_le16(__pack_enhanced_control(control),
969 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM header for this channel: extended when FLAG_EXT_CTRL
 * is set, enhanced otherwise.
 */
973 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
975 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
976 return L2CAP_EXT_HDR_SIZE;
978 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-frame) PDU: basic header with the destination
 * CID, the pre-packed control field in the channel's control width, and
 * an optional CRC16 FCS over the frame. Returns ERR_PTR(-ENOMEM) on
 * allocation failure; the skb is sent at maximum priority.
 */
981 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
985 struct l2cap_hdr *lh;
986 int hlen = __ertm_hdr_size(chan);
988 if (chan->fcs == L2CAP_FCS_CRC16)
989 hlen += L2CAP_FCS_SIZE;
991 skb = bt_skb_alloc(hlen, GFP_KERNEL);
994 return ERR_PTR(-ENOMEM);
996 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
997 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
998 lh->cid = cpu_to_le16(chan->dcid);
1000 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1001 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1003 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers everything accumulated so far (header + control). */
1005 if (chan->fcs == L2CAP_FCS_CRC16) {
1006 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1007 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1010 skb->priority = HCI_PRIO_MAX;
/* Send a supervisory frame built from *control. Skips non-S-frames and
 * channels mid-move; folds in a pending F-bit, tracks RNR-sent state,
 * and (for non-SREJ frames) records the acked sequence and stops the ack
 * timer before packing and transmitting the PDU.
 */
1014 static void l2cap_send_sframe(struct l2cap_chan *chan,
1015 struct l2cap_ctrl *control)
1017 struct sk_buff *skb;
1020 BT_DBG("chan %p, control %p", chan, control);
1022 if (!control->sframe)
1025 if (__chan_is_moving(chan))
/* Consume a pending F-bit into this frame if one is owed. */
1028 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1032 if (control->super == L2CAP_SUPER_RR)
1033 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1034 else if (control->super == L2CAP_SUPER_RNR)
1035 set_bit(CONN_RNR_SENT, &chan->conn_state);
1037 if (control->super != L2CAP_SUPER_SREJ) {
1038 chan->last_acked_seq = control->reqseq;
1039 __clear_ack_timer(chan);
1042 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1043 control->final, control->poll, control->super);
1045 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1046 control_field = __pack_extended_control(control);
1048 control_field = __pack_enhanced_control(control);
1050 skb = l2cap_create_sframe_pdu(chan, control_field);
1052 l2cap_do_send(chan, skb);
/* Send an RR (ready) or RNR (not ready) S-frame acknowledging
 * chan->buffer_seq, choosing RNR when the local side is busy; the poll
 * bit is set as requested by the caller.
 */
1055 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1057 struct l2cap_ctrl control;
1059 BT_DBG("chan %p, poll %d", chan, poll);
1061 memset(&control, 0, sizeof(control));
1063 control.poll = poll;
1065 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1066 control.super = L2CAP_SUPER_RNR;
1068 control.super = L2CAP_SUPER_RR;
1070 control.reqseq = chan->buffer_seq;
1071 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
1074 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1076 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may be created on an AMP controller:
 * requires high-speed enabled on the connection, the peer advertising
 * A2MP in its fixed-channel mask, at least one non-BR/EDR controller
 * that is up, and a channel policy preferring AMP.
 */
1079 static bool __amp_capable(struct l2cap_chan *chan)
1081 struct l2cap_conn *conn = chan->conn;
1082 struct hci_dev *hdev;
1083 bool amp_available = false;
1085 if (!conn->hs_enabled)
1088 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1091 read_lock(&hci_dev_list_lock);
1092 list_for_each_entry(hdev, &hci_dev_list, list) {
1093 if (hdev->amp_type != AMP_TYPE_BREDR &&
1094 test_bit(HCI_UP, &hdev->flags)) {
1095 amp_available = true;
1099 read_unlock(&hci_dev_list_lock);
1101 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1102 return amp_available;
/* Validate the channel's extended flow specification parameters.
 * NOTE(review): the entire body is elided from this view.
 */
1107 static bool l2cap_check_efs(struct l2cap_chan *chan)
1109 /* Check EFS parameters */
/* Send an L2CAP connection request for the channel's PSM and source CID,
 * allocating a fresh ident and marking the connect as pending so it is
 * not re-issued.
 */
1113 void l2cap_send_conn_req(struct l2cap_chan *chan)
1115 struct l2cap_conn *conn = chan->conn;
1116 struct l2cap_conn_req req;
1118 req.scid = cpu_to_le16(chan->scid);
1119 req.psm = chan->psm;
1121 chan->ident = l2cap_get_ident(conn);
1123 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1125 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send an AMP create-channel request: like a connect request but carries
 * the target controller's amp_id.
 */
1128 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1130 struct l2cap_create_chan_req req;
1131 req.scid = cpu_to_le16(chan->scid);
1132 req.psm = chan->psm;
1133 req.amp_id = amp_id;
1135 chan->ident = l2cap_get_ident(chan->conn);
1137 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all ERTM timers, reset
 * retry accounting on queued frames, rewind the expected sequence to the
 * buffered position, clear REJ/SREJ bookkeeping, and park the state
 * machines (TX in XMIT, RX in MOVE) with the remote marked busy so no
 * traffic flows during the move. Non-ERTM channels are left untouched.
 */
1141 static void l2cap_move_setup(struct l2cap_chan *chan)
1143 struct sk_buff *skb;
1145 BT_DBG("chan %p", chan);
1147 if (chan->mode != L2CAP_MODE_ERTM)
1150 __clear_retrans_timer(chan);
1151 __clear_monitor_timer(chan);
1152 __clear_ack_timer(chan);
1154 chan->retry_count = 0;
1155 skb_queue_walk(&chan->tx_q, skb) {
1156 if (bt_cb(skb)->control.retries)
1157 bt_cb(skb)->control.retries = 1;
1162 chan->expected_tx_seq = chan->buffer_seq;
1164 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1165 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1166 l2cap_seq_list_clear(&chan->retrans_list);
1167 l2cap_seq_list_clear(&chan->srej_list);
1168 skb_queue_purge(&chan->srej_q);
1170 chan->tx_state = L2CAP_TX_STATE_XMIT;
1171 chan->rx_state = L2CAP_RX_STATE_MOVE;
1173 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to the STABLE move state with no role, and
 * for ERTM channels resynchronize per the old role — the initiator sends
 * an explicit poll and waits for F, the responder waits for P.
 */
1176 static void l2cap_move_done(struct l2cap_chan *chan)
1178 u8 move_role = chan->move_role;
1179 BT_DBG("chan %p", chan);
1181 chan->move_state = L2CAP_MOVE_STABLE;
1182 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1184 if (chan->mode != L2CAP_MODE_ERTM)
1187 switch (move_role) {
1188 case L2CAP_MOVE_ROLE_INITIATOR:
1189 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1190 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1192 case L2CAP_MOVE_ROLE_RESPONDER:
1193 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Reset LE flow-control reassembly state and the TX queue when the
 * channel becomes ready.
 * NOTE(review): additional resets (e.g. partial-SDU fields) are elided
 * from this view.
 */
1198 static void l2cap_le_flowctl_start(struct l2cap_chan *chan)
1201 chan->sdu_last_frag = NULL;
1204 skb_queue_head_init(&chan->tx_q);
/* Mark the channel fully connected: clear all configuration flags
 * (including CONF_NOT_COMPLETE), stop the channel timer, initialize LE
 * flow control when applicable, set BT_CONNECTED and notify the owner.
 */
1207 static void l2cap_chan_ready(struct l2cap_chan *chan)
1209 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1210 chan->conf_state = 0;
1211 __clear_chan_timer(chan);
1213 if (chan->mode == L2CAP_MODE_LE_FLOWCTL)
1214 l2cap_le_flowctl_start(chan);
1216 chan->state = BT_CONNECTED;
1218 chan->ops->ready(chan);
/* Send an LE credit-based connection request advertising our PSM, source
 * CID, MTU, default MPS and initial RX credits.
 */
1221 static void l2cap_le_connect(struct l2cap_chan *chan)
1223 struct l2cap_conn *conn = chan->conn;
1224 struct l2cap_le_conn_req req;
1226 req.psm = chan->psm;
1227 req.scid = cpu_to_le16(chan->scid);
1228 req.mtu = cpu_to_le16(chan->imtu);
1229 req.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
1230 req.credits = cpu_to_le16(chan->rx_credits);
1232 chan->ident = l2cap_get_ident(conn);
1234 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Kick off an LE channel once SMP security is satisfied: fixed/ATT-style
 * channels become ready immediately, while a channel in BT_CONNECT sends
 * the LE connect request.
 * NOTE(review): the condition guarding the ready path is elided here.
 */
1238 static void l2cap_le_start(struct l2cap_chan *chan)
1240 struct l2cap_conn *conn = chan->conn;
1242 if (!smp_conn_security(conn->hcon, chan->sec_level))
1246 l2cap_chan_ready(chan);
1250 if (chan->state == BT_CONNECT)
1251 l2cap_le_connect(chan);
/* Route a new outgoing connection: AMP-capable channels first discover
 * AMP controllers, LE links use the LE start path, and plain BR/EDR
 * sends a classic connect request.
 */
1254 static void l2cap_start_connection(struct l2cap_chan *chan)
1256 if (__amp_capable(chan)) {
1257 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1258 a2mp_discover_amp(chan);
1259 } else if (chan->conn->hcon->type == LE_LINK) {
1260 l2cap_le_start(chan);
1262 l2cap_send_conn_req(chan);
/* Start connection setup for a channel. LE goes straight to the LE path.
 * On BR/EDR, if the feature-mask exchange has completed and security
 * passes with no connect pending, the connection is started; otherwise an
 * information request for the feature mask is issued first (with a timer)
 * and the connect is retried from its response handler.
 */
1266 static void l2cap_do_start(struct l2cap_chan *chan)
1268 struct l2cap_conn *conn = chan->conn;
1270 if (conn->hcon->type == LE_LINK) {
1271 l2cap_le_start(chan);
1275 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1276 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1279 if (l2cap_chan_check_security(chan) &&
1280 __l2cap_no_conn_pending(chan)) {
1281 l2cap_start_connection(chan);
1284 struct l2cap_info_req req;
1285 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1287 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1288 conn->info_ident = l2cap_get_ident(conn);
1290 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1292 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check whether an L2CAP mode is usable given both the remote feature
 * mask and our local one (ERTM/streaming are conditionally added to the
 * local mask on a line elided from this view).
 */
1297 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1299 u32 local_feat_mask = l2cap_feat_mask;
1301 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1304 case L2CAP_MODE_ERTM:
1305 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1306 case L2CAP_MODE_STREAMING:
1307 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate disconnection of a channel: stop ERTM timers on a connected
 * ERTM channel, short-circuit fixed A2MP channels to BT_DISCONN, and
 * otherwise send a disconnect request for the CID pair before moving to
 * BT_DISCONN with the given error reported to the owner.
 */
1313 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1315 struct l2cap_conn *conn = chan->conn;
1316 struct l2cap_disconn_req req;
1321 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1322 __clear_retrans_timer(chan);
1323 __clear_monitor_timer(chan);
1324 __clear_ack_timer(chan);
/* A2MP fixed channels have no disconnect-request exchange. */
1327 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1328 l2cap_state_change(chan, BT_DISCONN);
1332 req.dcid = cpu_to_le16(chan->dcid);
1333 req.scid = cpu_to_le16(chan->scid);
1334 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1337 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1340 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on the connection and advance
 * its setup. BT_CONNECT channels either start connecting (security and
 * no pending connect) or are closed if their mode is unsupported on a
 * state-2 device. BT_CONNECT2 channels are answered: success/pending
 * result based on security and defer-setup, followed by the first
 * configure request when appropriate.
 */
1341 static void l2cap_conn_start(struct l2cap_conn *conn)
1343 struct l2cap_chan *chan, *tmp;
1345 BT_DBG("conn %p", conn);
1347 mutex_lock(&conn->chan_lock);
1349 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1350 l2cap_chan_lock(chan);
1352 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1353 l2cap_chan_unlock(chan);
1357 if (chan->state == BT_CONNECT) {
1358 if (!l2cap_chan_check_security(chan) ||
1359 !__l2cap_no_conn_pending(chan)) {
1360 l2cap_chan_unlock(chan);
1364 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1365 && test_bit(CONF_STATE2_DEVICE,
1366 &chan->conf_state)) {
1367 l2cap_chan_close(chan, ECONNRESET);
1368 l2cap_chan_unlock(chan);
1372 l2cap_start_connection(chan);
1374 } else if (chan->state == BT_CONNECT2) {
1375 struct l2cap_conn_rsp rsp;
1377 rsp.scid = cpu_to_le16(chan->dcid);
1378 rsp.dcid = cpu_to_le16(chan->scid);
1380 if (l2cap_chan_check_security(chan)) {
1381 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1382 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1383 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1384 chan->ops->defer(chan);
1387 l2cap_state_change(chan, BT_CONFIG);
1388 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1389 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1392 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1393 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1396 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first config request once, and only on success. */
1399 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1400 rsp.result != L2CAP_CR_SUCCESS) {
1401 l2cap_chan_unlock(chan);
1405 set_bit(CONF_REQ_SENT, &chan->conf_state);
1406 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1407 l2cap_build_conf_req(chan, buf), buf);
1408 chan->num_conf_req++;
1411 l2cap_chan_unlock(chan);
1414 mutex_unlock(&conn->chan_lock);
1417 /* Find socket with cid and source/destination bdaddr.
1418 * Returns closest match, locked.
/* Look up a channel in the global channel list by source CID and
 * source/destination bdaddr, optionally filtered by @state (0 = any).
 * An exact address match returns immediately; otherwise a wildcard
 * (BDADDR_ANY) match is remembered as the closest candidate
 * (presumably returned after the loop — that return is elided here).
 */
1420 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1424 struct l2cap_chan *c, *c1 = NULL;
1426 read_lock(&chan_list_lock);
1428 list_for_each_entry(c, &chan_list, global_l) {
1429 if (state && c->state != state)
1432 if (c->scid == cid) {
1433 int src_match, dst_match;
1434 int src_any, dst_any;
/* Exact match on both addresses wins outright. */
1437 src_match = !bacmp(&c->src, src);
1438 dst_match = !bacmp(&c->dst, dst);
1439 if (src_match && dst_match) {
1440 read_unlock(&chan_list_lock);
/* Otherwise accept wildcard combinations as a fallback. */
1445 src_any = !bacmp(&c->src, BDADDR_ANY);
1446 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1447 if ((src_match && dst_any) || (src_any && dst_match) ||
1448 (src_any && dst_any))
1453 read_unlock(&chan_list_lock);
/* LE link came up: if a socket is listening on the ATT fixed channel,
 * spawn a new server channel for this connection — unless a client ATT
 * channel already exists or the remote device is blacklisted.
 */
1458 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1460 struct hci_conn *hcon = conn->hcon;
1461 struct l2cap_chan *chan, *pchan;
1466 /* Check if we have socket listening on cid */
1467 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1468 &hcon->src, &hcon->dst);
1472 /* Client ATT sockets should override the server one */
1473 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1476 dst_type = bdaddr_type(hcon, hcon->dst_type);
1478 /* If device is blocked, do not create a channel for it */
1479 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1482 l2cap_chan_lock(pchan);
/* Ask the listener to create the child channel for this link. */
1484 chan = pchan->ops->new_connection(pchan);
1488 chan->dcid = L2CAP_CID_ATT;
1490 bacpy(&chan->src, &hcon->src);
1491 bacpy(&chan->dst, &hcon->dst);
1492 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1493 chan->dst_type = dst_type;
1495 __l2cap_chan_add(conn, chan);
1498 l2cap_chan_unlock(pchan);
/* Underlying HCI connection is up: start SMP for outgoing LE pairing,
 * create LE ATT channels if needed, then kick each existing channel
 * forward (LE start, mark fixed channels ready, or send Connect Request
 * for BR/EDR channels in BT_CONNECT).
 */
1501 static void l2cap_conn_ready(struct l2cap_conn *conn)
1503 struct l2cap_chan *chan;
1504 struct hci_conn *hcon = conn->hcon;
1506 BT_DBG("conn %p", conn);
1508 /* For outgoing pairing which doesn't necessarily have an
1509 * associated socket (e.g. mgmt_pair_device).
1511 if (hcon->out && hcon->type == LE_LINK)
1512 smp_conn_security(hcon, hcon->pending_sec_level);
1514 mutex_lock(&conn->chan_lock);
1516 if (hcon->type == LE_LINK)
1517 l2cap_le_conn_ready(conn);
1519 list_for_each_entry(chan, &conn->chan_l, list) {
1521 l2cap_chan_lock(chan);
/* A2MP fixed channel has its own setup path; skip it here. */
1523 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1524 l2cap_chan_unlock(chan);
1528 if (hcon->type == LE_LINK) {
1529 l2cap_le_start(chan);
1530 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Fixed/connectionless channels are ready as soon as the link is. */
1531 l2cap_chan_ready(chan);
1533 } else if (chan->state == BT_CONNECT) {
1534 l2cap_do_start(chan);
1537 l2cap_chan_unlock(chan);
1540 mutex_unlock(&conn->chan_lock);
1543 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that demanded a reliable
 * link (FLAG_FORCE_RELIABLE), so their owners learn reliability is gone.
 */
1544 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1546 struct l2cap_chan *chan;
1548 BT_DBG("conn %p", conn);
1550 mutex_lock(&conn->chan_lock);
1552 list_for_each_entry(chan, &conn->chan_l, list) {
1553 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1554 l2cap_chan_set_err(chan, err);
1557 mutex_unlock(&conn->chan_lock);
/* Information Request timed out: give up on the feature-mask exchange,
 * mark it done, and proceed with channel setup using what we have.
 */
1560 static void l2cap_info_timeout(struct work_struct *work)
1562 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1565 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1566 conn->info_ident = 0;
1568 l2cap_conn_start(conn);
1573 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1574 * callback is called during registration. The ->remove callback is called
1575 * during unregistration.
1576 * An l2cap_user object can either be explicitly unregistered or when the
1577 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1578 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1579 * External modules must own a reference to the l2cap_conn object if they intend
1580 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1581 * any time if they don't.
/* Register an external l2cap_user on @conn.  Rejects users that are
 * already linked (non-NULL list pointers) and connections that have
 * been torn down (conn->hchan == NULL).  Calls user->probe() and, on
 * success, links the user into conn->users.  Serialized via the hci_dev
 * lock (see comment below).
 */
1584 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1586 struct hci_dev *hdev = conn->hcon->hdev;
1589 /* We need to check whether l2cap_conn is registered. If it is not, we
1590 * must not register the l2cap_user. l2cap_conn_del() unregisters
1591 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1592 * relies on the parent hci_conn object to be locked. This itself relies
1593 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean the user is already registered somewhere. */
1598 if (user->list.next || user->list.prev) {
1603 /* conn->hchan is NULL after l2cap_conn_del() was called */
1609 ret = user->probe(conn, user);
1613 list_add(&user->list, &conn->users);
1617 hci_dev_unlock(hdev);
1620 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user: unlink it, reset its
 * list pointers (the "not registered" marker used by register), and
 * invoke its ->remove() callback.  No-op if the user is not linked.
 */
1622 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1624 struct hci_dev *hdev = conn->hcon->hdev;
1628 if (!user->list.next || !user->list.prev)
1631 list_del(&user->list);
1632 user->list.next = NULL;
1633 user->list.prev = NULL;
1634 user->remove(conn, user);
1637 hci_dev_unlock(hdev);
1639 EXPORT_SYMBOL(l2cap_unregister_user);
/* Tear down every registered l2cap_user on @conn: unlink each, clear
 * its list pointers and call its ->remove() callback.  Used from
 * l2cap_conn_del() during connection teardown.
 */
1641 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1643 struct l2cap_user *user;
1645 while (!list_empty(&conn->users)) {
1646 user = list_first_entry(&conn->users, struct l2cap_user, list);
1647 list_del(&user->list);
1648 user->list.next = NULL;
1649 user->list.prev = NULL;
1650 user->remove(conn, user);
/* Destroy the L2CAP connection attached to @hcon, delivering @err to
 * every channel.  Frees pending rx data, unregisters users, deletes and
 * closes each channel (holding a temporary channel ref so close() is
 * safe), drops the HCI channel, cancels pending timers, tears down any
 * in-flight SMP context, and finally drops the connection reference.
 * Relies on the caller holding the hci_dev lock (see register/unregister).
 */
1654 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1656 struct l2cap_conn *conn = hcon->l2cap_data;
1657 struct l2cap_chan *chan, *l;
1662 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1664 kfree_skb(conn->rx_skb);
1666 l2cap_unregister_all_users(conn);
1668 mutex_lock(&conn->chan_lock);
1671 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold a ref across del/close so the channel can't vanish under us. */
1672 l2cap_chan_hold(chan);
1673 l2cap_chan_lock(chan);
1675 l2cap_chan_del(chan, err);
1677 l2cap_chan_unlock(chan);
1679 chan->ops->close(chan);
1680 l2cap_chan_put(chan);
1683 mutex_unlock(&conn->chan_lock);
1685 hci_chan_del(conn->hchan);
1687 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1688 cancel_delayed_work_sync(&conn->info_timer);
1690 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1691 cancel_delayed_work_sync(&conn->security_timer);
1692 smp_chan_destroy(conn);
1695 hcon->l2cap_data = NULL;
/* Drop the reference taken when the conn was attached to the hcon. */
1697 l2cap_conn_put(conn);
/* LE security (SMP) procedure timed out: destroy the SMP context and
 * tear down the whole connection with ETIMEDOUT.  The test_and_clear
 * guards against racing with other paths that clear HCI_CONN_LE_SMP_PEND.
 */
1700 static void security_timeout(struct work_struct *work)
1702 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1703 security_timer.work);
1705 BT_DBG("conn %p", conn);
1707 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1708 smp_chan_destroy(conn);
1709 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or reuse — early-return path elided) the l2cap_conn for
 * @hcon: allocate it, attach an hci_chan, take a reference on the
 * hci_conn, pick the MTU from the link type, initialize locks, lists
 * and the appropriate delayed work (security timer for LE, info timer
 * otherwise).  Presumably returns the conn; the return is elided here.
 */
1713 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1715 struct l2cap_conn *conn = hcon->l2cap_data;
1716 struct hci_chan *hchan;
1721 hchan = hci_chan_create(hcon);
1725 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing out. */
1727 hci_chan_del(hchan);
1731 kref_init(&conn->ref);
1732 hcon->l2cap_data = conn;
/* The conn holds a reference on its hci_conn until l2cap_conn_free(). */
1734 hci_conn_get(conn->hcon);
1735 conn->hchan = hchan;
1737 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1739 switch (hcon->type) {
1741 if (hcon->hdev->le_mtu) {
1742 conn->mtu = hcon->hdev->le_mtu;
1747 conn->mtu = hcon->hdev->acl_mtu;
1751 conn->feat_mask = 0;
1753 if (hcon->type == ACL_LINK)
1754 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1755 &hcon->hdev->dev_flags);
1757 spin_lock_init(&conn->lock);
1758 mutex_init(&conn->chan_lock);
1760 INIT_LIST_HEAD(&conn->chan_l);
1761 INIT_LIST_HEAD(&conn->users);
1763 if (hcon->type == LE_LINK)
1764 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1766 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1768 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() (the kfree of conn itself is elided from this view).
 */
1773 static void l2cap_conn_free(struct kref *ref)
1775 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1777 hci_conn_put(conn->hcon);
/* Take a reference on @conn. */
1781 void l2cap_conn_get(struct l2cap_conn *conn)
1783 kref_get(&conn->ref);
1785 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
1787 void l2cap_conn_put(struct l2cap_conn *conn)
1789 kref_put(&conn->ref, l2cap_conn_free);
1791 EXPORT_SYMBOL(l2cap_conn_put);
1793 /* ---- Socket interface ---- */
1795 /* Find socket with psm and source / destination bdaddr.
1796 * Returns closest match.
/* Look up a channel in the global list by PSM and addresses, filtered
 * by @state (0 = any) and by link type (BR/EDR listeners for ACL links,
 * non-BR/EDR for LE).  Exact address match returns immediately;
 * wildcard (BDADDR_ANY) matches are kept as the closest candidate
 * (presumably returned after the loop — that return is elided here).
 */
1798 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1803 struct l2cap_chan *c, *c1 = NULL;
1805 read_lock(&chan_list_lock);
1807 list_for_each_entry(c, &chan_list, global_l) {
1808 if (state && c->state != state)
/* Only consider channels whose source address type fits the link. */
1811 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1814 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1817 if (c->psm == psm) {
1818 int src_match, dst_match;
1819 int src_any, dst_any;
1822 src_match = !bacmp(&c->src, src);
1823 dst_match = !bacmp(&c->dst, dst);
1824 if (src_match && dst_match) {
1825 read_unlock(&chan_list_lock);
1830 src_any = !bacmp(&c->src, BDADDR_ANY);
1831 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1832 if ((src_match && dst_any) || (src_any && dst_match) ||
1833 (src_any && dst_any))
1838 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection for @chan to @dst on PSM @psm
 * or fixed CID @cid.  Validates the PSM/CID and channel mode, creates
 * (or reuses) the HCI link (LE or ACL depending on @dst_type), attaches
 * the channel to the l2cap_conn, and either marks it connected or
 * starts the connect procedure if the link is already up.
 * Returns 0 or a negative errno (error paths largely elided here).
 */
1843 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1844 bdaddr_t *dst, u8 dst_type)
1846 struct l2cap_conn *conn;
1847 struct hci_conn *hcon;
1848 struct hci_dev *hdev;
1852 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1853 dst_type, __le16_to_cpu(psm));
1855 hdev = hci_get_route(dst, &chan->src);
1857 return -EHOSTUNREACH;
1861 l2cap_chan_lock(chan);
1863 /* PSM must be odd and lsb of upper byte must be 0 */
1864 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1865 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1870 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1875 switch (chan->mode) {
1876 case L2CAP_MODE_BASIC:
1877 case L2CAP_MODE_LE_FLOWCTL:
1879 case L2CAP_MODE_ERTM:
1880 case L2CAP_MODE_STREAMING:
1889 switch (chan->state) {
1893 /* Already connecting */
1898 /* Already connected */
1912 /* Set destination address and psm */
1913 bacpy(&chan->dst, dst);
1914 chan->dst_type = dst_type;
1919 auth_type = l2cap_get_auth_type(chan);
1921 if (bdaddr_type_is_le(dst_type))
1922 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1923 chan->sec_level, auth_type);
1925 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1926 chan->sec_level, auth_type);
1929 err = PTR_ERR(hcon);
1933 conn = l2cap_conn_add(hcon);
1935 hci_conn_drop(hcon);
/* Reject a fixed-CID connect if that CID is already in use here. */
1940 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1941 hci_conn_drop(hcon);
1946 /* Update source addr of the socket */
1947 bacpy(&chan->src, &hcon->src);
1948 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* chan_add acquires conn->chan_lock; drop the chan lock to keep
 * lock ordering, then retake it.
 */
1950 l2cap_chan_unlock(chan);
1951 l2cap_chan_add(conn, chan);
1952 l2cap_chan_lock(chan);
1954 /* l2cap_chan_add takes its own ref so we can drop this one */
1955 hci_conn_drop(hcon);
1957 l2cap_state_change(chan, BT_CONNECT);
1958 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1960 if (hcon->state == BT_CONNECTED) {
1961 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1962 __clear_chan_timer(chan);
1963 if (l2cap_chan_check_security(chan))
1964 l2cap_state_change(chan, BT_CONNECTED);
1966 l2cap_do_start(chan);
1972 l2cap_chan_unlock(chan);
1973 hci_dev_unlock(hdev);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the tx
 * state machine.  The early unlock/put pair is a bail-out path whose
 * guard condition is elided from this view.
 */
1978 static void l2cap_monitor_timeout(struct work_struct *work)
1980 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1981 monitor_timer.work);
1983 BT_DBG("chan %p", chan);
1985 l2cap_chan_lock(chan);
1988 l2cap_chan_unlock(chan);
1989 l2cap_chan_put(chan);
1993 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1995 l2cap_chan_unlock(chan);
/* Drop the ref the timer held on the channel. */
1996 l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * tx state machine.  Mirrors l2cap_monitor_timeout(); the early
 * unlock/put is a bail-out path whose guard is elided here.
 */
1999 static void l2cap_retrans_timeout(struct work_struct *work)
2001 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2002 retrans_timer.work);
2004 BT_DBG("chan %p", chan);
2006 l2cap_chan_lock(chan);
2009 l2cap_chan_unlock(chan);
2010 l2cap_chan_put(chan);
2014 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2015 l2cap_chan_unlock(chan);
2016 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to the tx queue and send every
 * queued frame immediately (no acknowledgements in streaming mode),
 * stamping each with the next tx sequence number and an optional FCS.
 * Skipped entirely while a channel move is in progress.
 */
2019 static void l2cap_streaming_send(struct l2cap_chan *chan,
2020 struct sk_buff_head *skbs)
2022 struct sk_buff *skb;
2023 struct l2cap_ctrl *control;
2025 BT_DBG("chan %p, skbs %p", chan, skbs);
2027 if (__chan_is_moving(chan))
2030 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2032 while (!skb_queue_empty(&chan->tx_q)) {
2034 skb = skb_dequeue(&chan->tx_q);
2036 bt_cb(skb)->control.retries = 1;
2037 control = &bt_cb(skb)->control;
2039 control->reqseq = 0;
2040 control->txseq = chan->next_tx_seq;
2042 __pack_control(chan, control, skb);
/* Append CRC16 FCS over the frame when negotiated. */
2044 if (chan->fcs == L2CAP_FCS_CRC16) {
2045 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2046 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2049 l2cap_do_send(chan, skb);
2051 BT_DBG("Sent txseq %u", control->txseq);
2053 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2054 chan->frames_sent++;
/* ERTM transmit: send queued I-frames starting at tx_send_head while
 * the remote tx window has room and the tx state machine is in XMIT.
 * Each frame gets sequence numbers, optional FCS, and is cloned before
 * sending so the original stays queued for retransmission.  Does
 * nothing if not connected, remote is busy, or a move is in progress.
 * Returns the number of frames sent (return statement elided here).
 */
2058 static int l2cap_ertm_send(struct l2cap_chan *chan)
2060 struct sk_buff *skb, *tx_skb;
2061 struct l2cap_ctrl *control;
2064 BT_DBG("chan %p", chan);
2066 if (chan->state != BT_CONNECTED)
2069 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2072 if (__chan_is_moving(chan))
2075 while (chan->tx_send_head &&
2076 chan->unacked_frames < chan->remote_tx_win &&
2077 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2079 skb = chan->tx_send_head;
2081 bt_cb(skb)->control.retries = 1;
2082 control = &bt_cb(skb)->control;
/* Piggyback the F-bit if one is owed (the set is elided here). */
2084 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame acknowledges up to buffer_seq. */
2087 control->reqseq = chan->buffer_seq;
2088 chan->last_acked_seq = chan->buffer_seq;
2089 control->txseq = chan->next_tx_seq;
2091 __pack_control(chan, control, skb);
2093 if (chan->fcs == L2CAP_FCS_CRC16) {
2094 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2095 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2098 /* Clone after data has been modified. Data is assumed to be
2099 read-only (for locking purposes) on cloned sk_buffs.
2101 tx_skb = skb_clone(skb, GFP_KERNEL);
2106 __set_retrans_timer(chan);
2108 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2109 chan->unacked_frames++;
2110 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, if any. */
2113 if (skb_queue_is_last(&chan->tx_q, skb))
2114 chan->tx_send_head = NULL;
2116 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2118 l2cap_do_send(chan, tx_skb);
2119 BT_DBG("Sent txseq %u", control->txseq);
2122 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2123 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * Each frame's retry count is bumped and checked against max_tx
 * (disconnecting on exhaustion), its control field is refreshed with
 * the current reqseq/F-bit, the copy/clone is patched in place, FCS
 * recomputed, and the frame resent.  Skipped while remote is busy or
 * a channel move is in progress.
 */
2128 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2130 struct l2cap_ctrl control;
2131 struct sk_buff *skb;
2132 struct sk_buff *tx_skb;
2135 BT_DBG("chan %p", chan);
2137 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2140 if (__chan_is_moving(chan))
2143 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2144 seq = l2cap_seq_list_pop(&chan->retrans_list);
2146 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2148 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2153 bt_cb(skb)->control.retries++;
2154 control = bt_cb(skb)->control;
/* Retry budget exhausted: give up and disconnect the channel. */
2156 if (chan->max_tx != 0 &&
2157 bt_cb(skb)->control.retries > chan->max_tx) {
2158 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2159 l2cap_send_disconn_req(chan, ECONNRESET);
2160 l2cap_seq_list_clear(&chan->retrans_list);
2164 control.reqseq = chan->buffer_seq;
2165 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2170 if (skb_cloned(skb)) {
2171 /* Cloned sk_buffs are read-only, so we need a
2174 tx_skb = skb_copy(skb, GFP_KERNEL);
2176 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon the retransmission batch. */
2180 l2cap_seq_list_clear(&chan->retrans_list);
2184 /* Update skb contents */
2185 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2186 put_unaligned_le32(__pack_extended_control(&control),
2187 tx_skb->data + L2CAP_HDR_SIZE);
2189 put_unaligned_le16(__pack_enhanced_control(&control),
2190 tx_skb->data + L2CAP_HDR_SIZE);
2193 if (chan->fcs == L2CAP_FCS_CRC16) {
2194 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2195 put_unaligned_le16(fcs, skb_put(tx_skb,
2199 l2cap_do_send(chan, tx_skb);
2201 BT_DBG("Resent txseq %d", control.txseq);
2203 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq. */
2207 static void l2cap_retransmit(struct l2cap_chan *chan,
2208 struct l2cap_ctrl *control)
2210 BT_DBG("chan %p, control %p", chan, control);
2212 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2213 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame from control->reqseq up to (but not
 * including) tx_send_head: find the starting skb, queue each following
 * unacked txseq on the retrans list, then resend.  Skipped while the
 * remote is busy.
 */
2216 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2217 struct l2cap_ctrl *control)
2219 struct sk_buff *skb;
2221 BT_DBG("chan %p, control %p", chan, control);
2224 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2226 l2cap_seq_list_clear(&chan->retrans_list);
2228 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2231 if (chan->unacked_frames) {
/* Locate the first frame to retransmit (reqseq or queue head). */
2232 skb_queue_walk(&chan->tx_q, skb) {
2233 if (bt_cb(skb)->control.txseq == control->reqseq ||
2234 skb == chan->tx_send_head)
2238 skb_queue_walk_from(&chan->tx_q, skb) {
2239 if (skb == chan->tx_send_head)
2242 l2cap_seq_list_append(&chan->retrans_list,
2243 bt_cb(skb)->control.txseq);
2246 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends RNR when locally busy,
 * otherwise tries to piggyback the ack on pending I-frames; if none
 * were sent, sends an explicit RR once the number of unacked received
 * frames reaches 3/4 of the ack window, else (re)arms the ack timer.
 */
2250 static void l2cap_send_ack(struct l2cap_chan *chan)
2252 struct l2cap_ctrl control;
2253 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2254 chan->last_acked_seq);
2257 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2258 chan, chan->last_acked_seq, chan->buffer_seq);
2260 memset(&control, 0, sizeof(control));
2263 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2264 chan->rx_state == L2CAP_RX_STATE_RECV) {
2265 __clear_ack_timer(chan);
2266 control.super = L2CAP_SUPER_RNR;
2267 control.reqseq = chan->buffer_seq;
2268 l2cap_send_sframe(chan, &control);
2270 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2271 l2cap_ertm_send(chan);
2272 /* If any i-frames were sent, they included an ack */
2273 if (chan->buffer_seq == chan->last_acked_seq)
2277 /* Ack now if the window is 3/4ths full.
2278 * Calculate without mul or div
2280 threshold = chan->ack_win;
/* threshold = ack_win * 3 (later shifted right — shift elided). */
2281 threshold += threshold << 1;
2284 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2287 if (frames_to_ack >= threshold) {
2288 __clear_ack_timer(chan);
2289 control.super = L2CAP_SUPER_RR;
2290 control.reqseq = chan->buffer_seq;
2291 l2cap_send_sframe(chan, &control);
2296 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb head, the remainder into a chain of MTU-sized
 * fragment skbs allocated via chan->ops->alloc_skb and linked on
 * frag_list.  skb->len/data_len are updated per fragment.  Returns a
 * negative errno on copy or allocation failure (success return elided).
 */
2300 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2301 struct msghdr *msg, int len,
2302 int count, struct sk_buff *skb)
2304 struct l2cap_conn *conn = chan->conn;
2305 struct sk_buff **frag;
2308 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2314 /* Continuation fragments (no L2CAP header) */
2315 frag = &skb_shinfo(skb)->frag_list;
2317 struct sk_buff *tmp;
2319 count = min_t(unsigned int, conn->mtu, len);
2321 tmp = chan->ops->alloc_skb(chan, count,
2322 msg->msg_flags & MSG_DONTWAIT);
2324 return PTR_ERR(tmp);
2328 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2331 (*frag)->priority = skb->priority;
/* Account the fragment's bytes on the parent skb. */
2336 skb->len += (*frag)->len;
2337 skb->data_len += (*frag)->len;
2339 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload
 * copied from @msg.  Returns the skb or an ERR_PTR on failure.
 */
2345 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2346 struct msghdr *msg, size_t len,
2349 struct l2cap_conn *conn = chan->conn;
2350 struct sk_buff *skb;
2351 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2352 struct l2cap_hdr *lh;
2354 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2355 __le16_to_cpu(chan->psm), len, priority);
/* Head skb carries at most one HCI MTU; rest goes in fragments. */
2357 count = min_t(unsigned int, (conn->mtu - hlen), len);
2359 skb = chan->ops->alloc_skb(chan, count + hlen,
2360 msg->msg_flags & MSG_DONTWAIT);
2364 skb->priority = priority;
2366 /* Create L2CAP header */
2367 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2368 lh->cid = cpu_to_le16(chan->dcid);
2369 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2370 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2372 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2373 if (unlikely(err < 0)) {
2375 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload copied
 * from @msg.  Returns the skb or an ERR_PTR on failure.
 */
2380 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2381 struct msghdr *msg, size_t len,
2384 struct l2cap_conn *conn = chan->conn;
2385 struct sk_buff *skb;
2387 struct l2cap_hdr *lh;
2389 BT_DBG("chan %p len %zu", chan, len);
2391 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2393 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2394 msg->msg_flags & MSG_DONTWAIT);
2398 skb->priority = priority;
2400 /* Create L2CAP header */
2401 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2402 lh->cid = cpu_to_le16(chan->dcid);
2403 lh->len = cpu_to_le16(len);
2405 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2406 if (unlikely(err < 0)) {
2408 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at transmit time), optional SDU-length field for the
 * first segment, payload, and space for the FCS.  Returns the skb or an
 * ERR_PTR (ENOTCONN check guard elided).
 */
2413 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2414 struct msghdr *msg, size_t len,
2417 struct l2cap_conn *conn = chan->conn;
2418 struct sk_buff *skb;
2419 int err, count, hlen;
2420 struct l2cap_hdr *lh;
2422 BT_DBG("chan %p len %zu", chan, len);
2425 return ERR_PTR(-ENOTCONN);
2427 hlen = __ertm_hdr_size(chan);
/* SDU length field is only present on SAR "start" segments. */
2430 hlen += L2CAP_SDULEN_SIZE;
2432 if (chan->fcs == L2CAP_FCS_CRC16)
2433 hlen += L2CAP_FCS_SIZE;
2435 count = min_t(unsigned int, (conn->mtu - hlen), len);
2437 skb = chan->ops->alloc_skb(chan, count + hlen,
2438 msg->msg_flags & MSG_DONTWAIT);
2442 /* Create L2CAP header */
2443 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2444 lh->cid = cpu_to_le16(chan->dcid);
2445 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2447 /* Control header is populated later */
2448 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2449 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2451 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2454 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2456 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2457 if (unlikely(err < 0)) {
2459 return ERR_PTR(err);
2462 bt_cb(skb)->control.fcs = chan->fcs;
2463 bt_cb(skb)->control.retries = 0;
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs queued on
 * @seg_queue.  The PDU size is bounded by the HCI MTU (so each PDU fits
 * one HCI fragment), the BR/EDR cap, header/FCS overhead, and the
 * remote MPS.  SAR markers run UNSEGMENTED or START/CONTINUE/END, with
 * the SDU length carried only in the START segment.  Purges the queue
 * and returns the error if any PDU allocation fails.
 */
2467 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2468 struct sk_buff_head *seg_queue,
2469 struct msghdr *msg, size_t len)
2471 struct sk_buff *skb;
2476 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2478 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2479 * so fragmented skbs are not used. The HCI layer's handling
2480 * of fragmented skbs is not compatible with ERTM's queueing.
2483 /* PDU size is derived from the HCI MTU */
2484 pdu_len = chan->conn->mtu;
2486 /* Constrain PDU size for BR/EDR connections */
2488 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2490 /* Adjust for largest possible L2CAP overhead. */
2492 pdu_len -= L2CAP_FCS_SIZE;
2494 pdu_len -= __ertm_hdr_size(chan);
2496 /* Remote device may have requested smaller PDUs */
2497 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2499 if (len <= pdu_len) {
2500 sar = L2CAP_SAR_UNSEGMENTED;
2504 sar = L2CAP_SAR_START;
/* START segment also carries the 2-byte SDU length. */
2506 pdu_len -= L2CAP_SDULEN_SIZE;
2510 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2513 __skb_queue_purge(seg_queue);
2514 return PTR_ERR(skb);
2517 bt_cb(skb)->control.sar = sar;
2518 __skb_queue_tail(seg_queue, skb);
/* After START, reclaim the SDU-length bytes for later segments. */
2523 pdu_len += L2CAP_SDULEN_SIZE;
2526 if (len <= pdu_len) {
2527 sar = L2CAP_SAR_END;
2530 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based flow-control PDU: L2CAP header, optional
 * SDU-length field (first segment only), and payload from @msg.
 * Returns the skb or an ERR_PTR (ENOTCONN guard elided).
 */
2537 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2539 size_t len, u16 sdulen)
2541 struct l2cap_conn *conn = chan->conn;
2542 struct sk_buff *skb;
2543 int err, count, hlen;
2544 struct l2cap_hdr *lh;
2546 BT_DBG("chan %p len %zu", chan, len);
2549 return ERR_PTR(-ENOTCONN);
2551 hlen = L2CAP_HDR_SIZE;
2554 hlen += L2CAP_SDULEN_SIZE;
2556 count = min_t(unsigned int, (conn->mtu - hlen), len);
2558 skb = chan->ops->alloc_skb(chan, count + hlen,
2559 msg->msg_flags & MSG_DONTWAIT);
2563 /* Create L2CAP header */
2564 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2565 lh->cid = cpu_to_le16(chan->dcid);
2566 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2569 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2571 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2572 if (unlikely(err < 0)) {
2574 return ERR_PTR(err);
/* Segment an outgoing SDU into LE flow-control PDUs queued on
 * @seg_queue.  PDU size is bounded by the HCI MTU and the remote MPS;
 * the first PDU carries the SDU length and so loses 2 payload bytes.
 * Purges the queue and returns the error on allocation failure.
 */
2580 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2581 struct sk_buff_head *seg_queue,
2582 struct msghdr *msg, size_t len)
2584 struct sk_buff *skb;
2588 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2590 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2592 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* First segment carries the SDU length field. */
2595 pdu_len -= L2CAP_SDULEN_SIZE;
2601 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2603 __skb_queue_purge(seg_queue);
2604 return PTR_ERR(skb);
2607 __skb_queue_tail(seg_queue, skb);
/* Subsequent segments have no SDU length field. */
2613 pdu_len += L2CAP_SDULEN_SIZE;
/* Top-level transmit entry point.  Dispatches on channel type/mode:
 * connectionless channels send a single G-frame; LE flow-control
 * segments the SDU and sends while tx credits last (suspending the
 * owner when they run out); basic mode sends one B-frame; ERTM and
 * streaming segment first, then hand the queue to the tx state machine
 * or the streaming sender.  Returns bytes sent or a negative errno
 * (several error assignments are elided from this view).
 */
2620 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2623 struct sk_buff *skb;
2625 struct sk_buff_head seg_queue;
2630 /* Connectionless channel */
2631 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2632 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2634 return PTR_ERR(skb);
2636 l2cap_do_send(chan, skb);
2640 switch (chan->mode) {
2641 case L2CAP_MODE_LE_FLOWCTL:
2642 /* Check outgoing MTU */
2643 if (len > chan->omtu)
2646 if (!chan->tx_credits)
2649 __skb_queue_head_init(&seg_queue);
2651 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
/* Channel may have closed while blocked in segmentation. */
2653 if (chan->state != BT_CONNECTED) {
2654 __skb_queue_purge(&seg_queue);
2661 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2663 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2664 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Out of credits: tell the owner to stop sending for now. */
2668 if (!chan->tx_credits)
2669 chan->ops->suspend(chan);
2675 case L2CAP_MODE_BASIC:
2676 /* Check outgoing MTU */
2677 if (len > chan->omtu)
2680 /* Create a basic PDU */
2681 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2683 return PTR_ERR(skb);
2685 l2cap_do_send(chan, skb)
2689 case L2CAP_MODE_ERTM:
2690 case L2CAP_MODE_STREAMING:
2691 /* Check outgoing MTU */
2692 if (len > chan->omtu) {
2697 __skb_queue_head_init(&seg_queue);
2699 /* Do segmentation before calling in to the state machine,
2700 * since it's possible to block while waiting for memory
2703 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2705 /* The channel could have been closed while segmenting,
2706 * check that it is still connected.
2708 if (chan->state != BT_CONNECTED) {
2709 __skb_queue_purge(&seg_queue);
2716 if (chan->mode == L2CAP_MODE_ERTM)
2717 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST)
2719 l2cap_streaming_send(chan, &seg_queue);
2723 /* If the skbs were not queued for sending, they'll still be in
2724 * seg_queue and need to be purged.
2726 __skb_queue_purge(&seg_queue);
2730 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and @txseq that is not already buffered in srej_q,
 * recording each requested seq on srej_list, then advance
 * expected_tx_seq past @txseq.
 */
2737 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2739 struct l2cap_ctrl control;
2742 BT_DBG("chan %p, txseq %u", chan, txseq);
2744 memset(&control, 0, sizeof(control));
2746 control.super = L2CAP_SUPER_SREJ;
2748 for (seq = chan->expected_tx_seq; seq != txseq;
2749 seq = __next_seq(chan, seq)) {
2750 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2751 control.reqseq = seq;
2752 l2cap_send_sframe(chan, &control);
2753 l2cap_seq_list_append(&chan->srej_list, seq);
2757 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number on srej_list; no-op when the list is empty.
 */
2760 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2762 struct l2cap_ctrl control;
2764 BT_DBG("chan %p", chan);
2766 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2769 memset(&control, 0, sizeof(control));
2771 control.super = L2CAP_SUPER_SREJ;
2772 control.reqseq = chan->srej_list.tail;
2773 l2cap_send_sframe(chan, &control);
/* Rotate through srej_list once, re-sending an SREJ for every entry
 * except @txseq (which was just received) and re-appending each
 * requested seq; stops after one full pass or on @txseq.
 */
2776 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2778 struct l2cap_ctrl control;
2782 BT_DBG("chan %p, txseq %u", chan, txseq);
2784 memset(&control, 0, sizeof(control));
2786 control.super = L2CAP_SUPER_SREJ;
2788 /* Capture initial list head to allow only one pass through the list. */
2789 initial_head = chan->srej_list.head;
2792 seq = l2cap_seq_list_pop(&chan->srej_list);
2793 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2796 control.reqseq = seq;
2797 l2cap_send_sframe(chan, &control);
2798 l2cap_seq_list_append(&chan->srej_list, seq);
2799 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every tx-queued frame with
 * a sequence number up to (but not including) @reqseq, update
 * expected_ack_seq/unacked_frames, and stop the retransmission timer
 * once everything is acked.  No-op for duplicate acks.
 */
2802 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2804 struct sk_buff *acked_skb;
2807 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2809 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2812 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2813 chan->expected_ack_seq, chan->unacked_frames);
2815 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2816 ackseq = __next_seq(chan, ackseq)) {
2818 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2820 skb_unlink(acked_skb, &chan->tx_q);
2821 kfree_skb(acked_skb);
2822 chan->unacked_frames--;
2826 chan->expected_ack_seq = reqseq;
2828 if (chan->unacked_frames == 0)
2829 __clear_retrans_timer(chan);
2831 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT receive state: rewind expected_tx_seq, drop all
 * pending SREJ bookkeeping and buffered out-of-order frames, and go
 * back to plain RECV.
 */
2834 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2836 BT_DBG("chan %p", chan);
2838 chan->expected_tx_seq = chan->buffer_seq;
2839 l2cap_seq_list_clear(&chan->srej_list);
2840 skb_queue_purge(&chan->srej_q);
2841 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine, XMIT state.  Handles data requests (queue and
 * send), local-busy transitions (abort SREJ_SENT, send RNR/RR),
 * incoming acks, and explicit polls / retransmission timeouts, the
 * latter moving the machine to WAIT_F while a poll is outstanding.
 */
2844 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2845 struct l2cap_ctrl *control,
2846 struct sk_buff_head *skbs, u8 event)
2848 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2852 case L2CAP_EV_DATA_REQUEST:
2853 if (chan->tx_send_head == NULL)
2854 chan->tx_send_head = skb_peek(skbs);
2856 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2857 l2cap_ertm_send(chan);
2859 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2860 BT_DBG("Enter LOCAL_BUSY");
2861 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2863 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2864 /* The SREJ_SENT state must be aborted if we are to
2865 * enter the LOCAL_BUSY state.
2867 l2cap_abort_rx_srej_sent(chan);
/* send_ack will emit RNR because CONN_LOCAL_BUSY is now set. */
2870 l2cap_send_ack(chan);
2873 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2874 BT_DBG("Exit LOCAL_BUSY");
2875 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2877 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2878 struct l2cap_ctrl local_control;
/* We told the peer we were busy; send RR with P=1 to resume. */
2880 memset(&local_control, 0, sizeof(local_control));
2881 local_control.sframe = 1;
2882 local_control.super = L2CAP_SUPER_RR;
2883 local_control.poll = 1;
2884 local_control.reqseq = chan->buffer_seq;
2885 l2cap_send_sframe(chan, &local_control);
2887 chan->retry_count = 1;
2888 __set_monitor_timer(chan);
2889 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2892 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2893 l2cap_process_reqseq(chan, control->reqseq);
2895 case L2CAP_EV_EXPLICIT_POLL:
2896 l2cap_send_rr_or_rnr(chan, 1);
2897 chan->retry_count = 1;
2898 __set_monitor_timer(chan);
2899 __clear_ack_timer(chan);
2900 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2902 case L2CAP_EV_RETRANS_TO:
2903 l2cap_send_rr_or_rnr(chan, 1);
2904 chan->retry_count = 1;
2905 __set_monitor_timer(chan);
2906 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2908 case L2CAP_EV_RECV_FBIT:
2909 /* Nothing to process */
/* ERTM tx state machine, WAIT_F state (a poll is outstanding).  Data is
 * queued but not sent; local-busy transitions are handled as in XMIT;
 * receipt of the F-bit ends the wait and returns to XMIT; monitor
 * timeouts re-poll until max_tx retries, then disconnect.
 * Fix: the "recv fbit" debug message used the malformed format
 * "0x2.2%x"; corrected to the intended "0x%2.2x".
 */
2916 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2917 struct l2cap_ctrl *control,
2918 struct sk_buff_head *skbs, u8 event)
2920 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2924 case L2CAP_EV_DATA_REQUEST:
2925 if (chan->tx_send_head == NULL)
2926 chan->tx_send_head = skb_peek(skbs);
2927 /* Queue data, but don't send. */
2928 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2930 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2931 BT_DBG("Enter LOCAL_BUSY");
2932 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2934 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2935 /* The SREJ_SENT state must be aborted if we are to
2936 * enter the LOCAL_BUSY state.
2938 l2cap_abort_rx_srej_sent(chan);
/* send_ack will emit RNR because CONN_LOCAL_BUSY is now set. */
2941 l2cap_send_ack(chan);
2944 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2945 BT_DBG("Exit LOCAL_BUSY");
2946 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2948 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2949 struct l2cap_ctrl local_control;
/* We told the peer we were busy; send RR with P=1 to resume. */
2950 memset(&local_control, 0, sizeof(local_control));
2951 local_control.sframe = 1;
2952 local_control.super = L2CAP_SUPER_RR;
2953 local_control.poll = 1;
2954 local_control.reqseq = chan->buffer_seq;
2955 l2cap_send_sframe(chan, &local_control);
2957 chan->retry_count = 1;
2958 __set_monitor_timer(chan);
2959 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2962 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2963 l2cap_process_reqseq(chan, control->reqseq);
2967 case L2CAP_EV_RECV_FBIT:
/* Poll answered: stop monitoring, rearm retrans if frames remain
 * unacked, and go back to normal transmission.
 */
2968 if (control && control->final) {
2969 __clear_monitor_timer(chan);
2970 if (chan->unacked_frames > 0)
2971 __set_retrans_timer(chan);
2972 chan->retry_count = 0;
2973 chan->tx_state = L2CAP_TX_STATE_XMIT;
2974 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2977 case L2CAP_EV_EXPLICIT_POLL:
2980 case L2CAP_EV_MONITOR_TO:
2981 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2982 l2cap_send_rr_or_rnr(chan, 1);
2983 __set_monitor_timer(chan);
2984 chan->retry_count++;
2986 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Entry point into the ERTM transmit state machine: dispatch @event
 * (with optional @control and queued @skbs) to the handler for the
 * channel's current tx_state.
 */
2994 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2995 struct sk_buff_head *skbs, u8 event)
2997 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2998 chan, control, skbs, event, chan->tx_state);
3000 switch (chan->tx_state) {
3001 case L2CAP_TX_STATE_XMIT:
3002 l2cap_tx_state_xmit(chan, control, skbs, event);
3004 case L2CAP_TX_STATE_WAIT_F:
3005 l2cap_tx_state_wait_f(chan, control, skbs, event);
3013 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3014 struct l2cap_ctrl *control)
3016 BT_DBG("chan %p, control %p", chan, control);
3017 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3020 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3021 struct l2cap_ctrl *control)
3023 BT_DBG("chan %p, control %p", chan, control);
3024 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3027 /* Copy frame to all raw sockets on that connection */
3028 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3030 struct sk_buff *nskb;
3031 struct l2cap_chan *chan;
3033 BT_DBG("conn %p", conn);
/* chan_lock protects the connection's channel list during the walk. */
3035 mutex_lock(&conn->chan_lock);
3037 list_for_each_entry(chan, &conn->chan_l, list) {
3038 if (chan->chan_type != L2CAP_CHAN_RAW)
3041 /* Don't send frame to the channel it came from */
3042 if (bt_cb(skb)->chan == chan)
/* Clone so every raw channel receives its own skb reference. */
3045 nskb = skb_clone(skb, GFP_KERNEL);
3048 if (chan->ops->recv(chan, nskb))
3052 mutex_unlock(&conn->chan_lock);
3055 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU (L2CAP header + command header + @dlen bytes
 * of @data) as an skb. If the PDU exceeds the connection MTU, the
 * payload overflow is carried in a frag_list of continuation skbs.
 * Returns the head skb, or NULL on size/allocation failure (the
 * elided error-path lines are not visible in this listing).
 */
3056 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3057 u8 ident, u16 dlen, void *data)
3059 struct sk_buff *skb, **frag;
3060 struct l2cap_cmd_hdr *cmd;
3061 struct l2cap_hdr *lh;
3064 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3065 conn, code, ident, dlen);
/* Refuse if even the headers alone do not fit in the MTU. */
3067 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3070 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3071 count = min_t(unsigned int, conn->mtu, len);
3073 skb = bt_skb_alloc(count, GFP_KERNEL);
3077 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
3078 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE and BR/EDR use different fixed signalling channel IDs. */
3080 if (conn->hcon->type == LE_LINK)
3081 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3083 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
3085 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
3088 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits in the first skb. */
3091 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3092 memcpy(skb_put(skb, count), data, count);
3098 /* Continuation fragments (no L2CAP header) */
3099 frag = &skb_shinfo(skb)->frag_list;
3101 count = min_t(unsigned int, conn->mtu, len);
3103 *frag = bt_skb_alloc(count, GFP_KERNEL);
3107 memcpy(skb_put(*frag, count), data, count);
3112 frag = &(*frag)->next;
/* Extract one TLV option from a configuration request/response buffer.
 * Stores the option type/length in *type/*olen and the decoded value
 * in *val (small values inline; larger options as a pointer cast into
 * the unsigned long). Advances *ptr past the option and returns the
 * total number of bytes consumed.
 * NOTE(review): opt->len is taken from the wire without being checked
 * against the remaining buffer length here — callers bound the walk by
 * the returned length; confirm against upstream option-length
 * hardening.
 */
3122 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3125 struct l2cap_conf_opt *opt = *ptr;
3128 len = L2CAP_CONF_OPT_SIZE + opt->len;
3136 *val = *((u8 *) opt->val);
3140 *val = get_unaligned_le16(opt->val);
3144 *val = get_unaligned_le32(opt->val);
/* Unknown sizes: hand back a pointer to the raw option value. */
3148 *val = (unsigned long) opt->val;
3152 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one TLV option to a configuration PDU being built at *ptr.
 * For 1/2/4-byte options @val is the value itself; for larger options
 * @val is a pointer to the data to copy. Advances *ptr past the
 * written option.
 */
3156 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3158 struct l2cap_conf_opt *opt = *ptr;
3160 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3167 *((u8 *) opt->val) = val;
3171 put_unaligned_le16(val, opt->val);
3175 put_unaligned_le32(val, opt->val);
/* Other lengths: @val is a pointer to the option payload. */
3179 memcpy(opt->val, (void *) val, len);
3183 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option built from the
 * channel's local QoS parameters. ERTM uses the negotiated local
 * service type and default latency/flush values; streaming mode
 * advertises best-effort (remaining field lines are elided in this
 * listing).
 */
3186 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3188 struct l2cap_conf_efs efs;
3190 switch (chan->mode) {
3191 case L2CAP_MODE_ERTM:
3192 efs.id = chan->local_id;
3193 efs.stype = chan->local_stype;
3194 efs.msdu = cpu_to_le16(chan->local_msdu);
3195 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3196 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3197 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3200 case L2CAP_MODE_STREAMING:
3202 efs.stype = L2CAP_SERV_BESTEFFORT;
3203 efs.msdu = cpu_to_le16(chan->local_msdu);
3204 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3213 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3214 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged (buffer_seq has advanced past
 * last_acked_seq), send an RR/RNR to ack them. Drops the channel
 * reference taken when the timer was armed.
 */
3217 static void l2cap_ack_timeout(struct work_struct *work)
3219 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3223 BT_DBG("chan %p", chan);
3225 l2cap_chan_lock(chan);
/* Number of frames received but not yet acked. */
3227 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3228 chan->last_acked_seq);
3231 l2cap_send_rr_or_rnr(chan, 0);
3233 l2cap_chan_unlock(chan);
3234 l2cap_chan_put(chan);
/* Reset per-channel sequence/ack bookkeeping and (for ERTM mode)
 * initialise the state machines, retrans/monitor/ack work items and
 * the SREJ/retransmission sequence lists. Returns 0 on success or a
 * negative errno if sequence-list allocation fails.
 */
3237 int l2cap_ertm_init(struct l2cap_chan *chan)
3241 chan->next_tx_seq = 0;
3242 chan->expected_tx_seq = 0;
3243 chan->expected_ack_seq = 0;
3244 chan->unacked_frames = 0;
3245 chan->buffer_seq = 0;
3246 chan->frames_sent = 0;
3247 chan->last_acked_seq = 0;
3249 chan->sdu_last_frag = NULL;
3252 skb_queue_head_init(&chan->tx_q);
/* Start on the BR/EDR controller; AMP moves may change this later. */
3254 chan->local_amp_id = AMP_ID_BREDR;
3255 chan->move_id = AMP_ID_BREDR;
3256 chan->move_state = L2CAP_MOVE_STABLE;
3257 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Everything below is ERTM-only state. */
3259 if (chan->mode != L2CAP_MODE_ERTM)
3262 chan->rx_state = L2CAP_RX_STATE_RECV;
3263 chan->tx_state = L2CAP_TX_STATE_XMIT;
3265 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3266 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3267 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3269 skb_queue_head_init(&chan->srej_q);
3271 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3275 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the first list if the second allocation fails. */
3277 l2cap_seq_list_free(&chan->srej_list);
3282 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3285 case L2CAP_MODE_STREAMING:
3286 case L2CAP_MODE_ERTM:
3287 if (l2cap_mode_supported(mode, remote_feat_mask))
3291 return L2CAP_MODE_BASIC;
3295 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3297 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3300 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3302 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts.
 * Channels on an AMP controller derive the timeouts from the
 * controller's best-effort flush timeout (clamped to 16 bits);
 * BR/EDR channels use the spec defaults.
 */
3305 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3306 struct l2cap_conf_rfc *rfc)
3308 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3309 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3311 /* Class 1 devices have must have ERTM timeouts
3312 * exceeding the Link Supervision Timeout. The
3313 * default Link Supervision Timeout for AMP
3314 * controllers is 10 seconds.
3316 * Class 1 devices use 0xffffffff for their
3317 * best-effort flush timeout, so the clamping logic
3318 * will result in a timeout that meets the above
3319 * requirement. ERTM timeouts are 16-bit values, so
3320 * the maximum timeout is 65.535 seconds.
3323 /* Convert timeout to milliseconds and round */
3324 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3326 /* This is the recommended formula for class 2 devices
3327 * that start ERTM timers when packets are sent to the
3330 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (elided assignment sets 0xffff). */
3332 if (ertm_to > 0xffff)
3335 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3336 rfc->monitor_timeout = rfc->retrans_timeout;
3338 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3339 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Pick the transmit window and control-field format: if the requested
 * window exceeds the default and the remote supports extended window
 * sizes, switch to the extended control field; otherwise clamp the
 * window to the standard maximum. ack_win starts equal to tx_win.
 */
3343 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3345 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3346 __l2cap_ews_supported(chan->conn)) {
3347 /* use extended control field */
3348 set_bit(FLAG_EXT_CTRL, &chan->flags);
3349 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3351 chan->tx_win = min_t(u16, chan->tx_win,
3352 L2CAP_DEFAULT_TX_WINDOW);
3353 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3355 chan->ack_win = chan->tx_win;
/* Build an outgoing Configure Request for @chan into @data: MTU option
 * (when non-default) plus mode-specific RFC, EFS, EWS and FCS options.
 * Returns the total length written (header + options); the elided
 * final line computes ptr - data.
 */
3358 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3360 struct l2cap_conf_req *req = data;
3361 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3362 void *ptr = req->data;
3365 BT_DBG("chan %p", chan);
/* Only downgrade the mode on the very first config round. */
3367 if (chan->num_conf_req || chan->num_conf_rsp)
3370 switch (chan->mode) {
3371 case L2CAP_MODE_STREAMING:
3372 case L2CAP_MODE_ERTM:
3373 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3376 if (__l2cap_efs_supported(chan->conn))
3377 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote actually supports. */
3381 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3386 if (chan->imtu != L2CAP_DEFAULT_MTU)
3387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3389 switch (chan->mode) {
3390 case L2CAP_MODE_BASIC:
/* An explicit basic-mode RFC is only needed if the remote knows
 * about ERTM/streaming at all.
 */
3391 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3392 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3395 rfc.mode = L2CAP_MODE_BASIC;
3397 rfc.max_transmit = 0;
3398 rfc.retrans_timeout = 0;
3399 rfc.monitor_timeout = 0;
3400 rfc.max_pdu_size = 0;
3402 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3403 (unsigned long) &rfc);
3406 case L2CAP_MODE_ERTM:
3407 rfc.mode = L2CAP_MODE_ERTM;
3408 rfc.max_transmit = chan->max_tx;
3410 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size limited by the ACL MTU less the worst-case ERTM
 * overhead (extended header, SDU length, FCS).
 */
3412 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3413 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3415 rfc.max_pdu_size = cpu_to_le16(size);
3417 l2cap_txwin_setup(chan);
3419 rfc.txwin_size = min_t(u16, chan->tx_win,
3420 L2CAP_DEFAULT_TX_WINDOW);
3422 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3423 (unsigned long) &rfc);
3425 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3426 l2cap_add_opt_efs(&ptr, chan);
3428 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3429 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to drop the FCS when neither side needs it. */
3432 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3433 if (chan->fcs == L2CAP_FCS_NONE ||
3434 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3435 chan->fcs = L2CAP_FCS_NONE;
3436 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3441 case L2CAP_MODE_STREAMING:
3442 l2cap_txwin_setup(chan);
3443 rfc.mode = L2CAP_MODE_STREAMING;
3445 rfc.max_transmit = 0;
3446 rfc.retrans_timeout = 0;
3447 rfc.monitor_timeout = 0;
3449 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3450 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3452 rfc.max_pdu_size = cpu_to_le16(size);
3454 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3455 (unsigned long) &rfc);
3457 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3458 l2cap_add_opt_efs(&ptr, chan);
3460 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3461 if (chan->fcs == L2CAP_FCS_NONE ||
3462 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3463 chan->fcs = L2CAP_FCS_NONE;
3464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3470 req->dcid = cpu_to_le16(chan->dcid);
3471 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated Configure Request stored in chan->conf_req and
 * build the Configure Response into @data. Walks the option TLVs,
 * records MTU/RFC/FCS/EFS/EWS values, then validates them against the
 * channel mode and emits accept/unaccept options. Returns the response
 * length, or -ECONNREFUSED when negotiation cannot proceed.
 */
3476 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3478 struct l2cap_conf_rsp *rsp = data;
3479 void *ptr = rsp->data;
3480 void *req = chan->conf_req;
3481 int len = chan->conf_len;
3482 int type, hint, olen;
3484 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3485 struct l2cap_conf_efs efs;
3487 u16 mtu = L2CAP_DEFAULT_MTU;
3488 u16 result = L2CAP_CONF_SUCCESS;
3491 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent. */
3493 while (len >= L2CAP_CONF_OPT_SIZE) {
3494 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hints must be understood. */
3496 hint = type & L2CAP_CONF_HINT;
3497 type &= L2CAP_CONF_MASK;
3500 case L2CAP_CONF_MTU:
3504 case L2CAP_CONF_FLUSH_TO:
3505 chan->flush_to = val;
3508 case L2CAP_CONF_QOS:
3511 case L2CAP_CONF_RFC:
3512 if (olen == sizeof(rfc))
3513 memcpy(&rfc, (void *) val, olen);
3516 case L2CAP_CONF_FCS:
3517 if (val == L2CAP_FCS_NONE)
3518 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3521 case L2CAP_CONF_EFS:
3523 if (olen == sizeof(efs))
3524 memcpy(&efs, (void *) val, olen);
3527 case L2CAP_CONF_EWS:
/* Extended window requires high-speed support on our side. */
3528 if (!chan->conn->hs_enabled)
3529 return -ECONNREFUSED;
3531 set_bit(FLAG_EXT_CTRL, &chan->flags);
3532 set_bit(CONF_EWS_RECV, &chan->conf_state);
3533 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3534 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type in an UNKNOWN response. */
3541 result = L2CAP_CONF_UNKNOWN;
3542 *((u8 *) ptr++) = type;
3547 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* Reconcile our mode with what the peer proposed in the RFC option. */
3550 switch (chan->mode) {
3551 case L2CAP_MODE_STREAMING:
3552 case L2CAP_MODE_ERTM:
3553 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3554 chan->mode = l2cap_select_mode(rfc.mode,
3555 chan->conn->feat_mask);
3560 if (__l2cap_efs_supported(chan->conn))
3561 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3563 return -ECONNREFUSED;
3566 if (chan->mode != rfc.mode)
3567 return -ECONNREFUSED;
/* Mode mismatch: reject once with our mode; refuse on repeat. */
3573 if (chan->mode != rfc.mode) {
3574 result = L2CAP_CONF_UNACCEPT;
3575 rfc.mode = chan->mode;
3577 if (chan->num_conf_rsp == 1)
3578 return -ECONNREFUSED;
3580 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3581 (unsigned long) &rfc);
3584 if (result == L2CAP_CONF_SUCCESS) {
3585 /* Configure output options and let the other side know
3586 * which ones we don't like. */
3588 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3589 result = L2CAP_CONF_UNACCEPT;
3592 set_bit(CONF_MTU_DONE, &chan->conf_state);
3594 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless either is no-traffic. */
3597 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3598 efs.stype != L2CAP_SERV_NOTRAFIC &&
3599 efs.stype != chan->local_stype) {
3601 result = L2CAP_CONF_UNACCEPT;
3603 if (chan->num_conf_req >= 1)
3604 return -ECONNREFUSED;
3606 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3608 (unsigned long) &efs);
3610 /* Send PENDING Conf Rsp */
3611 result = L2CAP_CONF_PENDING;
3612 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3617 case L2CAP_MODE_BASIC:
3618 chan->fcs = L2CAP_FCS_NONE;
3619 set_bit(CONF_MODE_DONE, &chan->conf_state);
3622 case L2CAP_MODE_ERTM:
3623 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3624 chan->remote_tx_win = rfc.txwin_size;
3626 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3628 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what our ACL MTU can carry. */
3630 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3631 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3632 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3633 rfc.max_pdu_size = cpu_to_le16(size);
3634 chan->remote_mps = size;
3636 __l2cap_set_ertm_timeouts(chan, &rfc);
3638 set_bit(CONF_MODE_DONE, &chan->conf_state);
3640 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3641 sizeof(rfc), (unsigned long) &rfc);
3643 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3644 chan->remote_id = efs.id;
3645 chan->remote_stype = efs.stype;
3646 chan->remote_msdu = le16_to_cpu(efs.msdu);
3647 chan->remote_flush_to =
3648 le32_to_cpu(efs.flush_to);
3649 chan->remote_acc_lat =
3650 le32_to_cpu(efs.acc_lat);
3651 chan->remote_sdu_itime =
3652 le32_to_cpu(efs.sdu_itime);
3653 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3655 (unsigned long) &efs);
3659 case L2CAP_MODE_STREAMING:
3660 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3661 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3662 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3663 rfc.max_pdu_size = cpu_to_le16(size);
3664 chan->remote_mps = size;
3666 set_bit(CONF_MODE_DONE, &chan->conf_state);
3668 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3669 (unsigned long) &rfc);
3674 result = L2CAP_CONF_UNACCEPT;
3676 memset(&rfc, 0, sizeof(rfc));
3677 rfc.mode = chan->mode;
3680 if (result == L2CAP_CONF_SUCCESS)
3681 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3683 rsp->scid = cpu_to_le16(chan->dcid);
3684 rsp->result = cpu_to_le16(result);
3685 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the peer and build the follow-up
 * Configure Request into @data, echoing the (possibly adjusted)
 * options we will use. On success/pending, commits the negotiated
 * ERTM/streaming parameters to the channel. Returns the new request
 * length or -ECONNREFUSED.
 */
3690 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3691 void *data, u16 *result)
3693 struct l2cap_conf_req *req = data;
3694 void *ptr = req->data;
3697 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3698 struct l2cap_conf_efs efs;
3700 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3702 while (len >= L2CAP_CONF_OPT_SIZE) {
3703 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3706 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: counter with the
 * smallest legal value, otherwise accept theirs.
 */
3707 if (val < L2CAP_DEFAULT_MIN_MTU) {
3708 *result = L2CAP_CONF_UNACCEPT;
3709 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3712 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3715 case L2CAP_CONF_FLUSH_TO:
3716 chan->flush_to = val;
3717 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3721 case L2CAP_CONF_RFC:
3722 if (olen == sizeof(rfc))
3723 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not change mode mid-negotiation. */
3725 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3726 rfc.mode != chan->mode)
3727 return -ECONNREFUSED;
3731 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3732 sizeof(rfc), (unsigned long) &rfc);
3735 case L2CAP_CONF_EWS:
3736 chan->ack_win = min_t(u16, val, chan->ack_win);
3737 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3741 case L2CAP_CONF_EFS:
3742 if (olen == sizeof(efs))
3743 memcpy(&efs, (void *)val, olen);
3745 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3746 efs.stype != L2CAP_SERV_NOTRAFIC &&
3747 efs.stype != chan->local_stype)
3748 return -ECONNREFUSED;
3750 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3751 (unsigned long) &efs);
3754 case L2CAP_CONF_FCS:
3755 if (*result == L2CAP_CONF_PENDING)
3756 if (val == L2CAP_FCS_NONE)
3757 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be talked into another mode. */
3763 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3764 return -ECONNREFUSED;
3766 chan->mode = rfc.mode;
3768 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3770 case L2CAP_MODE_ERTM:
3771 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3772 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3773 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3774 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3775 chan->ack_win = min_t(u16, chan->ack_win,
3778 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3779 chan->local_msdu = le16_to_cpu(efs.msdu);
3780 chan->local_sdu_itime =
3781 le32_to_cpu(efs.sdu_itime);
3782 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3783 chan->local_flush_to =
3784 le32_to_cpu(efs.flush_to);
3788 case L2CAP_MODE_STREAMING:
3789 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3793 req->dcid = cpu_to_le16(chan->dcid);
3794 req->flags = __constant_cpu_to_le16(0);
3799 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3800 u16 result, u16 flags)
3802 struct l2cap_conf_rsp *rsp = data;
3803 void *ptr = rsp->data;
3805 BT_DBG("chan %p", chan);
3807 rsp->scid = cpu_to_le16(chan->dcid);
3808 rsp->result = cpu_to_le16(result);
3809 rsp->flags = cpu_to_le16(flags);
3814 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3816 struct l2cap_le_conn_rsp rsp;
3817 struct l2cap_conn *conn = chan->conn;
3819 BT_DBG("chan %p", chan);
3821 rsp.dcid = cpu_to_le16(chan->scid);
3822 rsp.mtu = cpu_to_le16(chan->imtu);
3823 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
3824 rsp.credits = cpu_to_le16(chan->rx_credits);
3825 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3827 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred (BR/EDR or AMP) Connection Response for @chan and,
 * if no Configure Request has gone out yet, kick off configuration.
 */
3831 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3833 struct l2cap_conn_rsp rsp;
3834 struct l2cap_conn *conn = chan->conn;
3838 rsp.scid = cpu_to_le16(chan->dcid);
3839 rsp.dcid = cpu_to_le16(chan->scid);
3840 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3841 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP-created channels answer with Create Channel Response instead
 * of the plain Connection Response.
 */
3844 rsp_code = L2CAP_CREATE_CHAN_RSP;
3846 rsp_code = L2CAP_CONN_RSP;
3848 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3850 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller sends the initial Configure Request. */
3852 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3855 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3856 l2cap_build_conf_req(chan, buf), buf);
3857 chan->num_conf_req++;
/* Extract the final ERTM/streaming parameters from a successful
 * Configure Response, falling back to sane defaults when the remote
 * omitted the RFC or extended-window option.
 */
3860 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3864 /* Use sane default values in case a misbehaving remote device
3865 * did not send an RFC or extended window size option.
3867 u16 txwin_ext = chan->ack_win;
3868 struct l2cap_conf_rfc rfc = {
3870 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3871 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3872 .max_pdu_size = cpu_to_le16(chan->imtu),
3873 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3876 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming channels carry these parameters. */
3878 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3881 while (len >= L2CAP_CONF_OPT_SIZE) {
3882 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3885 case L2CAP_CONF_RFC:
3886 if (olen == sizeof(rfc))
3887 memcpy(&rfc, (void *)val, olen);
3889 case L2CAP_CONF_EWS:
3896 case L2CAP_MODE_ERTM:
3897 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3898 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3899 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* ack_win is bounded by the extended or standard window size
 * depending on which control-field format is in use.
 */
3900 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3901 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3903 chan->ack_win = min_t(u16, chan->ack_win,
3906 case L2CAP_MODE_STREAMING:
3907 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it rejects our pending
 * Information Request (feature mask probe), give up on the probe and
 * proceed with connection startup anyway.
 */
3911 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3912 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3915 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated reject payloads from the peer. */
3917 if (cmd_len < sizeof(*rej))
3920 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3923 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3924 cmd->ident == conn->info_ident) {
3925 cancel_delayed_work(&conn->info_timer);
3927 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3928 conn->info_ident = 0;
3930 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (or AMP Create Channel
 * Request): find a listening channel for the PSM, run security checks,
 * create the new channel, send the response with @rsp_code, and start
 * the feature-mask probe and/or configuration as appropriate. Returns
 * the new channel or NULL.
 */
3936 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3937 struct l2cap_cmd_hdr *cmd,
3938 u8 *data, u8 rsp_code, u8 amp_id)
3940 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3941 struct l2cap_conn_rsp rsp;
3942 struct l2cap_chan *chan = NULL, *pchan;
3943 int result, status = L2CAP_CS_NO_INFO;
3945 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3946 __le16 psm = req->psm;
3948 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3950 /* Check if we have socket listening on psm */
3951 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3952 &conn->hcon->dst, ACL_LINK);
3954 result = L2CAP_CR_BAD_PSM;
/* Lock order: connection's channel list, then the parent channel. */
3958 mutex_lock(&conn->chan_lock);
3959 l2cap_chan_lock(pchan);
3961 /* Check if the ACL is secure enough (if not SDP) */
3962 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3963 !hci_conn_check_link_mode(conn->hcon)) {
3964 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3965 result = L2CAP_CR_SEC_BLOCK;
3969 result = L2CAP_CR_NO_MEM;
3971 /* Check if we already have channel with that dcid */
3972 if (__l2cap_get_chan_by_dcid(conn, scid))
3975 chan = pchan->ops->new_connection(pchan);
3979 /* For certain devices (ex: HID mouse), support for authentication,
3980 * pairing and bonding is optional. For such devices, inorder to avoid
3981 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3982 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3984 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3986 bacpy(&chan->src, &conn->hcon->src);
3987 bacpy(&chan->dst, &conn->hcon->dst);
3988 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3989 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3992 chan->local_amp_id = amp_id;
3994 __l2cap_chan_add(conn, chan);
3998 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4000 chan->ident = cmd->ident;
/* Feature mask already known: decide the final result now; otherwise
 * answer PENDING and probe the peer's features first.
 */
4002 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4003 if (l2cap_chan_check_security(chan)) {
4004 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4005 l2cap_state_change(chan, BT_CONNECT2);
4006 result = L2CAP_CR_PEND;
4007 status = L2CAP_CS_AUTHOR_PEND;
4008 chan->ops->defer(chan);
4010 /* Force pending result for AMP controllers.
4011 * The connection will succeed after the
4012 * physical link is up.
4014 if (amp_id == AMP_ID_BREDR) {
4015 l2cap_state_change(chan, BT_CONFIG);
4016 result = L2CAP_CR_SUCCESS;
4018 l2cap_state_change(chan, BT_CONNECT2);
4019 result = L2CAP_CR_PEND;
4021 status = L2CAP_CS_NO_INFO;
4024 l2cap_state_change(chan, BT_CONNECT2);
4025 result = L2CAP_CR_PEND;
4026 status = L2CAP_CS_AUTHEN_PEND;
4029 l2cap_state_change(chan, BT_CONNECT2);
4030 result = L2CAP_CR_PEND;
4031 status = L2CAP_CS_NO_INFO;
4035 l2cap_chan_unlock(pchan);
4036 mutex_unlock(&conn->chan_lock);
4039 rsp.scid = cpu_to_le16(scid);
4040 rsp.dcid = cpu_to_le16(dcid);
4041 rsp.result = cpu_to_le16(result);
4042 rsp.status = cpu_to_le16(status);
4043 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* PENDING with no info yet: ask the peer for its feature mask. */
4045 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4046 struct l2cap_info_req info;
4047 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4049 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4050 conn->info_ident = l2cap_get_ident(conn);
4052 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4054 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4055 sizeof(info), &info);
/* Success path: immediately start configuration of the channel. */
4058 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4059 result == L2CAP_CR_SUCCESS) {
4061 set_bit(CONF_REQ_SENT, &chan->conf_state);
4062 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4063 l2cap_build_conf_req(chan, buf), buf);
4064 chan->num_conf_req++;
/* Signalling handler for a BR/EDR Connection Request: notify the
 * management interface of the (possibly new) device connection, then
 * delegate the real work to l2cap_connect().
 */
4070 static int l2cap_connect_req(struct l2cap_conn *conn,
4071 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4073 struct hci_dev *hdev = conn->hcon->hdev;
4074 struct hci_conn *hcon = conn->hcon;
/* Reject truncated requests before touching the payload. */
4076 if (cmd_len < sizeof(struct l2cap_conn_req))
4080 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
4081 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4082 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
4083 hcon->dst_type, 0, NULL, 0,
4085 hci_dev_unlock(hdev);
4087 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or Create Channel Response): locate
 * the channel by scid (or by the pending request's ident for error
 * results), then either move to configuration, stay pending, or tear
 * the channel down on refusal.
 */
4091 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4092 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4095 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4096 u16 scid, dcid, result, status;
4097 struct l2cap_chan *chan;
4101 if (cmd_len < sizeof(*rsp))
4104 scid = __le16_to_cpu(rsp->scid);
4105 dcid = __le16_to_cpu(rsp->dcid);
4106 result = __le16_to_cpu(rsp->result);
4107 status = __le16_to_cpu(rsp->status);
4109 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4110 dcid, scid, result, status);
4112 mutex_lock(&conn->chan_lock);
/* Error responses may carry scid 0, so fall back to the ident of
 * the request we sent.
 */
4115 chan = __l2cap_get_chan_by_scid(conn, scid);
4121 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4130 l2cap_chan_lock(chan);
4133 case L2CAP_CR_SUCCESS:
4134 l2cap_state_change(chan, BT_CONFIG);
4137 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4139 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4142 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4143 l2cap_build_conf_req(chan, req), req);
4144 chan->num_conf_req++;
4148 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result is a refusal: delete the channel. */
4152 l2cap_chan_del(chan, ECONNREFUSED);
4156 l2cap_chan_unlock(chan);
4159 mutex_unlock(&conn->chan_lock);
4164 static inline void set_default_fcs(struct l2cap_chan *chan)
4166 /* FCS is enabled only in ERTM or streaming mode, if one or both
4169 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4170 chan->fcs = L2CAP_FCS_NONE;
4171 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4172 chan->fcs = L2CAP_FCS_CRC16;
/* Send a successful Configure Response after an EFS negotiation has
 * completed, clearing the local-pending flag and marking our output
 * configuration as done.
 */
4175 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4176 u8 ident, u16 flags)
4178 struct l2cap_conn *conn = chan->conn;
4180 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4183 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4184 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4186 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4187 l2cap_build_conf_rsp(chan, data,
4188 L2CAP_CONF_SUCCESS, flags), data);
4191 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4194 struct l2cap_cmd_rej_cid rej;
4196 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4197 rej.scid = __cpu_to_le16(scid);
4198 rej.dcid = __cpu_to_le16(dcid);
4200 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configure Request: accumulate option data across
 * continuation fragments in chan->conf_req, and once complete, parse
 * it, send our response, and possibly send our own Configure Request
 * or finish channel setup.
 */
4203 static inline int l2cap_config_req(struct l2cap_conn *conn,
4204 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4207 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4210 struct l2cap_chan *chan;
4213 if (cmd_len < sizeof(*req))
4216 dcid = __le16_to_cpu(req->dcid);
4217 flags = __le16_to_cpu(req->flags);
4219 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4221 chan = l2cap_get_chan_by_scid(conn, dcid);
4223 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Configuration is only valid in CONFIG/CONNECT2 states. */
4227 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4228 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4233 /* Reject if config buffer is too small. */
4234 len = cmd_len - sizeof(*req);
4235 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4236 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4237 l2cap_build_conf_rsp(chan, rsp,
4238 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel config buffer. */
4243 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4244 chan->conf_len += len;
4246 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4247 /* Incomplete config. Send empty response. */
4248 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4249 l2cap_build_conf_rsp(chan, rsp,
4250 L2CAP_CONF_SUCCESS, flags), rsp);
4254 /* Complete config. */
4255 len = l2cap_parse_conf_req(chan, rsp);
4257 l2cap_send_disconn_req(chan, ECONNRESET);
4261 chan->ident = cmd->ident;
4262 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4263 chan->num_conf_rsp++;
4265 /* Reset config buffer. */
4268 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finish setup (init ERTM if needed)
 * and mark the channel ready.
 */
4271 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4272 set_default_fcs(chan);
4274 if (chan->mode == L2CAP_MODE_ERTM ||
4275 chan->mode == L2CAP_MODE_STREAMING)
4276 err = l2cap_ertm_init(chan);
4279 l2cap_send_disconn_req(chan, -err);
4281 l2cap_chan_ready(chan);
4286 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4288 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4289 l2cap_build_conf_req(chan, buf), buf);
4290 chan->num_conf_req++;
4293 /* Got Conf Rsp PENDING from remote side and asume we sent
4294 Conf Rsp PENDING in the code above */
4295 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4296 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4298 /* check compatibility */
4300 /* Send rsp for BR/EDR channel */
4302 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4304 chan->ident = cmd->ident;
4308 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response. On success, commit the
 * negotiated parameters; on PENDING, possibly answer with our own EFS
 * response or create the AMP logical link; on UNACCEPT, retry with a
 * new request a bounded number of times; otherwise disconnect.
 */
4312 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4313 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4316 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4317 u16 scid, flags, result;
4318 struct l2cap_chan *chan;
4319 int len = cmd_len - sizeof(*rsp);
4322 if (cmd_len < sizeof(*rsp))
4325 scid = __le16_to_cpu(rsp->scid);
4326 flags = __le16_to_cpu(rsp->flags);
4327 result = __le16_to_cpu(rsp->result);
4329 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4332 chan = l2cap_get_chan_by_scid(conn, scid);
4337 case L2CAP_CONF_SUCCESS:
4338 l2cap_conf_rfc_get(chan, rsp->data, len);
4339 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4342 case L2CAP_CONF_PENDING:
4343 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4345 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4348 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4351 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR can respond immediately; AMP channels must bring up
 * the logical link first when EFS is in play.
 */
4355 if (!chan->hs_hcon) {
4356 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4359 if (l2cap_check_efs(chan)) {
4360 amp_create_logical_link(chan);
4361 chan->ident = cmd->ident;
4367 case L2CAP_CONF_UNACCEPT:
4368 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Bound the size of the retry request we are about to build. */
4371 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4372 l2cap_send_disconn_req(chan, ECONNRESET);
4376 /* throw out any old stored conf requests */
4377 result = L2CAP_CONF_SUCCESS;
4378 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4381 l2cap_send_disconn_req(chan, ECONNRESET);
4385 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4386 L2CAP_CONF_REQ, len, req);
4387 chan->num_conf_req++;
4388 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: flag the error and disconnect. */
4394 l2cap_chan_set_err(chan, ECONNRESET);
4396 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4397 l2cap_send_disconn_req(chan, ECONNRESET);
4401 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4404 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4406 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4407 set_default_fcs(chan);
4409 if (chan->mode == L2CAP_MODE_ERTM ||
4410 chan->mode == L2CAP_MODE_STREAMING)
4411 err = l2cap_ertm_init(chan);
4414 l2cap_send_disconn_req(chan, -err);
4416 l2cap_chan_ready(chan);
4420 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response, then delete and close the matching channel.
 * NOTE(review): extract is missing interior lines (returns, braces).
 */
4424 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4425 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4428 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4429 struct l2cap_disconn_rsp rsp;
4431 struct l2cap_chan *chan;
4433 if (cmd_len != sizeof(*req))
4436 scid = __le16_to_cpu(req->scid);
4437 dcid = __le16_to_cpu(req->dcid);
4439 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4441 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid; look the channel up by it. */
4443 chan = __l2cap_get_chan_by_scid(conn, dcid);
4445 mutex_unlock(&conn->chan_lock);
4446 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4450 l2cap_chan_lock(chan);
4452 rsp.dcid = cpu_to_le16(chan->scid);
4453 rsp.scid = cpu_to_le16(chan->dcid);
4454 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4456 chan->ops->set_shutdown(chan);
/* Hold a ref so chan survives until ops->close/put below. */
4458 l2cap_chan_hold(chan);
4459 l2cap_chan_del(chan, ECONNRESET);
4461 l2cap_chan_unlock(chan);
4463 chan->ops->close(chan);
4464 l2cap_chan_put(chan);
4466 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnect Response to a request we sent: tear down the
 * channel identified by our scid with no error.
 * NOTE(review): extract is missing interior lines (returns, braces).
 */
4471 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4472 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4475 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4477 struct l2cap_chan *chan;
4479 if (cmd_len != sizeof(*rsp))
4482 scid = __le16_to_cpu(rsp->scid);
4483 dcid = __le16_to_cpu(rsp->dcid);
4485 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4487 mutex_lock(&conn->chan_lock);
4489 chan = __l2cap_get_chan_by_scid(conn, scid);
4491 mutex_unlock(&conn->chan_lock);
4495 l2cap_chan_lock(chan);
/* Hold a ref across del/unlock so close/put see a live channel. */
4497 l2cap_chan_hold(chan);
4498 l2cap_chan_del(chan, 0);
4500 l2cap_chan_unlock(chan);
4502 chan->ops->close(chan);
4503 l2cap_chan_put(chan);
4505 mutex_unlock(&conn->chan_lock);
/* Answer an Information Request: report the feature mask, the fixed
 * channel map, or NOTSUPP for any other type.
 * NOTE(review): extract is missing interior lines (buf declarations,
 * returns, braces) — verify against the full file.
 */
4510 static inline int l2cap_information_req(struct l2cap_conn *conn,
4511 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4514 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4517 if (cmd_len != sizeof(*req))
4520 type = __le16_to_cpu(req->type);
4522 BT_DBG("type 0x%4.4x", type);
4524 if (type == L2CAP_IT_FEAT_MASK) {
4526 u32 feat_mask = l2cap_feat_mask;
4527 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4528 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4529 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4531 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* High-speed (AMP) support adds extended flow spec / window. */
4533 if (conn->hs_enabled)
4534 feat_mask |= L2CAP_FEAT_EXT_FLOW
4535 | L2CAP_FEAT_EXT_WINDOW;
4537 put_unaligned_le32(feat_mask, rsp->data);
4538 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4540 } else if (type == L2CAP_IT_FIXED_CHAN) {
4542 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when HS is enabled. */
4544 if (conn->hs_enabled)
4545 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4547 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4549 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4550 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4551 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4552 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Unknown info type: reply NOTSUPP. */
4555 struct l2cap_info_rsp rsp;
4556 rsp.type = cpu_to_le16(type);
4557 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4558 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an Information Response: record the peer's feature mask and
 * fixed channels, chaining a FIXED_CHAN query after FEAT_MASK when the
 * peer advertises fixed-channel support; finally kick pending connects.
 * NOTE(review): extract is missing interior lines (switch header,
 * returns, braces).
 */
4565 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4566 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4569 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4572 if (cmd_len < sizeof(*rsp))
4575 type = __le16_to_cpu(rsp->type);
4576 result = __le16_to_cpu(rsp->result);
4578 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4580 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4581 if (cmd->ident != conn->info_ident ||
4582 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4585 cancel_delayed_work(&conn->info_timer);
/* Peer refused the query: mark done and start channels anyway. */
4587 if (result != L2CAP_IR_SUCCESS) {
4588 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4589 conn->info_ident = 0;
4591 l2cap_conn_start(conn);
4597 case L2CAP_IT_FEAT_MASK:
4598 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Follow up with a fixed-channel query if the peer supports it. */
4600 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4601 struct l2cap_info_req req;
4602 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4604 conn->info_ident = l2cap_get_ident(conn);
4606 l2cap_send_cmd(conn, conn->info_ident,
4607 L2CAP_INFO_REQ, sizeof(req), &req);
4609 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4610 conn->info_ident = 0;
4612 l2cap_conn_start(conn);
4616 case L2CAP_IT_FIXED_CHAN:
4617 conn->fixed_chan_mask = rsp->data[0];
4618 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4619 conn->info_ident = 0;
4621 l2cap_conn_start(conn);
/* Handle a Create Channel Request (AMP).  amp_id 0 falls back to a
 * plain BR/EDR connect; otherwise validate the AMP controller and bind
 * the new channel to the AMP link.  On failure, reply BAD_AMP.
 * NOTE(review): extract is missing interior lines (error labels,
 * returns, braces) — verify flow against the full file.
 */
4628 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4629 struct l2cap_cmd_hdr *cmd,
4630 u16 cmd_len, void *data)
4632 struct l2cap_create_chan_req *req = data;
4633 struct l2cap_create_chan_rsp rsp;
4634 struct l2cap_chan *chan;
4635 struct hci_dev *hdev;
4638 if (cmd_len != sizeof(*req))
4641 if (!conn->hs_enabled)
4644 psm = le16_to_cpu(req->psm);
4645 scid = le16_to_cpu(req->scid);
4647 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4649 /* For controller id 0 make BR/EDR connection */
4650 if (req->amp_id == AMP_ID_BREDR) {
4651 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4656 /* Validate AMP controller id */
4657 hdev = hci_dev_get(req->amp_id);
4661 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4666 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4669 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4670 struct hci_conn *hs_hcon;
4672 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No AMP link to the peer: reject with invalid CID. */
4676 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4681 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
/* Bind channel to the high-speed link; FCS is not used on AMP. */
4683 mgr->bredr_chan = chan;
4684 chan->hs_hcon = hs_hcon;
4685 chan->fcs = L2CAP_FCS_NONE;
4686 conn->mtu = hdev->block_mtu;
/* Error path: tell the peer the AMP controller id was bad. */
4695 rsp.scid = cpu_to_le16(scid);
4696 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4697 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4699 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for chan toward dest_amp_id and start the
 * move response timer.  Stores the ident for matching the response.
 */
4705 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4707 struct l2cap_move_chan_req req;
4710 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4712 ident = l2cap_get_ident(chan->conn);
4713 chan->ident = ident;
4715 req.icid = cpu_to_le16(chan->scid);
4716 req.dest_amp_id = dest_amp_id;
4718 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
/* Guard the pending move with the spec's move timeout. */
4721 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response carrying the given result code, using
 * the ident saved from the peer's request.
 */
4724 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4726 struct l2cap_move_chan_rsp rsp;
4728 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4730 rsp.icid = cpu_to_le16(chan->dcid);
4731 rsp.result = cpu_to_le16(result);
4733 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm with the given result and (re)arm the
 * move timer while waiting for the confirm response.
 */
4737 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4739 struct l2cap_move_chan_cfm cfm;
4741 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* New ident: the confirm starts its own request/response exchange. */
4743 chan->ident = l2cap_get_ident(chan->conn);
4745 cfm.icid = cpu_to_le16(chan->scid);
4746 cfm.result = cpu_to_le16(result);
4748 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4751 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare icid — used when
 * no channel object could be found for the move exchange.
 */
4754 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4756 struct l2cap_move_chan_cfm cfm;
4758 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4760 cfm.icid = cpu_to_le16(icid);
4761 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4763 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Acknowledge a Move Channel Confirm by echoing the icid back with the
 * sender's ident.
 */
4767 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4770 struct l2cap_move_chan_cfm_rsp rsp;
4772 BT_DBG("icid 0x%4.4x", icid);
4774 rsp.icid = cpu_to_le16(icid);
4775 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its AMP logical link.  Actual link
 * release is still a placeholder.
 */
4778 static void __release_logical_link(struct l2cap_chan *chan)
4780 chan->hs_hchan = NULL;
4781 chan->hs_hcon = NULL;
4783 /* Placeholder - release the logical link */
/* React to a failed AMP logical-link setup: disconnect a channel still
 * being created, or unwind an in-progress move according to our role.
 * NOTE(review): extract is missing interior lines (breaks, braces).
 */
4786 static void l2cap_logical_fail(struct l2cap_chan *chan)
4788 /* Logical link setup failed */
4789 if (chan->state != BT_CONNECTED) {
4790 /* Create channel failure, disconnect */
4791 l2cap_send_disconn_req(chan, ECONNRESET);
4795 switch (chan->move_role) {
4796 case L2CAP_MOVE_ROLE_RESPONDER:
4797 l2cap_move_done(chan);
/* Tell the initiator we cannot host the channel. */
4798 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4800 case L2CAP_MOVE_ROLE_INITIATOR:
4801 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4802 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4803 /* Remote has only sent pending or
4804 * success responses, clean up
4806 l2cap_move_done(chan);
4809 /* Other amp move states imply that the move
4810 * has already aborted
4812 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Finish AMP channel creation once the logical link is up: attach the
 * hci_chan, send the deferred EFS Configure Response, and if both
 * config directions are done bring the channel to ready.
 * NOTE(review): extract is missing interior lines (err declaration,
 * braces).
 */
4817 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4818 struct hci_chan *hchan)
4820 struct l2cap_conf_rsp rsp;
4822 chan->hs_hchan = hchan;
4823 chan->hs_hcon->l2cap_data = chan->conn;
/* This response was held back until the logical link completed. */
4825 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4827 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4830 set_default_fcs(chan);
4832 err = l2cap_ertm_init(chan);
4834 l2cap_send_disconn_req(chan, -err);
4836 l2cap_chan_ready(chan);
/* Advance the channel-move state machine after the AMP logical link
 * completed, depending on which confirm/response we still await.
 * NOTE(review): extract is missing interior lines (breaks, default
 * label, braces).
 */
4840 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4841 struct hci_chan *hchan)
4843 chan->hs_hcon = hchan->conn;
4844 chan->hs_hcon->l2cap_data = chan->conn;
4846 BT_DBG("move_state %d", chan->move_state);
4848 switch (chan->move_state) {
4849 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4850 /* Move confirm will be sent after a success
4851 * response is received
4853 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4855 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Delay the confirm while the local receiver is busy. */
4856 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4857 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4858 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4859 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4860 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4861 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4862 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4863 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4867 /* Move was not in expected state, free the channel */
4868 __release_logical_link(chan);
4870 chan->move_state = L2CAP_MOVE_STABLE;
4874 /* Call with chan locked */
/* Entry point for AMP logical-link completion: dispatch to the failure
 * path, the channel-create path, or the channel-move path.
 * NOTE(review): extract is missing interior lines (status check,
 * braces) — the failure branch condition is not visible here.
 */
4875 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4878 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4881 l2cap_logical_fail(chan);
4882 __release_logical_link(chan);
4886 if (chan->state != BT_CONNECTED) {
4887 /* Ignore logical link if channel is on BR/EDR */
4888 if (chan->local_amp_id != AMP_ID_BREDR)
4889 l2cap_logical_finish_create(chan, hchan);
4891 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel between BR/EDR and AMP as initiator.  Moving
 * off BR/EDR first requires physical-link setup; moving back to BR/EDR
 * can send the Move Channel Request immediately (dest amp id 0).
 * NOTE(review): extract is missing interior lines (return, braces).
 */
4895 void l2cap_move_start(struct l2cap_chan *chan)
4897 BT_DBG("chan %p", chan);
4899 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Only move off BR/EDR when policy prefers AMP. */
4900 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4902 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4903 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4904 /* Placeholder - start physical link setup */
4906 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4907 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4909 l2cap_move_setup(chan);
4910 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical-link setup.  Outgoing AMP
 * channels either send a Create Channel Request or fall back to BR/EDR;
 * incoming ones answer the pending Create Channel Request and, on
 * success, start configuration.
 * NOTE(review): extract is missing interior lines (returns, buf
 * declaration, braces).
 */
4914 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4915 u8 local_amp_id, u8 remote_amp_id)
4917 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4918 local_amp_id, remote_amp_id);
/* AMP links never use an FCS. */
4920 chan->fcs = L2CAP_FCS_NONE;
4922 /* Outgoing channel on AMP */
4923 if (chan->state == BT_CONNECT) {
4924 if (result == L2CAP_CR_SUCCESS) {
4925 chan->local_amp_id = local_amp_id;
4926 l2cap_send_create_chan_req(chan, remote_amp_id);
4928 /* Revert to BR/EDR connect */
4929 l2cap_send_conn_req(chan);
4935 /* Incoming channel on AMP */
4936 if (__l2cap_no_conn_pending(chan)) {
4937 struct l2cap_conn_rsp rsp;
4939 rsp.scid = cpu_to_le16(chan->dcid);
4940 rsp.dcid = cpu_to_le16(chan->scid);
4942 if (result == L2CAP_CR_SUCCESS) {
4943 /* Send successful response */
4944 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4945 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4947 /* Send negative response */
4948 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4949 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4952 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* Accepted: move to CONFIG and fire the first Configure Request. */
4955 if (result == L2CAP_CR_SUCCESS) {
4956 l2cap_state_change(chan, BT_CONFIG);
4957 set_bit(CONF_REQ_SENT, &chan->conf_state);
4958 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4960 l2cap_build_conf_req(chan, buf), buf);
4961 chan->num_conf_req++;
/* Initiator side: prepare the channel for a move and send the Move
 * Channel Request toward the remote AMP controller.
 */
4966 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4969 l2cap_move_setup(chan);
4970 chan->move_id = local_amp_id;
4971 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4973 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move once the logical link's availability
 * is known.  hchan is a placeholder (always NULL here), so only the
 * NOT_ALLOWED path is currently reachable.
 * NOTE(review): extract is missing interior lines (result check,
 * braces).
 */
4976 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4978 struct hci_chan *hchan = NULL;
4980 /* Placeholder - get hci_chan for logical link */
4983 if (hchan->state == BT_CONNECTED) {
4984 /* Logical link is ready to go */
4985 chan->hs_hcon = hchan->conn;
4986 chan->hs_hcon->l2cap_data = chan->conn;
4987 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4988 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4990 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4992 /* Wait for logical link to be ready */
4993 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4996 /* Logical link not available */
4997 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a pending move: as responder tell the peer why, then return
 * the channel to the stable state and restart ERTM transmission.
 * NOTE(review): extract is missing interior lines (rsp_result
 * declaration, braces).
 */
5001 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5003 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5005 if (result == -EINVAL)
5006 rsp_result = L2CAP_MR_BAD_ID;
5008 rsp_result = L2CAP_MR_NOT_ALLOWED;
5010 l2cap_send_move_chan_rsp(chan, rsp_result);
5013 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5014 chan->move_state = L2CAP_MOVE_STABLE;
5016 /* Restart data transmission */
5017 l2cap_ertm_send(chan);
5020 /* Invoke with locked chan */
/* Physical (AMP) link completion callback.  A dying channel is left
 * alone; otherwise dispatch to channel creation, move initiation,
 * move response, or cancellation depending on state/role/result.
 * NOTE(review): the early-out path unlocks the channel even though the
 * function is documented as invoked with it locked — confirm the
 * caller re-checks before unlocking again.  Interior lines (breaks,
 * braces) are missing from this extract.
 */
5021 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5023 u8 local_amp_id = chan->local_amp_id;
5024 u8 remote_amp_id = chan->remote_amp_id;
5026 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5027 chan, result, local_amp_id, remote_amp_id);
5029 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
5030 l2cap_chan_unlock(chan);
5034 if (chan->state != BT_CONNECTED) {
5035 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5036 } else if (result != L2CAP_MR_SUCCESS) {
5037 l2cap_do_move_cancel(chan, result);
5039 switch (chan->move_role) {
5040 case L2CAP_MOVE_ROLE_INITIATOR:
5041 l2cap_do_move_initiate(chan, local_amp_id,
5044 case L2CAP_MOVE_ROLE_RESPONDER:
5045 l2cap_do_move_respond(chan, result);
5048 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request: validate the channel and
 * destination controller, detect move collisions, then either reject
 * or become the move responder and answer with SUCCESS/PEND.
 * NOTE(review): extract is missing interior lines (returns, hci_dev
 * put, labels, braces) — verify against the full file.
 */
5054 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5055 struct l2cap_cmd_hdr *cmd,
5056 u16 cmd_len, void *data)
5058 struct l2cap_move_chan_req *req = data;
5059 struct l2cap_move_chan_rsp rsp;
5060 struct l2cap_chan *chan;
5062 u16 result = L2CAP_MR_NOT_ALLOWED;
5064 if (cmd_len != sizeof(*req))
5067 icid = le16_to_cpu(req->icid);
5069 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5071 if (!conn->hs_enabled)
5074 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: reject directly, no channel state to touch. */
5076 rsp.icid = cpu_to_le16(icid);
5077 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5078 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5083 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels not pinned to BR/EDR move. */
5085 if (chan->scid < L2CAP_CID_DYN_START ||
5086 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5087 (chan->mode != L2CAP_MODE_ERTM &&
5088 chan->mode != L2CAP_MODE_STREAMING)) {
5089 result = L2CAP_MR_NOT_ALLOWED;
5090 goto send_move_response;
5093 if (chan->local_amp_id == req->dest_amp_id) {
5094 result = L2CAP_MR_SAME_ID;
5095 goto send_move_response;
5098 if (req->dest_amp_id != AMP_ID_BREDR) {
5099 struct hci_dev *hdev;
5100 hdev = hci_dev_get(req->dest_amp_id);
5101 if (!hdev || hdev->dev_type != HCI_AMP ||
5102 !test_bit(HCI_UP, &hdev->flags)) {
5106 result = L2CAP_MR_BAD_ID;
5107 goto send_move_response;
5112 /* Detect a move collision. Only send a collision response
5113 * if this side has "lost", otherwise proceed with the move.
5114 * The winner has the larger bd_addr.
5116 if ((__chan_is_moving(chan) ||
5117 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5118 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5119 result = L2CAP_MR_COLLISION;
5120 goto send_move_response;
5123 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5124 l2cap_move_setup(chan);
5125 chan->move_id = req->dest_amp_id;
5128 if (req->dest_amp_id == AMP_ID_BREDR) {
5129 /* Moving to BR/EDR */
5130 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5131 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5132 result = L2CAP_MR_PEND;
5134 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5135 result = L2CAP_MR_SUCCESS;
5138 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5139 /* Placeholder - uncomment when amp functions are available */
5140 /*amp_accept_physical(chan, req->dest_amp_id);*/
5141 result = L2CAP_MR_PEND;
5145 l2cap_send_move_chan_rsp(chan, result);
5147 l2cap_chan_unlock(chan);
/* Continue an in-progress move after a SUCCESS or PEND Move Channel
 * Response, advancing the initiator state machine; any unexpected
 * state aborts the move with an UNCONFIRMED confirm.
 * NOTE(review): extract is missing interior lines (breaks, default
 * label, braces) — verify against the full file.
 */
5152 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5154 struct l2cap_chan *chan;
5155 struct hci_chan *hchan = NULL;
5157 chan = l2cap_get_chan_by_scid(conn, icid);
/* No channel: spec still requires an (unconfirmed) confirm. */
5159 l2cap_send_move_chan_cfm_icid(conn, icid);
5163 __clear_chan_timer(chan);
/* PEND extends the wait using the extended response timeout. */
5164 if (result == L2CAP_MR_PEND)
5165 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5167 switch (chan->move_state) {
5168 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5169 /* Move confirm will be sent when logical link
5172 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5174 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5175 if (result == L2CAP_MR_PEND) {
5177 } else if (test_bit(CONN_LOCAL_BUSY,
5178 &chan->conn_state)) {
5179 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5181 /* Logical link is up or moving to BR/EDR,
5184 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5185 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5188 case L2CAP_MOVE_WAIT_RSP:
5190 if (result == L2CAP_MR_SUCCESS) {
5191 /* Remote is ready, send confirm immediately
5192 * after logical link is ready
5194 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5196 /* Both logical link and move success
5197 * are required to confirm
5199 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5202 /* Placeholder - get hci_chan for logical link */
5204 /* Logical link not available */
5205 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5209 /* If the logical link is not yet connected, do not
5210 * send confirmation.
5212 if (hchan->state != BT_CONNECTED)
5215 /* Logical link is already ready to go */
5217 chan->hs_hcon = hchan->conn;
5218 chan->hs_hcon->l2cap_data = chan->conn;
5220 if (result == L2CAP_MR_SUCCESS) {
5221 /* Can confirm now */
5222 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5224 /* Now only need move success
5227 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5230 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5233 /* Any other amp move state means the move failed. */
5234 chan->move_id = chan->local_amp_id;
5235 l2cap_move_done(chan);
5236 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5239 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response.  COLLISION flips this side
 * to responder; any other failure cancels the move.  Always sends an
 * UNCONFIRMED confirm so the exchange terminates.
 * NOTE(review): extract is missing interior lines (returns, braces).
 */
5242 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5245 struct l2cap_chan *chan;
5247 chan = l2cap_get_chan_by_ident(conn, ident);
5249 /* Could not locate channel, icid is best guess */
5250 l2cap_send_move_chan_cfm_icid(conn, icid);
5254 __clear_chan_timer(chan);
5256 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5257 if (result == L2CAP_MR_COLLISION) {
/* Lost the collision: let the peer drive the move. */
5258 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5260 /* Cleanup - cancel move */
5261 chan->move_id = chan->local_amp_id;
5262 l2cap_move_done(chan);
5266 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5268 l2cap_chan_unlock(chan);
/* Handle a Move Channel Response: SUCCESS/PEND continues the move,
 * anything else fails it.
 * NOTE(review): extract is missing interior lines (icid/result
 * declarations, return, braces).
 */
5271 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5272 struct l2cap_cmd_hdr *cmd,
5273 u16 cmd_len, void *data)
5275 struct l2cap_move_chan_rsp *rsp = data;
5278 if (cmd_len != sizeof(*rsp))
5281 icid = le16_to_cpu(rsp->icid);
5282 result = le16_to_cpu(rsp->result);
5284 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5286 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5287 l2cap_move_continue(conn, icid, result);
5289 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirm: commit or roll back the controller
 * switch, finish the move, and always answer with a Confirm Response
 * (required by the spec even for an unknown icid).
 * NOTE(review): extract is missing interior lines (declarations,
 * returns, braces).
 */
5294 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5295 struct l2cap_cmd_hdr *cmd,
5296 u16 cmd_len, void *data)
5298 struct l2cap_move_chan_cfm *cfm = data;
5299 struct l2cap_chan *chan;
5302 if (cmd_len != sizeof(*cfm))
5305 icid = le16_to_cpu(cfm->icid);
5306 result = le16_to_cpu(cfm->result);
5308 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5310 chan = l2cap_get_chan_by_dcid(conn, icid);
5312 /* Spec requires a response even if the icid was not found */
5313 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5317 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5318 if (result == L2CAP_MC_CONFIRMED) {
/* Commit: the move target becomes the active controller. */
5319 chan->local_amp_id = chan->move_id;
5320 if (chan->local_amp_id == AMP_ID_BREDR)
5321 __release_logical_link(chan);
/* Unconfirmed: roll the move id back to the current link. */
5323 chan->move_id = chan->local_amp_id;
5326 l2cap_move_done(chan);
5329 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5331 l2cap_chan_unlock(chan);
/* Handle a Move Channel Confirm Response: the final message of a move;
 * commit the controller switch and release the logical link if we
 * moved back to BR/EDR.
 * NOTE(review): extract is missing interior lines (icid declaration,
 * returns, braces).
 */
5336 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5337 struct l2cap_cmd_hdr *cmd,
5338 u16 cmd_len, void *data)
5340 struct l2cap_move_chan_cfm_rsp *rsp = data;
5341 struct l2cap_chan *chan;
5344 if (cmd_len != sizeof(*rsp))
5347 icid = le16_to_cpu(rsp->icid);
5349 BT_DBG("icid 0x%4.4x", icid);
5351 chan = l2cap_get_chan_by_scid(conn, icid);
5355 __clear_chan_timer(chan);
5357 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5358 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR: the AMP logical link is no longer needed. */
5360 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5361 __release_logical_link(chan);
5363 l2cap_move_done(chan);
5366 l2cap_chan_unlock(chan);
/* Validate LE connection parameters (units per the Core Spec: interval
 * in 1.25 ms, timeout in 10 ms).  Rejects out-of-range min/max/timeout,
 * a supervision timeout not longer than the max interval, and excessive
 * slave latency.
 * NOTE(review): extract is missing interior lines (to_multiplier
 * parameter, returns, braces).
 */
5371 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5376 if (min > max || min < 6 || max > 3200)
5379 if (to_multiplier < 10 || to_multiplier > 3200)
/* timeout must exceed max interval: to_multiplier*10ms > max*1.25ms*... */
5382 if (max >= to_multiplier * 8)
5385 max_latency = (to_multiplier * 8 / max) - 1;
5386 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master side only):
 * validate the parameters, reply accepted/rejected, and apply accepted
 * values to the link via hci_le_conn_update().
 * NOTE(review): extract is missing interior lines (returns, braces).
 */
5392 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5393 struct l2cap_cmd_hdr *cmd,
5394 u16 cmd_len, u8 *data)
5396 struct hci_conn *hcon = conn->hcon;
5397 struct l2cap_conn_param_update_req *req;
5398 struct l2cap_conn_param_update_rsp rsp;
5399 u16 min, max, latency, to_multiplier;
/* Only the master may grant a parameter update. */
5402 if (!(hcon->link_mode & HCI_LM_MASTER))
5405 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5408 req = (struct l2cap_conn_param_update_req *) data;
5409 min = __le16_to_cpu(req->min);
5410 max = __le16_to_cpu(req->max);
5411 latency = __le16_to_cpu(req->latency);
5412 to_multiplier = __le16_to_cpu(req->to_multiplier);
5414 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5415 min, max, latency, to_multiplier);
5417 memset(&rsp, 0, sizeof(rsp));
5419 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5421 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5423 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5425 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the controller. */
5429 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an LE Credit Based Connection Response: on success record the
 * peer's dcid/mtu/mps/credits and mark the channel ready; otherwise
 * delete it with ECONNREFUSED.
 * NOTE(review): extract is missing interior lines (minimum-value
 * handling, returns, braces).  The 23-byte floor matches the LE CoC
 * minimum MTU/MPS from the Core Spec.
 */
5434 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5435 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5438 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5439 u16 dcid, mtu, mps, credits, result;
5440 struct l2cap_chan *chan;
5443 if (cmd_len < sizeof(*rsp))
5446 dcid = __le16_to_cpu(rsp->dcid);
5447 mtu = __le16_to_cpu(rsp->mtu);
5448 mps = __le16_to_cpu(rsp->mps);
5449 credits = __le16_to_cpu(rsp->credits);
5450 result = __le16_to_cpu(rsp->result);
5452 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5455 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5456 dcid, mtu, mps, credits, result);
5458 mutex_lock(&conn->chan_lock);
/* Responses are matched by the ident we used for the request. */
5460 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5468 l2cap_chan_lock(chan);
5471 case L2CAP_CR_SUCCESS:
5475 chan->remote_mps = mps;
5476 chan->tx_credits = credits;
5477 l2cap_chan_ready(chan);
/* Any failure result refuses the connection. */
5481 l2cap_chan_del(chan, ECONNREFUSED);
5485 l2cap_chan_unlock(chan);
5488 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler.  Echo requests
 * are answered inline; unknown opcodes are logged as errors.
 * NOTE(review): extract is missing interior lines (breaks, default
 * label, return, braces).
 */
5493 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5494 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5499 switch (cmd->code) {
5500 case L2CAP_COMMAND_REJ:
5501 l2cap_command_rej(conn, cmd, cmd_len, data);
5504 case L2CAP_CONN_REQ:
5505 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5508 case L2CAP_CONN_RSP:
/* Create Channel Response shares the Connect Response handler. */
5509 case L2CAP_CREATE_CHAN_RSP:
5510 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5513 case L2CAP_CONF_REQ:
5514 err = l2cap_config_req(conn, cmd, cmd_len, data);
5517 case L2CAP_CONF_RSP:
5518 l2cap_config_rsp(conn, cmd, cmd_len, data);
5521 case L2CAP_DISCONN_REQ:
5522 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5525 case L2CAP_DISCONN_RSP:
5526 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5529 case L2CAP_ECHO_REQ:
/* Echo back the payload unchanged. */
5530 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5533 case L2CAP_ECHO_RSP:
5536 case L2CAP_INFO_REQ:
5537 err = l2cap_information_req(conn, cmd, cmd_len, data);
5540 case L2CAP_INFO_RSP:
5541 l2cap_information_rsp(conn, cmd, cmd_len, data);
5544 case L2CAP_CREATE_CHAN_REQ:
5545 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5548 case L2CAP_MOVE_CHAN_REQ:
5549 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5552 case L2CAP_MOVE_CHAN_RSP:
5553 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5556 case L2CAP_MOVE_CHAN_CFM:
5557 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5560 case L2CAP_MOVE_CHAN_CFM_RSP:
5561 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5565 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request: find a listening
 * channel for the psm, check security and duplicate dcid, create the
 * new channel, then reply with SUCCESS, PEND (deferred setup), or an
 * error result.
 * NOTE(review): extract is missing interior lines (psm read, result
 * labels, memset of rsp, returns, braces) — verify against the full
 * file.
 */
5573 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5574 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5577 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5578 struct l2cap_le_conn_rsp rsp;
5579 struct l2cap_chan *chan, *pchan;
5580 u16 dcid, scid, credits, mtu, mps;
5584 if (cmd_len != sizeof(*req))
5587 scid = __le16_to_cpu(req->scid);
5588 mtu = __le16_to_cpu(req->mtu);
5589 mps = __le16_to_cpu(req->mps);
/* LE CoC minimum MTU/MPS is 23 octets. */
5594 if (mtu < 23 || mps < 23)
5597 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5600 /* Check if we have socket listening on psm */
5601 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5602 &conn->hcon->dst, LE_LINK);
5604 result = L2CAP_CR_BAD_PSM;
5609 mutex_lock(&conn->chan_lock);
5610 l2cap_chan_lock(pchan);
5612 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5613 result = L2CAP_CR_AUTHENTICATION;
5615 goto response_unlock;
5618 /* Check if we already have channel with that dcid */
5619 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5620 result = L2CAP_CR_NO_MEM;
5622 goto response_unlock;
5625 chan = pchan->ops->new_connection(pchan);
5627 result = L2CAP_CR_NO_MEM;
5628 goto response_unlock;
5631 bacpy(&chan->src, &conn->hcon->src);
5632 bacpy(&chan->dst, &conn->hcon->dst);
5633 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5634 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5638 chan->remote_mps = mps;
5639 chan->tx_credits = __le16_to_cpu(req->credits);
5641 __l2cap_chan_add(conn, chan);
5643 credits = chan->rx_credits;
5645 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5647 chan->ident = cmd->ident;
/* Deferred setup: answer PEND now, SUCCESS later from userspace. */
5649 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5650 l2cap_state_change(chan, BT_CONNECT2);
5651 result = L2CAP_CR_PEND;
5652 chan->ops->defer(chan);
5654 l2cap_chan_ready(chan);
5655 result = L2CAP_CR_SUCCESS;
5659 l2cap_chan_unlock(pchan);
5660 mutex_unlock(&conn->chan_lock);
5662 if (result == L2CAP_CR_PEND)
5667 rsp.mtu = cpu_to_le16(chan->imtu);
5668 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
5674 rsp.dcid = cpu_to_le16(dcid);
5675 rsp.credits = cpu_to_le16(credits);
5676 rsp.result = cpu_to_le16(result);
5678 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the granted credits and
 * flush as many queued TX frames as the new credit total allows.
 * NOTE(review): extract is missing interior lines (returns, credit
 * overflow check, braces).
 */
5683 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5684 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5687 struct l2cap_le_credits *pkt;
5688 struct l2cap_chan *chan;
5691 if (cmd_len != sizeof(*pkt))
5694 pkt = (struct l2cap_le_credits *) data;
5695 cid = __le16_to_cpu(pkt->cid);
5696 credits = __le16_to_cpu(pkt->credits);
5698 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5700 chan = l2cap_get_chan_by_dcid(conn, cid);
5704 chan->tx_credits += credits;
/* Drain the backlog while credits remain. */
5706 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5707 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Credits left over: let the socket layer resume writers. */
5711 if (chan->tx_credits)
5712 chan->ops->resume(chan);
5714 l2cap_chan_unlock(chan);
/* Dispatch one LE signaling command.  When LE CoC support is disabled,
 * connection-oriented opcodes are filtered out up front.
 * NOTE(review): extract is missing interior lines (breaks, the
 * not-supported return for the filter, braces).
 */
5719 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5720 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
/* LE CoC disabled: reject the credit-based opcodes below. */
5725 if (!enable_lecoc) {
5726 switch (cmd->code) {
5727 case L2CAP_LE_CONN_REQ:
5728 case L2CAP_LE_CONN_RSP:
5729 case L2CAP_LE_CREDITS:
5730 case L2CAP_DISCONN_REQ:
5731 case L2CAP_DISCONN_RSP:
5736 switch (cmd->code) {
5737 case L2CAP_COMMAND_REJ:
5740 case L2CAP_CONN_PARAM_UPDATE_REQ:
5741 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5744 case L2CAP_CONN_PARAM_UPDATE_RSP:
5747 case L2CAP_LE_CONN_RSP:
5748 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5751 case L2CAP_LE_CONN_REQ:
5752 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5755 case L2CAP_LE_CREDITS:
5756 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5759 case L2CAP_DISCONN_REQ:
5760 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5763 case L2CAP_DISCONN_RSP:
5764 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5768 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process one LE signaling PDU from skb: validate the header and
 * length, dispatch the single command, and send a Command Reject if
 * the handler failed.
 * NOTE(review): extract is missing interior lines (drop/free paths,
 * braces).  LE signaling carries exactly one command per PDU, unlike
 * the BR/EDR loop below.
 */
5776 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5777 struct sk_buff *skb)
5779 struct hci_conn *hcon = conn->hcon;
5780 struct l2cap_cmd_hdr *cmd;
5784 if (hcon->type != LE_LINK)
5787 if (skb->len < L2CAP_CMD_HDR_SIZE)
5790 cmd = (void *) skb->data;
5791 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5793 len = le16_to_cpu(cmd->len);
5795 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Length must match exactly and ident 0 is reserved. */
5797 if (len != skb->len || !cmd->ident) {
5798 BT_DBG("corrupted command");
5802 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5804 struct l2cap_cmd_rej_unk rej;
5806 BT_ERR("Wrong link type (%d)", err);
5808 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5809 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Parse the BR/EDR signaling channel payload.  A single ACL signaling
 * packet may contain several commands, so iterate until the buffer is
 * exhausted, rejecting commands we fail to handle.
 * NOTE(review): some lines (len init, loop advancement) are elided here.
 */
5817 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5818 struct sk_buff *skb)
5820 struct hci_conn *hcon = conn->hcon;
5821 u8 *data = skb->data;
5823 struct l2cap_cmd_hdr cmd;
/* Mirror raw signaling traffic to any raw sockets first. */
5826 l2cap_raw_recv(conn, skb);
5828 if (hcon->type != ACL_LINK)
5831 while (len >= L2CAP_CMD_HDR_SIZE) {
5833 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5834 data += L2CAP_CMD_HDR_SIZE;
5835 len -= L2CAP_CMD_HDR_SIZE;
5837 cmd_len = le16_to_cpu(cmd.len);
5839 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Per-command length must fit in what is left; ident 0 is reserved. */
5842 if (cmd_len > len || !cmd.ident) {
5843 BT_DBG("corrupted command");
5847 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5849 struct l2cap_cmd_rej_unk rej;
5851 BT_ERR("Wrong link type (%d)", err);
5853 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5854 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * The CRC covers the L2CAP header (which sits just before skb->data)
 * plus the payload.  Returns nonzero on mismatch (elided in excerpt).
 */
5866 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5868 u16 our_fcs, rcv_fcs;
/* Extended control fields make the header two bytes longer. */
5871 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5872 hdr_size = L2CAP_EXT_HDR_SIZE;
5874 hdr_size = L2CAP_ENH_HDR_SIZE;
5876 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off the skb first; skb->data + skb->len then points
 * exactly at the (still readable) FCS bytes in the buffer. */
5877 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5878 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5879 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5881 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) by sending, in priority order: an RNR if we are
 * locally busy, pending I-frames (which can carry the F-bit), or a
 * plain RR.  Exactly one response carries F=1.
 */
5887 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5889 struct l2cap_ctrl control;
5891 BT_DBG("chan %p", chan);
5893 memset(&control, 0, sizeof(control));
5896 control.reqseq = chan->buffer_seq;
/* Mark that the F-bit still needs to go out on some frame. */
5897 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5899 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5900 control.super = L2CAP_SUPER_RNR;
5901 l2cap_send_sframe(chan, &control);
/* Remote just left busy state: restart retransmission timing if
 * frames are still unacknowledged. */
5904 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5905 chan->unacked_frames > 0)
5906 __set_retrans_timer(chan);
5908 /* Send pending iframes */
5909 l2cap_ertm_send(chan);
5911 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5912 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5913 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5916 control.super = L2CAP_SUPER_RR;
5917 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list and update the bookkeeping
 * (len/data_len/truesize).  @last_frag caches the tail of the frag
 * list so appends stay O(1).
 */
5921 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5922 struct sk_buff **last_frag)
5924 /* skb->len reflects data in skb as well as all fragments
5925 * skb->data_len reflects only data in fragments
5927 if (!skb_has_frag_list(skb))
5928 skb_shinfo(skb)->frag_list = new_frag;
5930 new_frag->next = NULL;
5932 (*last_frag)->next = new_frag;
5933 *last_frag = new_frag;
5935 skb->len += new_frag->len;
5936 skb->data_len += new_frag->len;
5937 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to their SAR bits and
 * deliver the completed SDU via chan->ops->recv().  Partial state is
 * kept in chan->sdu / chan->sdu_len / chan->sdu_last_frag.
 * NOTE(review): error paths and break statements are elided in this
 * excerpt; the trailing lines discard partial state on failure.
 */
5940 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5941 struct l2cap_ctrl *control)
5945 switch (control->sar) {
5946 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: deliver directly. */
5950 err = chan->ops->recv(chan, skb);
5953 case L2CAP_SAR_START:
/* First fragment starts with a 2-byte total SDU length. */
5957 chan->sdu_len = get_unaligned_le16(skb->data);
5958 skb_pull(skb, L2CAP_SDULEN_SIZE);
5960 if (chan->sdu_len > chan->imtu) {
5965 if (skb->len >= chan->sdu_len)
5969 chan->sdu_last_frag = skb;
5975 case L2CAP_SAR_CONTINUE:
5979 append_skb_frag(chan->sdu, skb,
5980 &chan->sdu_last_frag);
/* A continuation must not reach the announced SDU length;
 * only the END fragment may complete it. */
5983 if (chan->sdu->len >= chan->sdu_len)
5993 append_skb_frag(chan->sdu, skb,
5994 &chan->sdu_last_frag);
5997 if (chan->sdu->len != chan->sdu_len)
6000 err = chan->ops->recv(chan, chan->sdu);
6003 /* Reassembly complete */
6005 chan->sdu_last_frag = NULL;
/* Error path: drop any partially reassembled SDU. */
6013 kfree_skb(chan->sdu);
6015 chan->sdu_last_frag = NULL;
/* Re-segment outgoing data after an MTU change (e.g. channel move).
 * NOTE(review): body elided in this excerpt — presumably a stub or
 * placeholder; confirm against the full source.
 */
6022 static int l2cap_resegment(struct l2cap_chan *chan)
/* Signal a local-busy transition (receive buffers full/cleared) to the
 * ERTM state machine.  No-op for non-ERTM channels.
 */
6028 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6032 if (chan->mode != L2CAP_MODE_ERTM)
6035 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6036 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver consecutively numbered frames starting
 * at buffer_seq until a sequence gap (or local busy) stops us.  When
 * the queue empties, leave SREJ_SENT state and ack the sender.
 */
6039 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6042 /* Pass sequential frames to l2cap_reassemble_sdu()
6043 * until a gap is encountered.
6046 BT_DBG("chan %p", chan);
6048 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6049 struct sk_buff *skb;
6050 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6051 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6053 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6058 skb_unlink(skb, &chan->srej_q);
6059 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6060 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All outstanding SREJ'd frames received: resume normal RX. */
6065 if (skb_queue_empty(&chan->srej_q)) {
6066 chan->rx_state = L2CAP_RX_STATE_RECV;
6067 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single frame the peer
 * asked for (control->reqseq), respecting max_tx and the SREJ_ACT
 * bookkeeping that avoids duplicate retransmissions after a poll.
 */
6073 static void l2cap_handle_srej(struct l2cap_chan *chan,
6074 struct l2cap_ctrl *control)
6076 struct sk_buff *skb;
6078 BT_DBG("chan %p, control %p", chan, control);
/* SREJ for a frame we never sent: protocol violation, disconnect. */
6080 if (control->reqseq == chan->next_tx_seq) {
6081 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6082 l2cap_send_disconn_req(chan, ECONNRESET);
6086 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6089 BT_DBG("Seq %d not available for retransmission",
/* Retry budget exhausted for this frame: give up on the channel. */
6094 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
6095 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6096 l2cap_send_disconn_req(chan, ECONNRESET);
6100 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6102 if (control->poll) {
6103 l2cap_pass_to_tx(chan, control);
/* Poll set: the retransmitted frame must carry the F-bit. */
6105 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6106 l2cap_retransmit(chan, control);
6107 l2cap_ertm_send(chan);
6109 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6110 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6111 chan->srej_save_reqseq = control->reqseq;
6114 l2cap_pass_to_tx_fbit(chan, control);
6116 if (control->final) {
/* Only retransmit if this SREJ wasn't already acted on
 * for the same reqseq (avoids double retransmission). */
6117 if (chan->srej_save_reqseq != control->reqseq ||
6118 !test_and_clear_bit(CONN_SREJ_ACT,
6120 l2cap_retransmit(chan, control);
6122 l2cap_retransmit(chan, control);
6123 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6124 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6125 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from
 * control->reqseq onward, unless the retry budget is exhausted or the
 * REJ was already acted upon (CONN_REJ_ACT).
 */
6131 static void l2cap_handle_rej(struct l2cap_chan *chan,
6132 struct l2cap_ctrl *control)
6134 struct sk_buff *skb;
6136 BT_DBG("chan %p, control %p", chan, control);
/* REJ of a never-sent sequence number: protocol violation. */
6138 if (control->reqseq == chan->next_tx_seq) {
6139 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6140 l2cap_send_disconn_req(chan, ECONNRESET);
6144 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6146 if (chan->max_tx && skb &&
6147 bt_cb(skb)->control.retries >= chan->max_tx) {
6148 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6149 l2cap_send_disconn_req(chan, ECONNRESET);
6153 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6155 l2cap_pass_to_tx(chan, control);
6157 if (control->final) {
/* F=1: retransmit only if this REJ wasn't already handled. */
6158 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6159 l2cap_retransmit_all(chan, control);
6161 l2cap_retransmit_all(chan, control);
6162 l2cap_ertm_send(chan);
6163 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6164 set_bit(CONN_REJ_ACT, &chan->conn_state);
6168 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6170 BT_DBG("chan %p, txseq %d", chan, txseq);
6172 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6173 chan->expected_tx_seq);
6175 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6176 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6178 /* See notes below regarding "double poll" and
6181 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6182 BT_DBG("Invalid/Ignore - after SREJ");
6183 return L2CAP_TXSEQ_INVALID_IGNORE;
6185 BT_DBG("Invalid - in window after SREJ sent");
6186 return L2CAP_TXSEQ_INVALID;
6190 if (chan->srej_list.head == txseq) {
6191 BT_DBG("Expected SREJ");
6192 return L2CAP_TXSEQ_EXPECTED_SREJ;
6195 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6196 BT_DBG("Duplicate SREJ - txseq already stored");
6197 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6200 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6201 BT_DBG("Unexpected SREJ - not requested");
6202 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6206 if (chan->expected_tx_seq == txseq) {
6207 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6209 BT_DBG("Invalid - txseq outside tx window");
6210 return L2CAP_TXSEQ_INVALID;
6213 return L2CAP_TXSEQ_EXPECTED;
6217 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6218 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6219 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6220 return L2CAP_TXSEQ_DUPLICATE;
6223 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6224 /* A source of invalid packets is a "double poll" condition,
6225 * where delays cause us to send multiple poll packets. If
6226 * the remote stack receives and processes both polls,
6227 * sequence numbers can wrap around in such a way that a
6228 * resent frame has a sequence number that looks like new data
6229 * with a sequence gap. This would trigger an erroneous SREJ
6232 * Fortunately, this is impossible with a tx window that's
6233 * less than half of the maximum sequence number, which allows
6234 * invalid frames to be safely ignored.
6236 * With tx window sizes greater than half of the tx window
6237 * maximum, the frame is invalid and cannot be ignored. This
6238 * causes a disconnect.
6241 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6242 BT_DBG("Invalid/Ignore - txseq outside tx window");
6243 return L2CAP_TXSEQ_INVALID_IGNORE;
6245 BT_DBG("Invalid - txseq outside tx window");
6246 return L2CAP_TXSEQ_INVALID;
6249 BT_DBG("Unexpected - txseq indicates missing frames");
6250 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, RECV (normal) state.  Handles incoming
 * I-frames (per their txseq classification) and RR/RNR/REJ/SREJ
 * S-frames.  @skb ownership: frames that are queued or delivered set
 * skb_in_use; anything else is freed at the end.
 * NOTE(review): break statements and some assignments are elided in
 * this excerpt.
 */
6254 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6255 struct l2cap_ctrl *control,
6256 struct sk_buff *skb, u8 event)
6259 bool skb_in_use = false;
6261 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6265 case L2CAP_EV_RECV_IFRAME:
6266 switch (l2cap_classify_txseq(chan, control->txseq)) {
6267 case L2CAP_TXSEQ_EXPECTED:
6268 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop; peer will retransmit once we recover. */
6270 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6271 BT_DBG("Busy, discarding expected seq %d",
6276 chan->expected_tx_seq = __next_seq(chan,
6279 chan->buffer_seq = chan->expected_tx_seq;
6282 err = l2cap_reassemble_sdu(chan, skb, control);
6286 if (control->final) {
6287 if (!test_and_clear_bit(CONN_REJ_ACT,
6288 &chan->conn_state)) {
6290 l2cap_retransmit_all(chan, control);
6291 l2cap_ertm_send(chan);
6295 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6296 l2cap_send_ack(chan);
6298 case L2CAP_TXSEQ_UNEXPECTED:
6299 l2cap_pass_to_tx(chan, control);
6301 /* Can't issue SREJ frames in the local busy state.
6302 * Drop this frame, it will be seen as missing
6303 * when local busy is exited.
6305 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6306 BT_DBG("Busy, discarding unexpected seq %d",
6311 /* There was a gap in the sequence, so an SREJ
6312 * must be sent for each missing frame. The
6313 * current frame is stored for later use.
6315 skb_queue_tail(&chan->srej_q, skb);
6317 BT_DBG("Queued %p (queue len %d)", skb,
6318 skb_queue_len(&chan->srej_q));
6320 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6321 l2cap_seq_list_clear(&chan->srej_list);
6322 l2cap_send_srej(chan, control->txseq);
6324 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6326 case L2CAP_TXSEQ_DUPLICATE:
6327 l2cap_pass_to_tx(chan, control);
6329 case L2CAP_TXSEQ_INVALID_IGNORE:
6331 case L2CAP_TXSEQ_INVALID:
6333 l2cap_send_disconn_req(chan, ECONNRESET);
6337 case L2CAP_EV_RECV_RR:
6338 l2cap_pass_to_tx(chan, control);
6339 if (control->final) {
6340 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6342 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6343 !__chan_is_moving(chan)) {
6345 l2cap_retransmit_all(chan, control);
6348 l2cap_ertm_send(chan);
6349 } else if (control->poll) {
/* P=1: answer with an F-bit frame. */
6350 l2cap_send_i_or_rr_or_rnr(chan);
6352 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6353 &chan->conn_state) &&
6354 chan->unacked_frames)
6355 __set_retrans_timer(chan);
6357 l2cap_ertm_send(chan);
6360 case L2CAP_EV_RECV_RNR:
6361 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6362 l2cap_pass_to_tx(chan, control);
6363 if (control && control->poll) {
6364 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6365 l2cap_send_rr_or_rnr(chan, 0);
/* Remote busy: stop retransmitting until it recovers. */
6367 __clear_retrans_timer(chan);
6368 l2cap_seq_list_clear(&chan->retrans_list);
6370 case L2CAP_EV_RECV_REJ:
6371 l2cap_handle_rej(chan, control);
6373 case L2CAP_EV_RECV_SREJ:
6374 l2cap_handle_srej(chan, control);
/* Free any skb that was neither queued nor delivered. */
6380 if (skb && !skb_in_use) {
6381 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine, SREJ_SENT state: we have outstanding
 * SREJ requests.  Incoming I-frames are queued on srej_q until the
 * gaps fill; queued frames are then delivered in order by
 * l2cap_rx_queued_iframes().
 * NOTE(review): break statements and skb_in_use assignments are elided
 * in this excerpt.
 */
6388 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6389 struct l2cap_ctrl *control,
6390 struct sk_buff *skb, u8 event)
6393 u16 txseq = control->txseq;
6394 bool skb_in_use = false;
6396 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6400 case L2CAP_EV_RECV_IFRAME:
6401 switch (l2cap_classify_txseq(chan, txseq)) {
6402 case L2CAP_TXSEQ_EXPECTED:
6403 /* Keep frame for reassembly later */
6404 l2cap_pass_to_tx(chan, control);
6405 skb_queue_tail(&chan->srej_q, skb);
6407 BT_DBG("Queued %p (queue len %d)", skb,
6408 skb_queue_len(&chan->srej_q));
6410 chan->expected_tx_seq = __next_seq(chan, txseq);
6412 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the frame at the head of the SREJ list: a gap
 * may now be closed, so try to drain the queue. */
6413 l2cap_seq_list_pop(&chan->srej_list);
6415 l2cap_pass_to_tx(chan, control);
6416 skb_queue_tail(&chan->srej_q, skb);
6418 BT_DBG("Queued %p (queue len %d)", skb,
6419 skb_queue_len(&chan->srej_q));
6421 err = l2cap_rx_queued_iframes(chan);
6426 case L2CAP_TXSEQ_UNEXPECTED:
6427 /* Got a frame that can't be reassembled yet.
6428 * Save it for later, and send SREJs to cover
6429 * the missing frames.
6431 skb_queue_tail(&chan->srej_q, skb);
6433 BT_DBG("Queued %p (queue len %d)", skb,
6434 skb_queue_len(&chan->srej_q));
6436 l2cap_pass_to_tx(chan, control);
6437 l2cap_send_srej(chan, control->txseq);
6439 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6440 /* This frame was requested with an SREJ, but
6441 * some expected retransmitted frames are
6442 * missing. Request retransmission of missing
6445 skb_queue_tail(&chan->srej_q, skb);
6447 BT_DBG("Queued %p (queue len %d)", skb,
6448 skb_queue_len(&chan->srej_q));
6450 l2cap_pass_to_tx(chan, control);
6451 l2cap_send_srej_list(chan, control->txseq);
6453 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6454 /* We've already queued this frame. Drop this copy. */
6455 l2cap_pass_to_tx(chan, control);
6457 case L2CAP_TXSEQ_DUPLICATE:
6458 /* Expecting a later sequence number, so this frame
6459 * was already received. Ignore it completely.
6462 case L2CAP_TXSEQ_INVALID_IGNORE:
6464 case L2CAP_TXSEQ_INVALID:
6466 l2cap_send_disconn_req(chan, ECONNRESET);
6470 case L2CAP_EV_RECV_RR:
6471 l2cap_pass_to_tx(chan, control);
6472 if (control->final) {
6473 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6475 if (!test_and_clear_bit(CONN_REJ_ACT,
6476 &chan->conn_state)) {
6478 l2cap_retransmit_all(chan, control);
6481 l2cap_ertm_send(chan);
6482 } else if (control->poll) {
6483 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6484 &chan->conn_state) &&
6485 chan->unacked_frames) {
6486 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the newest gap. */
6489 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6490 l2cap_send_srej_tail(chan);
6492 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6493 &chan->conn_state) &&
6494 chan->unacked_frames)
6495 __set_retrans_timer(chan);
6497 l2cap_send_ack(chan);
6500 case L2CAP_EV_RECV_RNR:
6501 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6502 l2cap_pass_to_tx(chan, control);
6503 if (control->poll) {
6504 l2cap_send_srej_tail(chan);
6506 struct l2cap_ctrl rr_control;
6507 memset(&rr_control, 0, sizeof(rr_control));
6508 rr_control.sframe = 1;
6509 rr_control.super = L2CAP_SUPER_RR;
6510 rr_control.reqseq = chan->buffer_seq;
6511 l2cap_send_sframe(chan, &rr_control);
6515 case L2CAP_EV_RECV_REJ:
6516 l2cap_handle_rej(chan, control);
6518 case L2CAP_EV_RECV_SREJ:
6519 l2cap_handle_srej(chan, control);
/* Free any skb that was neither queued nor delivered. */
6523 if (skb && !skb_in_use) {
6524 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: pick the MTU of the new transport
 * (block MTU for the high-speed link, ACL MTU otherwise) and
 * re-segment pending traffic for it.
 */
6531 static int l2cap_finish_move(struct l2cap_chan *chan)
6533 BT_DBG("chan %p", chan);
6535 chan->rx_state = L2CAP_RX_STATE_RECV;
6538 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6540 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6542 return l2cap_resegment(chan);
/* RX state after a channel move while waiting for the peer's poll
 * (P=1).  On receipt, resynchronize the TX side to the peer's reqseq,
 * finish the move, and answer the poll with an F-bit frame; other
 * events fall through to the normal RECV handler.
 * NOTE(review): the poll check itself is elided in this excerpt.
 */
6545 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6546 struct l2cap_ctrl *control,
6547 struct sk_buff *skb, u8 event)
6551 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6557 l2cap_process_reqseq(chan, control->reqseq);
6559 if (!skb_queue_empty(&chan->tx_q))
6560 chan->tx_send_head = skb_peek(&chan->tx_q);
6562 chan->tx_send_head = NULL;
6564 /* Rewind next_tx_seq to the point expected
6567 chan->next_tx_seq = control->reqseq;
6568 chan->unacked_frames = 0;
6570 err = l2cap_finish_move(chan);
6574 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6575 l2cap_send_i_or_rr_or_rnr(chan);
6577 if (event == L2CAP_EV_RECV_IFRAME)
/* S-frames are re-run through the normal RECV state handler. */
6580 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state after a channel move while waiting for the peer's final
 * (F=1) response.  On receipt, resynchronize the TX side, adopt the
 * new transport's MTU, re-segment, and process the frame normally.
 */
6583 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6584 struct l2cap_ctrl *control,
6585 struct sk_buff *skb, u8 event)
/* Ignore everything until the F-bit arrives. */
6589 if (!control->final)
6592 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6594 chan->rx_state = L2CAP_RX_STATE_RECV;
6595 l2cap_process_reqseq(chan, control->reqseq);
6597 if (!skb_queue_empty(&chan->tx_q))
6598 chan->tx_send_head = skb_peek(&chan->tx_q);
6600 chan->tx_send_head = NULL;
6602 /* Rewind next_tx_seq to the point expected
6605 chan->next_tx_seq = control->reqseq;
6606 chan->unacked_frames = 0;
6609 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6611 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6613 err = l2cap_resegment(chan);
6616 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if @reqseq acknowledges only frames that are actually
 * outstanding (sent but not yet acked), using modular sequence
 * arithmetic via __seq_offset().
 */
6621 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6623 /* Make sure reqseq is for a packet that has been sent but not acked */
6626 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6627 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: validate the frame's reqseq, then
 * route the event to the handler for the channel's current rx_state.
 * An invalid reqseq is a fatal protocol error → disconnect.
 */
6630 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6631 struct sk_buff *skb, u8 event)
6635 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6636 control, skb, event, chan->rx_state);
6638 if (__valid_reqseq(chan, control->reqseq)) {
6639 switch (chan->rx_state) {
6640 case L2CAP_RX_STATE_RECV:
6641 err = l2cap_rx_state_recv(chan, control, skb, event);
6643 case L2CAP_RX_STATE_SREJ_SENT:
6644 err = l2cap_rx_state_srej_sent(chan, control, skb,
6647 case L2CAP_RX_STATE_WAIT_P:
6648 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6650 case L2CAP_RX_STATE_WAIT_F:
6651 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6658 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6659 control->reqseq, chan->next_tx_seq,
6660 chan->expected_ack_seq);
6661 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only exactly-expected frames are delivered;
 * anything else is dropped and any partial SDU is discarded (streaming
 * mode has no retransmission).  Sequence state always advances to
 * follow the received txseq.
 */
6667 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6668 struct sk_buff *skb)
6672 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6675 if (l2cap_classify_txseq(chan, control->txseq) ==
6676 L2CAP_TXSEQ_EXPECTED) {
6677 l2cap_pass_to_tx(chan, control);
6679 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6680 __next_seq(chan, chan->buffer_seq));
6682 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6684 l2cap_reassemble_sdu(chan, skb, control);
/* Gap detected: the in-progress SDU can never complete. */
6687 kfree_skb(chan->sdu);
6690 chan->sdu_last_frag = NULL;
6694 BT_DBG("Freeing %p", skb);
/* Track the peer's sequence regardless of delivery outcome. */
6699 chan->last_acked_seq = control->txseq;
6700 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames: unpack and validate the
 * control field and FCS, then route I-frames and S-frames into the
 * appropriate state machine.
 * NOTE(review): several validation branches (len init, goto drop) are
 * elided in this excerpt.
 */
6705 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6707 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6711 __unpack_control(chan, skb);
6716 * We can just drop the corrupted I-frame here.
6717 * Receiver will miss it and start proper recovery
6718 * procedures and ask for retransmission.
6720 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length prefix of a START
 * fragment and the FCS trailer. */
6723 if (!control->sframe && control->sar == L2CAP_SAR_START)
6724 len -= L2CAP_SDULEN_SIZE;
6726 if (chan->fcs == L2CAP_FCS_CRC16)
6727 len -= L2CAP_FCS_SIZE;
6729 if (len > chan->mps) {
6730 l2cap_send_disconn_req(chan, ECONNRESET);
6734 if (!control->sframe) {
6737 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6738 control->sar, control->reqseq, control->final,
6741 /* Validate F-bit - F=0 always valid, F=1 only
6742 * valid in TX WAIT_F
6744 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6747 if (chan->mode != L2CAP_MODE_STREAMING) {
6748 event = L2CAP_EV_RECV_IFRAME;
6749 err = l2cap_rx(chan, control, skb, event);
6751 err = l2cap_stream_rx(chan, control, skb);
6755 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame "super" field onto RX events. */
6757 const u8 rx_func_to_event[4] = {
6758 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6759 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6762 /* Only I-frames are expected in streaming mode */
6763 if (chan->mode == L2CAP_MODE_STREAMING)
6766 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6767 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation. */
6771 BT_ERR("Trailing bytes: %d in sframe", len);
6772 l2cap_send_disconn_req(chan, ECONNRESET);
6776 /* Validate F and P bits */
6777 if (control->final && (control->poll ||
6778 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6781 event = rx_func_to_event[control->super];
6782 if (l2cap_rx(chan, control, skb, event))
6783 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return receive credits to the peer of an LE CoC channel once our
 * remaining credits drop below half of the initial allotment, topping
 * back up to L2CAP_LE_MAX_CREDITS.
 */
6793 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6795 struct l2cap_conn *conn = chan->conn;
6796 struct l2cap_le_credits pkt;
6799 /* We return more credits to the sender only after the amount of
6800 * credits falls below half of the initial amount.
6802 if (chan->rx_credits >= (L2CAP_LE_MAX_CREDITS + 1) / 2)
6805 return_credits = L2CAP_LE_MAX_CREDITS - chan->rx_credits;
6807 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6809 chan->rx_credits += return_credits;
6811 pkt.cid = cpu_to_le16(chan->scid);
6812 pkt.credits = cpu_to_le16(return_credits);
6814 chan->ident = l2cap_get_ident(conn);
6816 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive one LE CoC data frame: enforce credits and the channel MTU,
 * replenish the peer's credits if needed, then deliver upward.
 * NOTE(review): the rx_credits decrement and error returns are elided
 * in this excerpt (the BT_DBG below implies the decrement).
 */
6819 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* A frame with no credits outstanding is a protocol violation. */
6821 if (!chan->rx_credits)
6824 if (chan->imtu < skb->len)
6828 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6830 l2cap_chan_le_send_credits(chan);
6832 return chan->ops->recv(chan, skb);
/* Route an incoming data packet to the channel identified by @cid and
 * feed it to the mode-specific receive path; A2MP packets may create
 * their channel on demand.  Unknown CIDs are dropped.
 */
6835 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6836 struct sk_buff *skb)
6838 struct l2cap_chan *chan;
6840 chan = l2cap_get_chan_by_scid(conn, cid);
6842 if (cid == L2CAP_CID_A2MP) {
6843 chan = a2mp_channel_create(conn, skb);
6849 l2cap_chan_lock(chan);
6851 BT_DBG("unknown cid 0x%4.4x", cid);
6852 /* Drop packet and return */
6858 BT_DBG("chan %p, len %d", chan, skb->len);
6860 if (chan->state != BT_CONNECTED)
6863 switch (chan->mode) {
6864 case L2CAP_MODE_LE_FLOWCTL:
6865 if (l2cap_le_data_rcv(chan, skb) < 0)
6870 case L2CAP_MODE_BASIC:
6871 /* If socket recv buffers overflows we drop data here
6872 * which is *bad* because L2CAP has to be reliable.
6873 * But we don't have any other choice. L2CAP doesn't
6874 * provide flow control mechanism. */
6876 if (chan->imtu < skb->len)
6879 if (!chan->ops->recv(chan, skb))
6883 case L2CAP_MODE_ERTM:
6884 case L2CAP_MODE_STREAMING:
6885 l2cap_data_rcv(chan, skb);
6889 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6897 l2cap_chan_unlock(chan);
/* Deliver a connectionless (UCD) packet to the global channel
 * listening on @psm, stashing the sender's address and PSM in the
 * skb's control block for recvmsg's msg_name.
 */
6900 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6901 struct sk_buff *skb)
6903 struct hci_conn *hcon = conn->hcon;
6904 struct l2cap_chan *chan;
/* UCD only exists on BR/EDR links. */
6906 if (hcon->type != ACL_LINK)
6909 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6914 BT_DBG("chan %p, len %d", chan, skb->len);
6916 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6919 if (chan->imtu < skb->len)
6922 /* Store remote BD_ADDR and PSM for msg_name */
6923 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6924 bt_cb(skb)->psm = psm;
6926 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel packet to the connected ATT channel for
 * this address pair, dropping traffic from blacklisted remotes or
 * oversized payloads.
 */
6933 static void l2cap_att_channel(struct l2cap_conn *conn,
6934 struct sk_buff *skb)
6936 struct hci_conn *hcon = conn->hcon;
6937 struct l2cap_chan *chan;
/* ATT fixed channel only exists on LE links. */
6939 if (hcon->type != LE_LINK)
6942 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6943 &hcon->src, &hcon->dst);
6947 BT_DBG("chan %p, len %d", chan, skb->len);
6949 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6952 if (chan->imtu < skb->len)
6955 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by CID: signaling, connectionless,
 * ATT, LE signaling, SMP, or a dynamic data channel.  Takes ownership
 * of @skb.
 */
6962 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6964 struct l2cap_hdr *lh = (void *) skb->data;
6968 skb_pull(skb, L2CAP_HDR_SIZE);
6969 cid = __le16_to_cpu(lh->cid);
6970 len = __le16_to_cpu(lh->len);
/* Header length must match the payload exactly. */
6972 if (len != skb->len) {
6977 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6980 case L2CAP_CID_SIGNALING:
6981 l2cap_sig_channel(conn, skb);
6984 case L2CAP_CID_CONN_LESS:
/* Connectionless payload starts with the destination PSM. */
6985 psm = get_unaligned((__le16 *) skb->data);
6986 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6987 l2cap_conless_channel(conn, psm, skb);
6991 l2cap_att_channel(conn, skb);
6994 case L2CAP_CID_LE_SIGNALING:
6995 l2cap_le_sig_channel(conn, skb);
/* SMP failure tears down the whole link. */
6999 if (smp_sig_channel(conn, skb))
7000 l2cap_conn_del(conn->hcon, EACCES);
7004 l2cap_data_channel(conn, cid, skb);
7009 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * @bdaddr?  Scan listening channels; an exact source-address match
 * (lm1) takes precedence over wildcard BDADDR_ANY listeners (lm2).
 * Returns HCI link-mode flags (accept / master).
 */
7011 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7013 int exact = 0, lm1 = 0, lm2 = 0;
7014 struct l2cap_chan *c;
7016 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7018 /* Find listening sockets and check their link_mode */
7019 read_lock(&chan_list_lock);
7020 list_for_each_entry(c, &chan_list, global_l) {
7021 if (c->state != BT_LISTEN)
7024 if (!bacmp(&c->src, &hdev->bdaddr)) {
7025 lm1 |= HCI_LM_ACCEPT;
7026 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7027 lm1 |= HCI_LM_MASTER;
7029 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7030 lm2 |= HCI_LM_ACCEPT;
7031 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7032 lm2 |= HCI_LM_MASTER;
7035 read_unlock(&chan_list_lock);
7037 return exact ? lm1 : lm2;
/* HCI callback: the baseband connection attempt completed.  On success
 * attach/create the L2CAP connection; on failure tear it down with the
 * mapped errno.
 */
7040 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7042 struct l2cap_conn *conn;
7044 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7047 conn = l2cap_conn_add(hcon);
7049 l2cap_conn_ready(conn);
7051 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: supply the HCI reason code to use when the controller
 * asks why we are disconnecting; falls back to "remote user
 * terminated" if no L2CAP state exists.
 */
7055 int l2cap_disconn_ind(struct hci_conn *hcon)
7057 struct l2cap_conn *conn = hcon->l2cap_data;
7059 BT_DBG("hcon %p", hcon);
7062 return HCI_ERROR_REMOTE_USER_TERM;
7063 return conn->disc_reason;
/* HCI callback: the link went down — tear down the L2CAP connection
 * with the HCI reason mapped to an errno.
 */
7066 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7068 BT_DBG("hcon %p reason %d", hcon, reason);
7070 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a grace timer (medium security) or closes
 * the channel (high security); regaining it cancels the timer.
 */
7073 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7075 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7078 if (encrypt == 0x00) {
7079 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7080 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7081 } else if (chan->sec_level == BT_SECURITY_HIGH)
7082 l2cap_chan_close(chan, ECONNREFUSED);
7084 if (chan->sec_level == BT_SECURITY_MEDIUM)
7085 __clear_chan_timer(chan);
/* HCI callback: an authentication/encryption procedure finished.
 * Walk every channel on the connection and advance its state:
 * LE links feed SMP; ATT channels become ready; pending BR/EDR
 * connects are started or answered (connect response + first config
 * request) according to @status.
 * NOTE(review): some branch/else lines are elided in this excerpt.
 */
7089 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7091 struct l2cap_conn *conn = hcon->l2cap_data;
7092 struct l2cap_chan *chan;
7097 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7099 if (hcon->type == LE_LINK) {
/* LE: successful encryption triggers SMP key distribution. */
7100 if (!status && encrypt)
7101 smp_distribute_keys(conn, 0);
7102 cancel_delayed_work(&conn->security_timer);
7105 mutex_lock(&conn->chan_lock);
7107 list_for_each_entry(chan, &conn->chan_l, list) {
7108 l2cap_chan_lock(chan);
7110 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7111 state_to_string(chan->state));
/* A2MP has its own security handling; skip it here. */
7113 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
7114 l2cap_chan_unlock(chan);
7118 if (chan->scid == L2CAP_CID_ATT) {
7119 if (!status && encrypt) {
7120 chan->sec_level = hcon->sec_level;
7121 l2cap_chan_ready(chan);
7124 l2cap_chan_unlock(chan);
7128 if (!__l2cap_no_conn_pending(chan)) {
7129 l2cap_chan_unlock(chan);
7133 if (!status && (chan->state == BT_CONNECTED ||
7134 chan->state == BT_CONFIG)) {
7135 chan->ops->resume(chan);
7136 l2cap_check_encryption(chan, encrypt);
7137 l2cap_chan_unlock(chan);
7141 if (chan->state == BT_CONNECT) {
7143 l2cap_start_connection(chan);
7145 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7146 } else if (chan->state == BT_CONNECT2) {
7147 struct l2cap_conn_rsp rsp;
7151 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Defer the accept decision to userspace. */
7152 res = L2CAP_CR_PEND;
7153 stat = L2CAP_CS_AUTHOR_PEND;
7154 chan->ops->defer(chan);
7156 l2cap_state_change(chan, BT_CONFIG);
7157 res = L2CAP_CR_SUCCESS;
7158 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the pending connect. */
7161 l2cap_state_change(chan, BT_DISCONN);
7162 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7163 res = L2CAP_CR_SEC_BLOCK;
7164 stat = L2CAP_CS_NO_INFO;
7167 rsp.scid = cpu_to_le16(chan->dcid);
7168 rsp.dcid = cpu_to_le16(chan->scid);
7169 rsp.result = cpu_to_le16(res);
7170 rsp.status = cpu_to_le16(stat);
7171 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Kick off configuration right after a successful accept. */
7174 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7175 res == L2CAP_CR_SUCCESS) {
7177 set_bit(CONF_REQ_SENT, &chan->conf_state);
7178 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7180 l2cap_build_conf_req(chan, buf),
7182 chan->num_conf_req++;
7186 l2cap_chan_unlock(chan);
7189 mutex_unlock(&conn->chan_lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * ACL_START(_NO_FLUSH) fragments carry the L2CAP header from which the
 * total frame length is read; continuation fragments are accumulated
 * into conn->rx_skb until rx_len reaches zero, then the frame is
 * dispatched via l2cap_recv_frame().
 * NOTE(review): switch/case structure and drop paths are partially
 * elided in this excerpt.
 */
7194 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7196 struct l2cap_conn *conn = hcon->l2cap_data;
7197 struct l2cap_hdr *hdr;
7200 /* For AMP controller do not create l2cap conn */
7201 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7205 conn = l2cap_conn_add(hcon);
7210 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7214 case ACL_START_NO_FLUSH:
/* A new start fragment while one is pending means the previous
 * frame was truncated: discard it and flag the loss. */
7217 BT_ERR("Unexpected start frame (len %d)", skb->len);
7218 kfree_skb(conn->rx_skb);
7219 conn->rx_skb = NULL;
7221 l2cap_conn_unreliable(conn, ECOMM);
7224 /* Start fragment always begin with Basic L2CAP header */
7225 if (skb->len < L2CAP_HDR_SIZE) {
7226 BT_ERR("Frame is too short (len %d)", skb->len);
7227 l2cap_conn_unreliable(conn, ECOMM);
7231 hdr = (struct l2cap_hdr *) skb->data;
7232 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7234 if (len == skb->len) {
7235 /* Complete frame received */
7236 l2cap_recv_frame(conn, skb);
7240 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7242 if (skb->len > len) {
7243 BT_ERR("Frame is too long (len %d, expected len %d)",
7245 l2cap_conn_unreliable(conn, ECOMM);
7249 /* Allocate skb for the complete frame (with header) */
7250 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7254 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7256 conn->rx_len = len - skb->len;
7260 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7262 if (!conn->rx_len) {
7263 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7264 l2cap_conn_unreliable(conn, ECOMM);
7268 if (skb->len > conn->rx_len) {
7269 BT_ERR("Fragment is too long (len %d, expected %d)",
7270 skb->len, conn->rx_len);
7271 kfree_skb(conn->rx_skb);
7272 conn->rx_skb = NULL;
7274 l2cap_conn_unreliable(conn, ECOMM);
7278 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7280 conn->rx_len -= skb->len;
7282 if (!conn->rx_len) {
7283 /* Complete frame received. l2cap_recv_frame
7284 * takes ownership of the skb so set the global
7285 * rx_skb pointer to NULL first.
7287 struct sk_buff *rx_skb = conn->rx_skb;
7288 conn->rx_skb = NULL;
7289 l2cap_recv_frame(conn, rx_skb);
/* debugfs seq_file show: dump one line per registered channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode).
 */
7299 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7301 struct l2cap_chan *c;
7303 read_lock(&chan_list_lock);
7305 list_for_each_entry(c, &chan_list, global_l) {
7306 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7308 c->state, __le16_to_cpu(c->psm),
7309 c->scid, c->dcid, c->imtu, c->omtu,
7310 c->sec_level, c->mode);
7313 read_unlock(&chan_list_lock);
/* debugfs open: bind the seq_file single-shot show handler. */
7318 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7320 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for /sys/kernel/debug/bluetooth/l2cap. */
7323 static const struct file_operations l2cap_debugfs_fops = {
7324 .open = l2cap_debugfs_open,
7326 .llseek = seq_lseek,
7327 .release = single_release,
/* Dentry for the debugfs file, removed again in l2cap_exit(). */
7330 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the debugfs
 * entry (skipped when the bluetooth debugfs root is unavailable).
 */
7332 int __init l2cap_init(void)
7336 err = l2cap_init_sockets();
7340 if (IS_ERR_OR_NULL(bt_debugfs))
7343 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7344 NULL, &l2cap_debugfs_fops);
/* Module exit: remove the debugfs entry and unregister sockets. */
7349 void l2cap_exit(void)
7351 debugfs_remove(l2cap_debugfs);
7352 l2cap_cleanup_sockets();
/* Runtime-tunable knob to disable ERTM (writable via sysfs, 0644). */
7355 module_param(disable_ertm, bool, 0644);
7356 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");