2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/* Feature mask advertised in L2CAP Information Responses and the
 * fixed-channel bitmap (signalling + connectionless reception). */
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
/* Global list of every L2CAP channel, protected by chan_list_lock. */
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file.
 * NOTE(review): the l2cap_send_cmd() prototype is cut mid-parameter-list
 * in this copy (its "void *data);" continuation line is missing). */
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
/* Map an HCI address type to the corresponding BDADDR_* constant for
 * the given link: on LE links, public -> BDADDR_LE_PUBLIC, anything
 * else -> BDADDR_LE_RANDOM.
 * NOTE(review): the embedded original-line numbering jumps (66 -> 68,
 * then to 74), so the non-LE fallback and closing brace are missing
 * from this copy — presumably it returns the BR/EDR type; confirm
 * against the full source. */
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
/* Unlocked per-connection channel lookups by destination / source CID.
 * Callers are expected to hold conn->chan_lock (see the locked wrappers
 * below).  NOTE(review): the loop bodies and return paths are missing
 * from this copy (embedded numbering jumps 81 -> 88 and 93 -> 100). */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
/* Takes conn->chan_lock around the unlocked lookup.  The lines dropped
 * between "108" and "111" presumably lock the found channel before the
 * list lock is released — TODO confirm against the full source. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
/* Same pattern as above, keyed on the destination CID instead. */
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
/* Unlocked lookup of a channel by pending signalling identifier. */
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
/* Locked wrapper: serializes the ident lookup with conn->chan_lock. */
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
/* Global lookup: find a channel bound to (psm, src address) on the
 * chan_list.  Caller must hold chan_list_lock (compares c->sport). */
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM.  If @psm is non-zero it must not already be
 * bound on @src; if zero, auto-allocate an odd dynamic PSM in the range
 * 0x1001..0x10ff (step 2 keeps the low octet odd, per the L2CAP spec).
 * NOTE(review): the error-return and success paths between the visible
 * lines are missing from this copy (numbering jumps 176 -> 189,
 * 192 -> 199). */
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
/* Assign a fixed source CID to the channel under chan_list_lock.
 * NOTE(review): the assignment itself (lines 206-208) is missing here. */
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic CID on this connection by scanning
 * L2CAP_CID_DYN_START..L2CAP_CID_DYN_END against existing SCIDs. */
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition the channel to @state, logging the old -> new transition,
 * and notify the owner through ops->state_change (err = 0). */
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state, 0);
/* Combined state change + error report.  NOTE(review): the line that
 * assigns chan->state (between "235" and "239") is missing here. */
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
239 chan->ops->state_change(chan, chan->state, err);
/* Report an error to the owner without changing the channel state. */
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
244 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer — but only if the monitor timer is
 * not already pending (monitor supersedes retrans) and a timeout has
 * been negotiated. */
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer, cancelling any pending retrans timer
 * first since the two are mutually exclusive. */
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame carrying ERTM txseq == seq. */
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 and removed from the head in constant time, without further memory allocations.
/* Allocate and reset a sequence-number list sized for @size entries.
 * NOTE(review): the kmalloc NULL-check and the "return 0" tail (lines
 * 300-311) are missing from this copy. */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a no-op). */
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
/* O(1) membership test: a slot not equal to CLEAR means @seq is queued. */
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list and return it.  Head removal is O(1);
 * removing an interior element walks the chain to find the predecessor.
 * Returns L2CAP_SEQ_LIST_CLEAR when the list is empty or @seq is absent.
 * NOTE(review): the final "return seq;" and closing brace (lines
 * 354-356) are missing from this copy. */
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
/* Pop: remove and return the current head in constant time. */
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset the whole list to empty: clear every slot plus head and tail.
 * Returns early if the list is already empty. */
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail in O(1).  Duplicates are ignored (the slot is
 * already occupied); an empty list makes @seq the new head, otherwise
 * the old tail slot is chained to @seq. */
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan->chan_timer: close the channel with a
 * reason derived from its current state (ECONNREFUSED for connected/
 * configuring or secured connect attempts; the other branch, dropped
 * from this copy between "412" and "416", presumably sets ETIMEDOUT —
 * TODO confirm).  Runs under conn->chan_lock and the channel lock, then
 * notifies the owner and drops the timer's channel reference. */
396 static void l2cap_chan_timeout(struct work_struct *work)
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
400 struct l2cap_conn *conn = chan->conn;
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
416 l2cap_chan_close(chan, reason);
418 l2cap_chan_unlock(chan);
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
423 l2cap_chan_put(chan);
/* Allocate and initialise a new channel: zeroed, on the global list,
 * timer armed with l2cap_chan_timeout, state BT_OPEN, refcount 1.
 * NOTE(review): the allocation-failure check (around line 431) is
 * missing from this copy. */
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list and free.
 * NOTE(review): the kfree (around line 464) is missing from this copy. */
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
/* Take a channel reference (kref_get call dropped from this copy). */
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a channel reference; destroys the channel on the last put. */
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
/* Install the spec-default channel parameters: CRC16 FCS, default
 * max-transmit and TX window sizes, low security, and force-active
 * radio policy. */
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: pick source/destination CIDs and an outgoing
 * MTU according to the channel type, seed the extended-flow-spec
 * defaults, and link the channel onto conn->chan_l while holding a
 * channel ref and an hci_conn ref.  Caller holds conn->chan_lock.
 * NOTE(review): several "break;" lines and the default: label are
 * missing from this copy (numbering gaps at 505, 515-517, 523-524,
 * 530-532, 537-538). */
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
/* LE: ATT keeps its fixed CID, everything else gets a dynamic one. */
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 if (chan->dcid == L2CAP_CID_ATT)
508 chan->scid = L2CAP_CID_ATT;
510 chan->scid = l2cap_alloc_cid(conn);
512 /* Alloc CID for connection-oriented socket */
513 chan->scid = l2cap_alloc_cid(conn);
514 chan->omtu = L2CAP_DEFAULT_MTU;
518 case L2CAP_CHAN_CONN_LESS:
519 /* Connectionless socket */
520 chan->scid = L2CAP_CID_CONN_LESS;
521 chan->dcid = L2CAP_CID_CONN_LESS;
522 chan->omtu = L2CAP_DEFAULT_MTU;
525 case L2CAP_CHAN_CONN_FIX_A2MP:
526 chan->scid = L2CAP_CID_A2MP;
527 chan->dcid = L2CAP_CID_A2MP;
528 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
529 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
533 /* Raw socket can send/recv signalling messages only */
534 chan->scid = L2CAP_CID_SIGNALING;
535 chan->dcid = L2CAP_CID_SIGNALING;
536 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort extended flow spec defaults for every channel type. */
539 chan->local_id = L2CAP_BESTEFFORT_ID;
540 chan->local_stype = L2CAP_SERV_BESTEFFORT;
541 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
542 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
543 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
544 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
546 l2cap_chan_hold(chan);
548 hci_conn_hold(conn->hcon);
550 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
553 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
555 mutex_lock(&conn->chan_lock);
556 __l2cap_chan_add(conn, chan);
557 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection (if any): cancel the channel timer,
 * unlink it, drop the refs taken by __l2cap_chan_add(), tear down any
 * AMP logical link, notify the owner, and — unless configuration never
 * completed — purge per-mode state (ERTM timers/queues, streaming
 * queue).  NOTE(review): the "if (conn) {" guard, a "return;" after the
 * CONF_NOT_COMPLETE check, and several break statements are missing
 * from this copy (numbering gaps at 567-568, 574-576, 594-596, etc.). */
560 void l2cap_chan_del(struct l2cap_chan *chan, int err)
562 struct l2cap_conn *conn = chan->conn;
564 __clear_chan_timer(chan);
566 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
569 struct amp_mgr *mgr = conn->hcon->amp_mgr;
570 /* Delete from channel list */
571 list_del(&chan->list);
573 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an hci_conn reference. */
577 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
578 hci_conn_drop(conn->hcon);
580 if (mgr && mgr->bredr_chan == chan)
581 mgr->bredr_chan = NULL;
584 if (chan->hs_hchan) {
585 struct hci_chan *hs_hchan = chan->hs_hchan;
587 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
588 amp_disconnect_logical_link(hs_hchan);
591 chan->ops->teardown(chan, err);
593 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Per-mode cleanup (switch on chan->mode; the switch header itself is
 * one of the dropped lines). */
597 case L2CAP_MODE_BASIC:
600 case L2CAP_MODE_ERTM:
601 __clear_retrans_timer(chan);
602 __clear_monitor_timer(chan);
603 __clear_ack_timer(chan);
605 skb_queue_purge(&chan->srej_q);
607 l2cap_seq_list_free(&chan->srej_list);
608 l2cap_seq_list_free(&chan->retrans_list);
612 case L2CAP_MODE_STREAMING:
613 skb_queue_purge(&chan->tx_q);
/* Reject an incoming LE credit-based connection: pick the result code
 * (authorization pending when setup was deferred, otherwise bad PSM),
 * move to BT_DISCONN and answer the stored request ident. */
620 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
622 struct l2cap_conn *conn = chan->conn;
623 struct l2cap_le_conn_rsp rsp;
626 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
627 result = L2CAP_CR_AUTHORIZATION;
629 result = L2CAP_CR_BAD_PSM;
631 l2cap_state_change(chan, BT_DISCONN);
633 rsp.dcid = cpu_to_le16(chan->scid);
634 rsp.mtu = cpu_to_le16(chan->imtu);
635 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
636 rsp.credits = __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS);
637 rsp.result = cpu_to_le16(result);
639 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart: reject with SEC_BLOCK when deferred, else
 * bad PSM, and send a classic Connection Response. */
643 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
645 struct l2cap_conn *conn = chan->conn;
646 struct l2cap_conn_rsp rsp;
649 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
650 result = L2CAP_CR_SEC_BLOCK;
652 result = L2CAP_CR_BAD_PSM;
654 l2cap_state_change(chan, BT_DISCONN);
656 rsp.scid = cpu_to_le16(chan->dcid);
657 rsp.dcid = cpu_to_le16(chan->scid);
658 rsp.result = cpu_to_le16(result);
659 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
661 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* State-machine driven channel shutdown.  Connected/configuring PSM
 * channels get a Disconnect Request with a send timeout; half-open
 * incoming channels (BT_CONNECT2) are rejected per link type; earlier
 * states are simply torn down.  NOTE(review): the case labels of the
 * switch and several break statements are dropped from this copy. */
664 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
666 struct l2cap_conn *conn = chan->conn;
668 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
670 switch (chan->state) {
672 chan->ops->teardown(chan, 0);
677 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
678 * check for chan->psm.
680 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
681 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
682 l2cap_send_disconn_req(chan, reason);
684 l2cap_chan_del(chan, reason);
688 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
689 if (conn->hcon->type == ACL_LINK)
690 l2cap_chan_connect_reject(chan);
691 else if (conn->hcon->type == LE_LINK)
692 l2cap_chan_le_connect_reject(chan);
695 l2cap_chan_del(chan, reason);
700 l2cap_chan_del(chan, reason);
704 chan->ops->teardown(chan, 0);
/* Translate channel type + security level into the HCI authentication
 * requirement.  SDP (and 3DSP on connectionless channels) is demoted to
 * BT_SECURITY_SDP and never requests bonding.  NOTE(review): the first
 * visible case label (raw channels, around line 712) and closing braces
 * are dropped from this copy. */
709 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
711 switch (chan->chan_type) {
713 switch (chan->sec_level) {
714 case BT_SECURITY_HIGH:
715 return HCI_AT_DEDICATED_BONDING_MITM;
716 case BT_SECURITY_MEDIUM:
717 return HCI_AT_DEDICATED_BONDING;
719 return HCI_AT_NO_BONDING;
722 case L2CAP_CHAN_CONN_LESS:
723 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
724 if (chan->sec_level == BT_SECURITY_LOW)
725 chan->sec_level = BT_SECURITY_SDP;
727 if (chan->sec_level == BT_SECURITY_HIGH)
728 return HCI_AT_NO_BONDING_MITM;
730 return HCI_AT_NO_BONDING;
732 case L2CAP_CHAN_CONN_ORIENTED:
733 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
734 if (chan->sec_level == BT_SECURITY_LOW)
735 chan->sec_level = BT_SECURITY_SDP;
737 if (chan->sec_level == BT_SECURITY_HIGH)
738 return HCI_AT_NO_BONDING_MITM;
740 return HCI_AT_NO_BONDING;
/* Default: general bonding scaled by security level. */
744 switch (chan->sec_level) {
745 case BT_SECURITY_HIGH:
746 return HCI_AT_GENERAL_BONDING_MITM;
747 case BT_SECURITY_MEDIUM:
748 return HCI_AT_GENERAL_BONDING;
750 return HCI_AT_NO_BONDING;
756 /* Service level security */
/* LE links delegate to SMP; BR/EDR links derive an auth type from the
 * channel and ask the HCI layer to enforce it. */
757 int l2cap_chan_check_security(struct l2cap_chan *chan)
759 struct l2cap_conn *conn = chan->conn;
762 if (conn->hcon->type == LE_LINK)
763 return smp_conn_security(conn->hcon, chan->sec_level);
765 auth_type = l2cap_get_auth_type(chan);
767 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier under conn->lock, wrapping
 * within the kernel-reserved 1..128 range.  NOTE(review): the wrap
 * assignment and return (lines 783-790) are dropped from this copy. */
770 static u8 l2cap_get_ident(struct l2cap_conn *conn)
774 /* Get next available identificator.
775 * 1 - 128 are used by kernel.
776 * 129 - 199 are reserved.
777 * 200 - 254 are used by utilities like l2ping, etc.
780 spin_lock(&conn->lock);
782 if (++conn->tx_ident > 128)
787 spin_unlock(&conn->lock);
/* Build a signalling command skb and push it to the controller at
 * maximum priority, using a non-flushable ACL start when the adapter
 * supports it.  NOTE(review): the skb NULL-check body and the default
 * flags assignment are among the lines dropped from this copy. */
792 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
795 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
798 BT_DBG("code 0x%2.2x", code);
803 if (lmp_no_flush_capable(conn->hcon->hdev))
804 flags = ACL_START_NO_FLUSH;
808 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
809 skb->priority = HCI_PRIO_MAX;
811 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any state other than
 * stable or wait-prepare). */
814 static bool __chan_is_moving(struct l2cap_chan *chan)
816 return chan->move_state != L2CAP_MOVE_STABLE &&
817 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for @chan: route it over the high-speed AMP
 * channel when one is active and no move is in flight, otherwise over
 * the BR/EDR ACL with flushability derived from the channel flags. */
820 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
822 struct hci_conn *hcon = chan->conn->hcon;
825 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
828 if (chan->hs_hcon && !__chan_is_moving(chan)) {
830 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
837 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
838 lmp_no_flush_capable(hcon->hdev))
839 flags = ACL_START_NO_FLUSH;
843 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
844 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into l2cap_ctrl: common
 * reqseq/final bits, then either S-frame (poll/super) or I-frame
 * (sar/txseq) bits depending on the frame-type bit. */
847 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
849 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
850 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
852 if (enh & L2CAP_CTRL_FRAME_TYPE) {
855 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
856 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
863 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
864 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decode for the 32-bit extended control field (extended window). */
871 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
873 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
874 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
876 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
879 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
880 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
887 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
888 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull the control field off the front of an incoming skb, choosing
 * 32-bit vs 16-bit layout by the channel's FLAG_EXT_CTRL. */
895 static inline void __unpack_control(struct l2cap_chan *chan,
898 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
899 __unpack_extended_control(get_unaligned_le32(skb->data),
900 &bt_cb(skb)->control);
901 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
903 __unpack_enhanced_control(get_unaligned_le16(skb->data),
904 &bt_cb(skb)->control);
905 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode l2cap_ctrl into the 32-bit extended control field — inverse of
 * __unpack_extended_control().  NOTE(review): the trailing
 * "return packed;" lines are dropped from this copy. */
909 static u32 __pack_extended_control(struct l2cap_ctrl *control)
913 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
914 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
916 if (control->sframe) {
917 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
918 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
919 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
921 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
922 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* 16-bit enhanced control field encoder — inverse of
 * __unpack_enhanced_control(). */
928 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
932 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
933 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
935 if (control->sframe) {
936 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
937 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
938 packed |= L2CAP_CTRL_FRAME_TYPE;
940 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
941 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an outgoing skb just after the
 * basic L2CAP header, sized per FLAG_EXT_CTRL. */
947 static inline void __pack_control(struct l2cap_chan *chan,
948 struct l2cap_ctrl *control,
951 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
952 put_unaligned_le32(__pack_extended_control(control),
953 skb->data + L2CAP_HDR_SIZE);
955 put_unaligned_le16(__pack_enhanced_control(control),
956 skb->data + L2CAP_HDR_SIZE);
/* Header size for this channel's ERTM frames: extended (4-byte control)
 * or enhanced (2-byte control) layout. */
960 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
962 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
963 return L2CAP_EXT_HDR_SIZE;
965 return L2CAP_ENH_HDR_SIZE;
/* Build a complete S-frame PDU: basic header, packed control field,
 * and — when CRC16 FCS is negotiated — a trailing FCS over the whole
 * frame so far.  Returns ERR_PTR(-ENOMEM) on allocation failure. */
968 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
972 struct l2cap_hdr *lh;
973 int hlen = __ertm_hdr_size(chan);
975 if (chan->fcs == L2CAP_FCS_CRC16)
976 hlen += L2CAP_FCS_SIZE;
978 skb = bt_skb_alloc(hlen, GFP_KERNEL);
981 return ERR_PTR(-ENOMEM);
983 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
984 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
985 lh->cid = cpu_to_le16(chan->dcid);
987 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
988 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
990 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
992 if (chan->fcs == L2CAP_FCS_CRC16) {
993 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
994 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
997 skb->priority = HCI_PRIO_MAX;
/* Transmit an ERTM supervisory frame described by @control: merges a
 * pending F-bit, tracks RNR-sent state, records the acked sequence for
 * non-SREJ frames (cancelling the ack timer), then packs and sends the
 * PDU.  No-ops for I-frames or while an AMP move is in progress.
 * NOTE(review): early "return" statements and the IS_ERR(skb) check are
 * among the lines dropped from this copy. */
1001 static void l2cap_send_sframe(struct l2cap_chan *chan,
1002 struct l2cap_ctrl *control)
1004 struct sk_buff *skb;
1007 BT_DBG("chan %p, control %p", chan, control);
1009 if (!control->sframe)
1012 if (__chan_is_moving(chan))
1015 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1019 if (control->super == L2CAP_SUPER_RR)
1020 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1021 else if (control->super == L2CAP_SUPER_RNR)
1022 set_bit(CONN_RNR_SENT, &chan->conn_state);
1024 if (control->super != L2CAP_SUPER_SREJ) {
1025 chan->last_acked_seq = control->reqseq;
1026 __clear_ack_timer(chan);
1029 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1030 control->final, control->poll, control->super);
1032 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1033 control_field = __pack_extended_control(control);
1035 control_field = __pack_enhanced_control(control);
1037 skb = l2cap_create_sframe_pdu(chan, control_field);
1039 l2cap_do_send(chan, skb);
/* Convenience wrapper: send RR (or RNR when locally busy) acking the
 * current buffer_seq, with the requested poll bit. */
1042 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1044 struct l2cap_ctrl control;
1046 BT_DBG("chan %p, poll %d", chan, poll);
1048 memset(&control, 0, sizeof(control));
1050 control.poll = poll;
1052 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1053 control.super = L2CAP_SUPER_RNR;
1055 control.super = L2CAP_SUPER_RR;
1057 control.reqseq = chan->buffer_seq;
1058 l2cap_send_sframe(chan, &control);
/* True when no Connection Request is outstanding for this channel. */
1061 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1063 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: high-speed
 * must be enabled, the peer must advertise the A2MP fixed channel, and
 * at least one non-BR/EDR AMP controller must be up.  The final policy
 * check requires BT_CHANNEL_POLICY_AMP_PREFERRED (the fallthrough
 * "return false" is among the lines dropped from this copy). */
1066 static bool __amp_capable(struct l2cap_chan *chan)
1068 struct l2cap_conn *conn = chan->conn;
1069 struct hci_dev *hdev;
1070 bool amp_available = false;
1072 if (!conn->hs_enabled)
1075 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1078 read_lock(&hci_dev_list_lock);
1079 list_for_each_entry(hdev, &hci_dev_list, list) {
1080 if (hdev->amp_type != AMP_TYPE_BREDR &&
1081 test_bit(HCI_UP, &hdev->flags)) {
1082 amp_available = true;
1086 read_unlock(&hci_dev_list_lock);
1088 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1089 return amp_available;
/* EFS validity check; body (lines 1097-1098) dropped from this copy. */
1094 static bool l2cap_check_efs(struct l2cap_chan *chan)
1096 /* Check EFS parameters */
/* Send an L2CAP Connection Request for @chan: fresh ident, connect
 * marked pending until the response arrives. */
1100 void l2cap_send_conn_req(struct l2cap_chan *chan)
1102 struct l2cap_conn *conn = chan->conn;
1103 struct l2cap_conn_req req;
1105 req.scid = cpu_to_le16(chan->scid);
1106 req.psm = chan->psm;
1108 chan->ident = l2cap_get_ident(conn);
1110 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1112 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant: Create Channel Request targeting controller @amp_id. */
1115 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1117 struct l2cap_create_chan_req req;
1118 req.scid = cpu_to_le16(chan->scid);
1119 req.psm = chan->psm;
1120 req.amp_id = amp_id;
1122 chan->ident = l2cap_get_ident(chan->conn);
1124 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * reset retry counts on queued frames, roll back sequence tracking,
 * clear SREJ/retransmit state and park RX/TX state machines for the
 * move.  No-op for non-ERTM channels. */
1128 static void l2cap_move_setup(struct l2cap_chan *chan)
1130 struct sk_buff *skb;
1132 BT_DBG("chan %p", chan);
1134 if (chan->mode != L2CAP_MODE_ERTM)
1137 __clear_retrans_timer(chan);
1138 __clear_monitor_timer(chan);
1139 __clear_ack_timer(chan);
1141 chan->retry_count = 0;
1142 skb_queue_walk(&chan->tx_q, skb) {
1143 if (bt_cb(skb)->control.retries)
1144 bt_cb(skb)->control.retries = 1;
1149 chan->expected_tx_seq = chan->buffer_seq;
1151 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1152 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1153 l2cap_seq_list_clear(&chan->retrans_list);
1154 l2cap_seq_list_clear(&chan->srej_list);
1155 skb_queue_purge(&chan->srej_q);
1157 chan->tx_state = L2CAP_TX_STATE_XMIT;
1158 chan->rx_state = L2CAP_RX_STATE_MOVE;
1160 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish a channel move: return move state to stable, then (ERTM only)
 * restart the poll handshake appropriate to the role we played. */
1163 static void l2cap_move_done(struct l2cap_chan *chan)
1165 u8 move_role = chan->move_role;
1166 BT_DBG("chan %p", chan);
1168 chan->move_state = L2CAP_MOVE_STABLE;
1169 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1171 if (chan->mode != L2CAP_MODE_ERTM)
1174 switch (move_role) {
1175 case L2CAP_MOVE_ROLE_INITIATOR:
1176 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1177 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1179 case L2CAP_MOVE_ROLE_RESPONDER:
1180 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel fully connected: drop all configuration flags,
 * cancel the setup timer and notify the owner. */
1185 static void l2cap_chan_ready(struct l2cap_chan *chan)
1187 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1188 chan->conf_state = 0;
1189 __clear_chan_timer(chan);
1191 chan->state = BT_CONNECTED;
1193 chan->ops->ready(chan);
/* Send an LE credit-based Connection Request with our MTU/MPS/credits. */
1196 static void l2cap_le_connect(struct l2cap_chan *chan)
1198 struct l2cap_conn *conn = chan->conn;
1199 struct l2cap_le_conn_req req;
1201 req.psm = chan->psm;
1202 req.scid = cpu_to_le16(chan->scid);
1203 req.mtu = cpu_to_le16(chan->imtu);
1204 req.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
1205 req.credits = __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS);
1207 chan->ident = l2cap_get_ident(conn);
1209 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* LE start: wait for SMP security, then either signal ready (fixed
 * channels — the !chan->psm branch is among the dropped lines) or
 * launch the LE connect procedure. */
1213 static void l2cap_le_start(struct l2cap_chan *chan)
1215 struct l2cap_conn *conn = chan->conn;
1217 if (!smp_conn_security(conn->hcon, chan->sec_level))
1221 l2cap_chan_ready(chan)
1225 if (chan->state == BT_CONNECT)
1226 l2cap_le_connect(chan);
/* Route a new outgoing connection: AMP discovery when AMP-capable,
 * LE start on LE links, classic Connection Request otherwise. */
1229 static void l2cap_start_connection(struct l2cap_chan *chan)
1231 if (__amp_capable(chan)) {
1232 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1233 a2mp_discover_amp(chan);
1234 } else if (chan->conn->hcon->type == LE_LINK) {
1235 l2cap_le_start(chan);
1237 l2cap_send_conn_req(chan);
/* Kick off connection establishment for @chan.  On BR/EDR, the feature
 * mask must be exchanged first: if the Information Request was already
 * sent we wait for completion, else we send it here and return; once
 * features are known and security passes, start the connection. */
1241 static void l2cap_do_start(struct l2cap_chan *chan)
1243 struct l2cap_conn *conn = chan->conn;
1245 if (conn->hcon->type == LE_LINK) {
1246 l2cap_le_start(chan);
1250 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1251 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1254 if (l2cap_chan_check_security(chan) &&
1255 __l2cap_no_conn_pending(chan)) {
1256 l2cap_start_connection(chan);
1259 struct l2cap_info_req req;
1260 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1262 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1263 conn->info_ident = l2cap_get_ident(conn);
1265 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1267 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check a requested mode against both the remote feature mask and our
 * local capabilities (ERTM/streaming are assumed locally supported —
 * the CONFIG guard lines are dropped from this copy). */
1272 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1274 u32 local_feat_mask = l2cap_feat_mask;
1276 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1279 case L2CAP_MODE_ERTM:
1280 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1281 case L2CAP_MODE_STREAMING:
1282 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for @chan (stopping ERTM timers first) and
 * move it to BT_DISCONN with @err reported to the owner.  A2MP fixed
 * channels skip the request and just change state. */
1288 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1290 struct l2cap_conn *conn = chan->conn;
1291 struct l2cap_disconn_req req;
1296 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1297 __clear_retrans_timer(chan);
1298 __clear_monitor_timer(chan);
1299 __clear_ack_timer(chan);
1302 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1303 l2cap_state_change(chan, BT_DISCONN);
1307 req.dcid = cpu_to_le16(chan->dcid);
1308 req.scid = cpu_to_le16(chan->scid);
1309 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1312 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1315 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward once the
 * feature exchange has completed: BT_CONNECT channels are (re)started
 * or closed if their mode is unsupported on a state-2 device;
 * BT_CONNECT2 channels get a Connection Response (pending when security
 * or authorization is still outstanding) followed by the first
 * Configure Request.  Iterates under conn->chan_lock with each channel
 * individually locked. */
1316 static void l2cap_conn_start(struct l2cap_conn *conn)
1318 struct l2cap_chan *chan, *tmp;
1320 BT_DBG("conn %p", conn);
1322 mutex_lock(&conn->chan_lock);
1324 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1325 l2cap_chan_lock(chan);
1327 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1328 l2cap_chan_unlock(chan);
1332 if (chan->state == BT_CONNECT) {
1333 if (!l2cap_chan_check_security(chan) ||
1334 !__l2cap_no_conn_pending(chan)) {
1335 l2cap_chan_unlock(chan);
1339 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1340 && test_bit(CONF_STATE2_DEVICE,
1341 &chan->conf_state)) {
1342 l2cap_chan_close(chan, ECONNRESET);
1343 l2cap_chan_unlock(chan);
1347 l2cap_start_connection(chan);
1349 } else if (chan->state == BT_CONNECT2) {
1350 struct l2cap_conn_rsp rsp;
1352 rsp.scid = cpu_to_le16(chan->dcid);
1353 rsp.dcid = cpu_to_le16(chan->scid);
1355 if (l2cap_chan_check_security(chan)) {
1356 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1357 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1358 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1359 chan->ops->defer(chan);
1362 l2cap_state_change(chan, BT_CONFIG);
1363 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1364 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1367 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1368 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1371 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1374 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1375 rsp.result != L2CAP_CR_SUCCESS) {
1376 l2cap_chan_unlock(chan);
1380 set_bit(CONF_REQ_SENT, &chan->conf_state);
1381 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1382 l2cap_build_conf_req(chan, buf), buf);
1383 chan->num_conf_req++;
1386 l2cap_chan_unlock(chan);
1389 mutex_unlock(&conn->chan_lock);
1392 /* Find socket with cid and source/destination bdaddr.
1393 * Returns closest match, locked.
/* Look up a channel in the global list by source CID. A channel in the
 * requested state (when state != 0) with an exact src/dst address match is
 * returned immediately; a BDADDR_ANY wildcard match is kept as a fallback.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
	struct l2cap_chan *c, *c1 = NULL;
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		/* Skip channels not in the requested state */
		if (state && c->state != state)
		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;
			/* Exact source and destination match wins outright */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
	read_unlock(&chan_list_lock);
/* Handle a freshly-ready LE link: if an ATT server channel is listening,
 * create a child channel for this connection, unless a client ATT channel
 * already claimed the CID or the remote device is blacklisted.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;
	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
	dst_type = bdaddr_type(hcon, hcon->dst_type);
	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
	l2cap_chan_lock(pchan);
	/* Spawn the per-connection child channel from the listener */
	chan = pchan->ops->new_connection(pchan);
	chan->dcid = L2CAP_CID_ATT;
	/* Copy link addresses and address types into the new channel */
	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;
	__l2cap_chan_add(conn, chan);
	l2cap_chan_unlock(pchan);
/* Called when the underlying HCI link becomes ready: kick SMP for outgoing
 * LE pairing, set up LE ATT channels, then walk every channel on the
 * connection and start or ready it according to link and channel type.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;
	BT_DBG("conn %p", conn);
	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);
	mutex_lock(&conn->chan_lock);
	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);
	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		/* A2MP fixed channels are managed separately; skip them */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
/* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that has requested forced
 * reliability (FLAG_FORCE_RELIABLE), so its owner learns that reliable
 * delivery can no longer be assumed.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
	struct l2cap_chan *chan;
	BT_DBG("conn %p", conn);
	mutex_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	mutex_unlock(&conn->chan_lock);
/* Information-request timer expired: stop waiting for the remote feature
 * mask, mark the request done, and proceed with connection setup anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;
	l2cap_conn_start(conn);
1548 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1549 * callback is called during registration. The ->remove callback is called
1550 * during unregistration.
 An l2cap_user object can either be explicitly unregistered, or it is
1552 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1553 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1554 * External modules must own a reference to the l2cap_conn object if they intend
1555 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1556 * any time if they don't.
/* Register an l2cap_user on a connection. Fails if the user is already
 * registered (its list linkage is non-NULL), if the connection was already
 * torn down, or if the user's ->probe callback rejects it. Serialized via
 * the hci_dev lock (see comment in the body).
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
	struct hci_dev *hdev = conn->hcon->hdev;
	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	if (user->list.next || user->list.prev) {
	/* conn->hchan is NULL after l2cap_conn_del() was called */
	ret = user->probe(conn, user);
	list_add(&user->list, &conn->users);
	hci_dev_unlock(hdev);
EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user. A user whose list linkage
 * is NULL was never registered (or was already removed) and is ignored.
 * The user's ->remove callback runs under the hci_dev lock.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
	struct hci_dev *hdev = conn->hcon->hdev;
	if (!user->list.next || !user->list.prev)
	list_del(&user->list);
	/* Clear linkage so a later unregister call sees "not registered" */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);
	hci_dev_unlock(hdev);
EXPORT_SYMBOL(l2cap_unregister_user);
/* Remove every registered l2cap_user from the connection, invoking each
 * user's ->remove callback. Used during connection teardown.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
	struct l2cap_user *user;
	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
/* Tear down an L2CAP connection: close and release every channel with
 * @err, delete the HCI channel, cancel any pending info/security timers
 * and SMP state, detach from the hci_conn and drop the conn reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
	kfree_skb(conn->rx_skb);
	l2cap_unregister_all_users(conn);
	mutex_lock(&conn->chan_lock);
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so the channel survives until ->close() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);
		l2cap_chan_del(chan, err);
		l2cap_chan_unlock(chan);
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	mutex_unlock(&conn->chan_lock);
	hci_chan_del(conn->hchan);
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	hcon->l2cap_data = NULL;
	l2cap_conn_put(conn);
/* SMP security-procedure timer expired: destroy the SMP state and bring
 * the whole connection down with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);
	BT_DBG("conn %p", conn);
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialise the l2cap_conn for an hci_conn: create the
 * underlying HCI channel, pick the MTU from the link type, and set up
 * the per-connection locks, lists and delayed-work timers.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;
	hchan = hci_chan_create(hcon);
	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
		hci_chan_del(hchan);
	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* The conn keeps a reference on its hci_conn for its whole lifetime */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;
	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
	switch (hcon->type) {
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
		conn->mtu = hcon->hdev->acl_mtu;
	conn->feat_mask = 0;
	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);
	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);
	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference and free the conn */
static void l2cap_conn_free(struct kref *ref)
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
	hci_conn_put(conn->hcon);
/* Take a reference on the connection object */
void l2cap_conn_get(struct l2cap_conn *conn)
	kref_get(&conn->ref);
EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; the conn is released via l2cap_conn_free at zero */
void l2cap_conn_put(struct l2cap_conn *conn)
	kref_put(&conn->ref, l2cap_conn_free);
EXPORT_SYMBOL(l2cap_conn_put);
1768 /* ---- Socket interface ---- */
1770 /* Find socket with psm and source / destination bdaddr.
1771 * Returns closest match.
/* Look up a channel in the global list by PSM, filtering by link type
 * (BR/EDR vs LE source address type). An exact src/dst address match is
 * returned immediately; a BDADDR_ANY wildcard match is kept as fallback.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
	struct l2cap_chan *c, *c1 = NULL;
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		/* Filter on state and on link-type/address-type agreement */
		if (state && c->state != state)
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;
			/* Exact source and destination match wins outright */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
	read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * identified by @psm (connection-oriented) or a fixed @cid. Validates the
 * PSM encoding and channel mode, resolves a route, creates or reuses the
 * hci_conn/l2cap_conn, adds the channel and begins connecting.
 * Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));
	hdev = hci_get_route(dst, &chan->src);
		return -EHOSTUNREACH;
	l2cap_chan_lock(chan);
	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
	switch (chan->state) {
		/* Already connecting */
		/* Already connected */
	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;
	auth_type = l2cap_get_auth_type(chan);
	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
		err = PTR_ERR(hcon);
	conn = l2cap_conn_add(hcon);
		hci_conn_drop(hcon);
	/* A fixed CID must not already be claimed on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);
	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);
	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
			l2cap_do_start(chan);
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
/* ERTM monitor timer expired while waiting for an F-bit response: feed
 * the MONITOR_TO event into the TX state machine, then drop the timer's
 * channel reference.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);
	BT_DBG("chan %p", chan);
	l2cap_chan_lock(chan);
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed the RETRANS_TO event into the
 * TX state machine, then drop the timer's channel reference.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);
	BT_DBG("chan %p", chan);
	l2cap_chan_lock(chan);
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
/* Transmit queued SDU fragments in Streaming mode: number each I-frame,
 * append an FCS when configured, and send immediately. No retransmission
 * state is kept in this mode.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
	struct sk_buff *skb;
	struct l2cap_ctrl *control;
	BT_DBG("chan %p, skbs %p", chan, skbs);
	if (__chan_is_moving(chan))
	skb_queue_splice_tail_init(skbs, &chan->tx_q);
	while (!skb_queue_empty(&chan->tx_q)) {
		skb = skb_dequeue(&chan->tx_q);
		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;
		/* Streaming mode never acknowledges, so reqseq stays 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;
		__pack_control(chan, control, skb);
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		l2cap_do_send(chan, skb);
		BT_DBG("Sent txseq %u", control->txseq);
		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
/* Transmit as many pending I-frames as the remote TX window allows in
 * ERTM mode. Each frame stays on tx_q for possible retransmission; a
 * clone is actually sent and the retransmission timer is (re)armed.
 * Returns the number of frames sent in this call.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	BT_DBG("chan %p", chan);
	if (chan->state != BT_CONNECTED)
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
	if (__chan_is_moving(chan))
	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
		skb = chan->tx_send_head;
		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		/* Piggyback the current ack state onto this I-frame */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;
		__pack_control(chan, control, skb);
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		tx_skb = skb_clone(skb, GFP_KERNEL);
		__set_retrans_timer(chan);
		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit the I-frames whose sequence numbers sit on retrans_list.
 * Enforces the max_tx retry limit (disconnecting when exceeded) and
 * copies cloned skbs before rewriting their control fields and FCS,
 * since cloned data must be treated as read-only.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	BT_DBG("chan %p", chan);
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
	if (__chan_is_moving(chan))
	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);
		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;
		/* Give up and disconnect once the retry limit is exceeded */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			tx_skb = skb_copy(skb, GFP_KERNEL);
			tx_skb = skb_clone(skb, GFP_KERNEL);
			l2cap_seq_list_clear(&chan->retrans_list);
		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		/* Recompute the FCS over the rewritten frame */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
		l2cap_do_send(chan, tx_skb);
		BT_DBG("Resent txseq %d", control.txseq);
		chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame named by control->reqseq */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
/* Queue every unacked frame from control->reqseq up to (not including)
 * tx_send_head for retransmission, then kick the resend engine.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
	struct sk_buff *skb;
	BT_DBG("chan %p, control %p", chan, control);
	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
	if (chan->unacked_frames) {
		/* Skip ahead to the first frame at or after reqseq */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, try to
 * piggyback the ack on outgoing I-frames, send an explicit RR once the
 * unacked window is 3/4 full, or otherwise (re)arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);
	memset(&control, 0, sizeof(control));
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		threshold = chan->ack_win;
		threshold += threshold << 1;
		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			__set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder into newly allocated
 * continuation fragments chained on frag_list (each at most conn->mtu).
 * Returns 0 on success or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		struct sk_buff *tmp;
		count = min_t(unsigned int, conn->mtu, len);
		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
			return PTR_ERR(tmp);
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
		(*frag)->priority = skb->priority;
		/* Account the fragment in the parent skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;
		frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header plus 2-byte PSM,
 * followed by the user payload copied from @msg. Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;
	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	skb->priority = priority;
	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build a Basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload copied from @msg. Returns the skb or an ERR_PTR on
 * allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	BT_DBG("chan %p len %zu", chan, len);
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	skb->priority = priority;
	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);
	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build an ERTM/Streaming I-frame PDU: L2CAP header, a control field
 * placeholder (filled in at transmit time), an optional SDU-length field
 * for the first fragment of a segmented SDU, the payload, and room for
 * an FCS when configured. Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;
	BT_DBG("chan %p len %zu", chan, len);
		return ERR_PTR(-ENOTCONN);
	hlen = __ertm_hdr_size(chan);
		hlen += L2CAP_SDULEN_SIZE;
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
/* Segment an SDU from @msg into I-frame PDUs on @seg_queue. PDU size is
 * bounded by the HCI MTU (so each PDU fits one HCI fragment), the BR/EDR
 * payload cap, ERTM header/FCS overhead, and the remote MPS. SAR bits
 * mark start/continue/end fragments. Returns 0 or a negative errno.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
	struct sk_buff *skb;
	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;
	/* Constrain PDU size for BR/EDR connections */
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
	/* Adjust for largest possible L2CAP overhead. */
		pdu_len -= L2CAP_FCS_SIZE;
	pdu_len -= __ertm_hdr_size(chan);
	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sar = L2CAP_SAR_START;
		pdu_len -= L2CAP_SDULEN_SIZE;
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);
			pdu_len += L2CAP_SDULEN_SIZE;
		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			sar = L2CAP_SAR_CONTINUE;
/* Send @len bytes from @msg on @chan. Connectionless channels send a
 * single G-frame; Basic mode sends one B-frame (bounded by the outgoing
 * MTU); ERTM/Streaming segment the SDU first and hand the fragments to
 * the appropriate transmit engine. Returns bytes sent or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
	struct sk_buff *skb;
	struct sk_buff_head seg_queue;
	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);
		l2cap_do_send(chan, skb);
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);
		l2cap_do_send(chan, skb);
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
		__skb_queue_head_init(&seg_queue);
		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
			l2cap_streaming_send(chan, &seg_queue);
		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		__skb_queue_purge(&seg_queue);
		BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every expected sequence number between the
 * current expected_tx_seq and @txseq that is not already buffered in
 * srej_q, recording each request on srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
	struct l2cap_ctrl control;
	BT_DBG("chan %p, txseq %u", chan, txseq);
	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;
	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
	chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence number,
 * if any requests are outstanding.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	BT_DBG("chan %p", chan);
	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
/* Re-send an SREJ for each outstanding request except @txseq, cycling
 * each popped entry back onto the list. The initial list head is captured
 * so the list is traversed at most once.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
	struct l2cap_ctrl control;
	BT_DBG("chan %p, txseq %u", chan, txseq);
	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;
	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgment up to @reqseq: free every newly
 * acked frame from tx_q, update the unacked count and expected_ack_seq,
 * and stop the retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
	struct sk_buff *acked_skb;
	BT_DBG("chan %p, reqseq %u", chan, reqseq);
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);
	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
	chan->expected_ack_seq = reqseq;
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);
	BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq, drop all
 * pending SREJ requests and buffered out-of-order frames, and return the
 * receiver to the normal RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
	BT_DBG("chan %p", chan);
	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: queue and send new data, handle
 * local-busy transitions, process incoming acks, and switch to WAIT_F
 * (with the monitor timer armed) after sending a poll.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			l2cap_abort_rx_srej_sent(chan);
		l2cap_send_ack(chan);
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			/* RNR was sent while busy; poll with RR to resume */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);
			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
/* ERTM TX state machine, WAIT_F state (a poll is outstanding): queue new
 * data without sending, handle local-busy transitions, and return to XMIT
 * when the final (F) bit arrives. On monitor timeout the poll is repeated
 * up to max_tx times before the channel is disconnected.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			l2cap_abort_rx_srej_sent(chan);
		l2cap_send_ack(chan);
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);
			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* NOTE(review): "0x2.2%x" looks like a typo for
			 * "0x%2.2x" in this debug format string; a doc-only
			 * change cannot fix it -- confirm and fix separately.
			 */
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
	case L2CAP_EV_EXPLICIT_POLL:
	case L2CAP_EV_MONITOR_TO:
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
			l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a TX state-machine event to the handler for the channel's
 * current transmit state (XMIT or WAIT_F).
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);
	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's reqseq/F-bit into the TX state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only a received frame's F-bit into the TX state machine */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2884 /* Copy frame to all raw sockets on that connection */
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	struct sk_buff *nskb;
	struct l2cap_chan *chan;
	BT_DBG("conn %p", conn);
	mutex_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)
		/* Each raw channel receives its own clone of the frame */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (chan->ops->recv(chan, nskb))
	mutex_unlock(&conn->chan_lock);
2912 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header, command header
 * (code/ident/len), and @dlen bytes of @data. Data beyond the connection
 * MTU is carried in continuation fragments chained on frag_list.
 * Returns the skb or NULL on failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);
	skb = bt_skb_alloc(count, GFP_KERNEL);
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	/* LE links use the LE signalling CID, BR/EDR the classic one */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		count = min_t(unsigned int, conn->mtu, len);
		*frag = bt_skb_alloc(count, GFP_KERNEL);
		memcpy(skb_put(*frag, count), data, count);
		frag = &(*frag)->next;
/* Decode one configuration option from *ptr, returning its total encoded
 * length so the caller can decrement the remaining buffer length.  type,
 * olen and val are out-parameters; 1/2/4-byte values are read unaligned,
 * larger options return a pointer to the raw bytes in *val.
 */
2979 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2982 struct l2cap_conf_opt *opt = *ptr;
2985 len = L2CAP_CONF_OPT_SIZE + opt->len;
2993 *val = *((u8 *) opt->val);
2997 *val = get_unaligned_le16(opt->val);
3001 *val = get_unaligned_le32(opt->val);
 /* Other sizes: hand back a pointer to the option payload. */
3005 *val = (unsigned long) opt->val;
3009 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr and advance *ptr past it.
 * For len 1/2/4 the value is stored inline (little-endian, unaligned-safe);
 * otherwise val is treated as a pointer to len bytes to copy.
 */
3013 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3015 struct l2cap_conf_opt *opt = *ptr;
3017 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3024 *((u8 *) opt->val) = val;
3028 put_unaligned_le16(val, opt->val);
3032 put_unaligned_le32(val, opt->val);
3036 memcpy(opt->val, (void *) val, len);
 /* Advance past option header + payload. */
3040 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification option from the channel's local
 * parameters and append it at *ptr.  Field defaults differ between ERTM
 * and streaming mode.
 */
3043 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3045 struct l2cap_conf_efs efs;
3047 switch (chan->mode) {
3048 case L2CAP_MODE_ERTM:
3049 efs.id = chan->local_id;
3050 efs.stype = chan->local_stype;
3051 efs.msdu = cpu_to_le16(chan->local_msdu);
3052 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3053 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3054 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3057 case L2CAP_MODE_STREAMING:
 /* Streaming always advertises best-effort service. */
3059 efs.stype = L2CAP_SERV_BESTEFFORT;
3060 efs.msdu = cpu_to_le16(chan->local_msdu);
3061 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3070 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3071 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR to ack them.  Drops the
 * channel reference taken when the timer was armed.
 */
3074 static void l2cap_ack_timeout(struct work_struct *work)
3076 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3080 BT_DBG("chan %p", chan);
3082 l2cap_chan_lock(chan);
 /* Number of frames received since the last ack we sent. */
3084 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3085 chan->last_acked_seq);
3088 l2cap_send_rr_or_rnr(chan, 0);
3090 l2cap_chan_unlock(chan);
3091 l2cap_chan_put(chan);
/* Reset per-channel sequence state and, for ERTM mode, initialise the
 * RX/TX state machines, retransmit/monitor/ack timers and the SREJ and
 * retransmit sequence lists.  Returns 0 on success or a negative errno
 * from sequence-list allocation.
 */
3094 int l2cap_ertm_init(struct l2cap_chan *chan)
3098 chan->next_tx_seq = 0;
3099 chan->expected_tx_seq = 0;
3100 chan->expected_ack_seq = 0;
3101 chan->unacked_frames = 0;
3102 chan->buffer_seq = 0;
3103 chan->frames_sent = 0;
3104 chan->last_acked_seq = 0;
3106 chan->sdu_last_frag = NULL;
3109 skb_queue_head_init(&chan->tx_q);
 /* AMP move state starts out stable on the BR/EDR controller. */
3111 chan->local_amp_id = AMP_ID_BREDR;
3112 chan->move_id = AMP_ID_BREDR;
3113 chan->move_state = L2CAP_MOVE_STABLE;
3114 chan->move_role = L2CAP_MOVE_ROLE_NONE;
 /* Everything below applies to ERTM only. */
3116 if (chan->mode != L2CAP_MODE_ERTM)
3119 chan->rx_state = L2CAP_RX_STATE_RECV;
3120 chan->tx_state = L2CAP_TX_STATE_XMIT;
3122 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3123 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3124 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3126 skb_queue_head_init(&chan->srej_q);
3128 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3132 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
 /* Unwind the srej_list if retrans_list allocation failed. */
3134 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode if the remote's feature mask
 * supports it; otherwise fall back to basic mode.
 */
3139 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3142 case L2CAP_MODE_STREAMING:
3143 case L2CAP_MODE_ERTM:
3144 if (l2cap_mode_supported(mode, remote_feat_mask))
3148 return L2CAP_MODE_BASIC;
/* Extended window size usable only with high-speed support plus the
 * remote's extended-window feature bit.
 */
3152 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3154 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow spec usable only with high-speed support plus the
 * remote's extended-flow feature bit.
 */
3157 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3159 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout; otherwise the spec defaults are used.
 */
3162 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3163 struct l2cap_conf_rfc *rfc)
3165 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3166 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3168 /* Class 1 devices must have ERTM timeouts
3169 * exceeding the Link Supervision Timeout. The
3170 * default Link Supervision Timeout for AMP
3171 * controllers is 10 seconds.
3173 * Class 1 devices use 0xffffffff for their
3174 * best-effort flush timeout, so the clamping logic
3175 * will result in a timeout that meets the above
3176 * requirement. ERTM timeouts are 16-bit values, so
3177 * the maximum timeout is 65.535 seconds.
3180 /* Convert timeout to milliseconds and round */
3181 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3183 /* This is the recommended formula for class 2 devices
3184 * that start ERTM timers when packets are sent to the
3187 ertm_to = 3 * ertm_to + 500;
 /* Clamp to the 16-bit field before encoding. */
3189 if (ertm_to > 0xffff)
3192 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3193 rfc->monitor_timeout = rfc->retrans_timeout;
3195 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3196 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose between the extended and standard TX window.  If the desired
 * window exceeds the default and extended window sizes are supported,
 * enable the extended control field; otherwise clamp tx_win to the
 * default.  ack_win mirrors the chosen tx_win.
 */
3200 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3202 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3203 __l2cap_ews_supported(chan->conn)) {
3204 /* use extended control field */
3205 set_bit(FLAG_EXT_CTRL, &chan->flags);
3206 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3208 chan->tx_win = min_t(u16, chan->tx_win,
3209 L2CAP_DEFAULT_TX_WINDOW);
3210 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3212 chan->ack_win = chan->tx_win;
/* Build the configuration request payload for a channel into data:
 * MTU option (when non-default) plus mode-specific RFC, EFS, EWS and
 * FCS options.  On the first request, an ERTM/streaming channel may be
 * downgraded to a mode the remote supports.  Returns the encoded length
 * (return statement on a line not shown in this view).
 */
3215 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3217 struct l2cap_conf_req *req = data;
3218 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3219 void *ptr = req->data;
3222 BT_DBG("chan %p", chan);
 /* Mode selection only happens before any config exchange. */
3224 if (chan->num_conf_req || chan->num_conf_rsp)
3227 switch (chan->mode) {
3228 case L2CAP_MODE_STREAMING:
3229 case L2CAP_MODE_ERTM:
3230 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3233 if (__l2cap_efs_supported(chan->conn))
3234 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3238 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3243 if (chan->imtu != L2CAP_DEFAULT_MTU)
3244 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3246 switch (chan->mode) {
3247 case L2CAP_MODE_BASIC:
 /* Only send an explicit basic-mode RFC if the remote knows
 * about ERTM or streaming at all.
 */
3248 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3249 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3252 rfc.mode = L2CAP_MODE_BASIC;
3254 rfc.max_transmit = 0;
3255 rfc.retrans_timeout = 0;
3256 rfc.monitor_timeout = 0;
3257 rfc.max_pdu_size = 0;
3259 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3260 (unsigned long) &rfc);
3263 case L2CAP_MODE_ERTM:
3264 rfc.mode = L2CAP_MODE_ERTM;
3265 rfc.max_transmit = chan->max_tx;
3267 __l2cap_set_ertm_timeouts(chan, &rfc);
 /* MPS: bounded by the connection MTU minus ERTM overhead. */
3269 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3270 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3272 rfc.max_pdu_size = cpu_to_le16(size);
3274 l2cap_txwin_setup(chan);
3276 rfc.txwin_size = min_t(u16, chan->tx_win,
3277 L2CAP_DEFAULT_TX_WINDOW);
3279 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3280 (unsigned long) &rfc);
3282 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3283 l2cap_add_opt_efs(&ptr, chan);
3285 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3286 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
 /* Advertise FCS off when we don't need it and the remote
 * supports the FCS option.
 */
3289 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3290 if (chan->fcs == L2CAP_FCS_NONE ||
3291 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3292 chan->fcs = L2CAP_FCS_NONE;
3293 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3298 case L2CAP_MODE_STREAMING:
3299 l2cap_txwin_setup(chan);
3300 rfc.mode = L2CAP_MODE_STREAMING;
3302 rfc.max_transmit = 0;
3303 rfc.retrans_timeout = 0;
3304 rfc.monitor_timeout = 0;
3306 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3307 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3309 rfc.max_pdu_size = cpu_to_le16(size);
3311 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3312 (unsigned long) &rfc);
3314 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3315 l2cap_add_opt_efs(&ptr, chan);
3317 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3318 if (chan->fcs == L2CAP_FCS_NONE ||
3319 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3320 chan->fcs = L2CAP_FCS_NONE;
3321 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3327 req->dcid = cpu_to_le16(chan->dcid);
3328 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated configuration request in chan->conf_req and
 * build the response payload into data.  Walks each option, records the
 * remote's parameters, negotiates mode/MTU/RFC/EFS/EWS, and encodes the
 * result (SUCCESS / PENDING / UNACCEPT / UNKNOWN) into the response.
 * Returns the response length, or -ECONNREFUSED when negotiation cannot
 * proceed (return of the length is on a line not shown in this view).
 */
3333 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3335 struct l2cap_conf_rsp *rsp = data;
3336 void *ptr = rsp->data;
3337 void *req = chan->conf_req;
3338 int len = chan->conf_len;
3339 int type, hint, olen;
3341 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3342 struct l2cap_conf_efs efs;
3344 u16 mtu = L2CAP_DEFAULT_MTU;
3345 u16 result = L2CAP_CONF_SUCCESS;
3348 BT_DBG("chan %p", chan);
 /* First pass: decode every option the remote sent. */
3350 while (len >= L2CAP_CONF_OPT_SIZE) {
3351 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
 /* Hint options may be ignored if unknown; mask the bit off
 * before dispatching.
 */
3353 hint = type & L2CAP_CONF_HINT;
3354 type &= L2CAP_CONF_MASK;
3357 case L2CAP_CONF_MTU:
3361 case L2CAP_CONF_FLUSH_TO:
3362 chan->flush_to = val;
3365 case L2CAP_CONF_QOS:
3368 case L2CAP_CONF_RFC:
3369 if (olen == sizeof(rfc))
3370 memcpy(&rfc, (void *) val, olen);
3373 case L2CAP_CONF_FCS:
3374 if (val == L2CAP_FCS_NONE)
3375 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3378 case L2CAP_CONF_EFS:
3380 if (olen == sizeof(efs))
3381 memcpy(&efs, (void *) val, olen);
3384 case L2CAP_CONF_EWS:
 /* Extended window requires high-speed support. */
3385 if (!chan->conn->hs_enabled)
3386 return -ECONNREFUSED;
3388 set_bit(FLAG_EXT_CTRL, &chan->flags);
3389 set_bit(CONF_EWS_RECV, &chan->conf_state);
3390 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3391 chan->remote_tx_win = val;
 /* Unknown non-hint option: echo its type back with an
 * UNKNOWN result.
 */
3398 result = L2CAP_CONF_UNKNOWN;
3399 *((u8 *) ptr++) = type;
 /* Mode may only be renegotiated on the first exchange. */
3404 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3407 switch (chan->mode) {
3408 case L2CAP_MODE_STREAMING:
3409 case L2CAP_MODE_ERTM:
3410 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3411 chan->mode = l2cap_select_mode(rfc.mode,
3412 chan->conn->feat_mask);
3417 if (__l2cap_efs_supported(chan->conn))
3418 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3420 return -ECONNREFUSED;
3423 if (chan->mode != rfc.mode)
3424 return -ECONNREFUSED;
3430 if (chan->mode != rfc.mode) {
3431 result = L2CAP_CONF_UNACCEPT;
3432 rfc.mode = chan->mode;
 /* The remote already got one chance; give up after that. */
3434 if (chan->num_conf_rsp == 1)
3435 return -ECONNREFUSED;
3437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3438 (unsigned long) &rfc);
3441 if (result == L2CAP_CONF_SUCCESS) {
3442 /* Configure output options and let the other side know
3443 * which ones we don't like. */
3445 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3446 result = L2CAP_CONF_UNACCEPT;
3449 set_bit(CONF_MTU_DONE, &chan->conf_state);
3451 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
 /* EFS service type must match ours unless either side is
 * no-traffic.
 */
3454 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3455 efs.stype != L2CAP_SERV_NOTRAFIC &&
3456 efs.stype != chan->local_stype) {
3458 result = L2CAP_CONF_UNACCEPT;
3460 if (chan->num_conf_req >= 1)
3461 return -ECONNREFUSED;
3463 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3465 (unsigned long) &efs);
3467 /* Send PENDING Conf Rsp */
3468 result = L2CAP_CONF_PENDING;
3469 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3474 case L2CAP_MODE_BASIC:
3475 chan->fcs = L2CAP_FCS_NONE;
3476 set_bit(CONF_MODE_DONE, &chan->conf_state);
3479 case L2CAP_MODE_ERTM:
 /* EWS option, when present, already supplied the remote
 * window; otherwise take it from the RFC option.
 */
3480 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3481 chan->remote_tx_win = rfc.txwin_size;
3483 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3485 chan->remote_max_tx = rfc.max_transmit;
3487 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3488 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3489 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3490 rfc.max_pdu_size = cpu_to_le16(size);
3491 chan->remote_mps = size;
3493 __l2cap_set_ertm_timeouts(chan, &rfc);
3495 set_bit(CONF_MODE_DONE, &chan->conf_state);
3497 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3498 sizeof(rfc), (unsigned long) &rfc);
3500 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3501 chan->remote_id = efs.id;
3502 chan->remote_stype = efs.stype;
3503 chan->remote_msdu = le16_to_cpu(efs.msdu);
3504 chan->remote_flush_to =
3505 le32_to_cpu(efs.flush_to);
3506 chan->remote_acc_lat =
3507 le32_to_cpu(efs.acc_lat);
3508 chan->remote_sdu_itime =
3509 le32_to_cpu(efs.sdu_itime);
3510 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3512 (unsigned long) &efs);
3516 case L2CAP_MODE_STREAMING:
3517 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3518 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3519 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3520 rfc.max_pdu_size = cpu_to_le16(size);
3521 chan->remote_mps = size;
3523 set_bit(CONF_MODE_DONE, &chan->conf_state);
3525 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3526 (unsigned long) &rfc);
3531 result = L2CAP_CONF_UNACCEPT;
3533 memset(&rfc, 0, sizeof(rfc));
3534 rfc.mode = chan->mode;
3537 if (result == L2CAP_CONF_SUCCESS)
3538 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3540 rsp->scid = cpu_to_le16(chan->dcid);
3541 rsp->result = cpu_to_le16(result);
3542 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a configuration response from the remote and build a follow-up
 * request into data.  Adjusts local MTU/flush/RFC/EWS/EFS parameters to
 * what the remote accepted, updating *result as needed.  Returns the
 * encoded request length or -ECONNREFUSED on an unresolvable conflict
 * (final return is on a line not shown in this view).
 */
3547 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3548 void *data, u16 *result)
3550 struct l2cap_conf_req *req = data;
3551 void *ptr = req->data;
3554 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3555 struct l2cap_conf_efs efs;
3557 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3559 while (len >= L2CAP_CONF_OPT_SIZE) {
3560 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3563 case L2CAP_CONF_MTU:
 /* Remote proposed an MTU below the spec minimum: reject
 * and counter with the minimum.
 */
3564 if (val < L2CAP_DEFAULT_MIN_MTU) {
3565 *result = L2CAP_CONF_UNACCEPT;
3566 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3569 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3572 case L2CAP_CONF_FLUSH_TO:
3573 chan->flush_to = val;
3574 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3578 case L2CAP_CONF_RFC:
3579 if (olen == sizeof(rfc))
3580 memcpy(&rfc, (void *)val, olen);
 /* A state-2 device cannot change mode mid-negotiation. */
3582 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3583 rfc.mode != chan->mode)
3584 return -ECONNREFUSED;
3588 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3589 sizeof(rfc), (unsigned long) &rfc);
3592 case L2CAP_CONF_EWS:
3593 chan->ack_win = min_t(u16, val, chan->ack_win);
3594 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3598 case L2CAP_CONF_EFS:
3599 if (olen == sizeof(efs))
3600 memcpy(&efs, (void *)val, olen);
3602 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3603 efs.stype != L2CAP_SERV_NOTRAFIC &&
3604 efs.stype != chan->local_stype)
3605 return -ECONNREFUSED;
3607 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3608 (unsigned long) &efs);
3611 case L2CAP_CONF_FCS:
3612 if (*result == L2CAP_CONF_PENDING)
3613 if (val == L2CAP_FCS_NONE)
3614 set_bit(CONF_RECV_NO_FCS,
 /* We cannot be talked out of basic mode once chosen. */
3620 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3621 return -ECONNREFUSED;
3623 chan->mode = rfc.mode;
3625 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3627 case L2CAP_MODE_ERTM:
3628 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3629 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3630 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3631 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3632 chan->ack_win = min_t(u16, chan->ack_win,
3635 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3636 chan->local_msdu = le16_to_cpu(efs.msdu);
3637 chan->local_sdu_itime =
3638 le32_to_cpu(efs.sdu_itime);
3639 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3640 chan->local_flush_to =
3641 le32_to_cpu(efs.flush_to);
3645 case L2CAP_MODE_STREAMING:
3646 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3650 req->dcid = cpu_to_le16(chan->dcid);
3651 req->flags = __constant_cpu_to_le16(0);
/* Fill in a bare configuration response header (scid/result/flags) with
 * no options; the response length return is on a line not shown here.
 */
3656 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3657 u16 result, u16 flags)
3659 struct l2cap_conf_rsp *rsp = data;
3660 void *ptr = rsp->data;
3662 BT_DBG("chan %p", chan);
3664 rsp->scid = cpu_to_le16(chan->dcid);
3665 rsp->result = cpu_to_le16(result);
3666 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE connection response for a channel whose accept
 * was postponed, advertising our CID, MTU, default MPS/credits and a
 * success result.
 */
3671 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3673 struct l2cap_le_conn_rsp rsp;
3674 struct l2cap_conn *conn = chan->conn;
3676 BT_DBG("chan %p", chan);
3678 rsp.dcid = cpu_to_le16(chan->scid);
3679 rsp.mtu = cpu_to_le16(chan->imtu);
3680 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
3681 rsp.credits = __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS);
3682 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3684 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR connection (or AMP create-channel) response
 * with a success result, then kick off configuration by sending our
 * config request if one has not been sent yet.
 */
3688 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3690 struct l2cap_conn_rsp rsp;
3691 struct l2cap_conn *conn = chan->conn;
3695 rsp.scid = cpu_to_le16(chan->dcid);
3696 rsp.dcid = cpu_to_le16(chan->scid);
3697 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3698 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3701 rsp_code = L2CAP_CREATE_CHAN_RSP;
3703 rsp_code = L2CAP_CONN_RSP;
3705 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3707 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
 /* Only send the config request once per channel. */
3709 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3712 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3713 l2cap_build_conf_req(chan, buf), buf);
3714 chan->num_conf_req++;
/* Extract the final RFC (and extended window) parameters from a
 * successful configuration response and commit them to the channel.
 * Sane defaults are used when the remote omitted the options.  Only
 * meaningful for ERTM and streaming modes.
 */
3717 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3721 /* Use sane default values in case a misbehaving remote device
3722 * did not send an RFC or extended window size option.
3724 u16 txwin_ext = chan->ack_win;
3725 struct l2cap_conf_rfc rfc = {
3727 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3728 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3729 .max_pdu_size = cpu_to_le16(chan->imtu),
3730 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3733 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3735 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3738 while (len >= L2CAP_CONF_OPT_SIZE) {
3739 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3742 case L2CAP_CONF_RFC:
3743 if (olen == sizeof(rfc))
3744 memcpy(&rfc, (void *)val, olen);
3746 case L2CAP_CONF_EWS:
3753 case L2CAP_MODE_ERTM:
3754 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3755 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3756 chan->mps = le16_to_cpu(rfc.max_pdu_size);
 /* ack_win shrinks to the extended or RFC window, depending
 * on whether the extended control field is in use.
 */
3757 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3758 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3760 chan->ack_win = min_t(u16, chan->ack_win,
3763 case L2CAP_MODE_STREAMING:
3764 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject.  If it answers our pending information
 * request, mark feature-mask discovery done and start pending channels
 * anyway.
 */
3768 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3769 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3772 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
 /* Reject malformed (short) packets. */
3774 if (cmd_len < sizeof(*rej))
3777 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
 /* Only react if this rejects our outstanding info request. */
3780 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3781 cmd->ident == conn->info_ident) {
3782 cancel_delayed_work(&conn->info_timer);
3784 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3785 conn->info_ident = 0;
3787 l2cap_conn_start(conn);
/* Handle an incoming connection request: look up a listening channel for
 * the PSM, security-check the link, create and register the new child
 * channel, pick the result/status (success, pending, or an error), send
 * the response, and when appropriate trigger feature-mask discovery or
 * the first config request.  Returns the new channel or NULL.
 */
3793 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3794 struct l2cap_cmd_hdr *cmd,
3795 u8 *data, u8 rsp_code, u8 amp_id)
3797 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3798 struct l2cap_conn_rsp rsp;
3799 struct l2cap_chan *chan = NULL, *pchan;
3800 int result, status = L2CAP_CS_NO_INFO;
3802 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3803 __le16 psm = req->psm;
3805 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3807 /* Check if we have socket listening on psm */
3808 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3809 &conn->hcon->dst, ACL_LINK);
3811 result = L2CAP_CR_BAD_PSM;
3815 mutex_lock(&conn->chan_lock);
3816 l2cap_chan_lock(pchan);
3818 /* Check if the ACL is secure enough (if not SDP) */
3819 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3820 !hci_conn_check_link_mode(conn->hcon)) {
3821 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3822 result = L2CAP_CR_SEC_BLOCK;
3826 result = L2CAP_CR_NO_MEM;
3828 /* Check if we already have channel with that dcid */
3829 if (__l2cap_get_chan_by_dcid(conn, scid))
3832 chan = pchan->ops->new_connection(pchan);
3836 /* For certain devices (ex: HID mouse), support for authentication,
3837 * pairing and bonding is optional. For such devices, in order to avoid
3838 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3839 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3841 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3843 bacpy(&chan->src, &conn->hcon->src);
3844 bacpy(&chan->dst, &conn->hcon->dst);
3845 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3846 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3849 chan->local_amp_id = amp_id;
3851 __l2cap_chan_add(conn, chan);
3855 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3857 chan->ident = cmd->ident;
 /* Result depends on feature discovery, security state, and
 * whether the listener deferred the accept.
 */
3859 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3860 if (l2cap_chan_check_security(chan)) {
3861 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3862 l2cap_state_change(chan, BT_CONNECT2);
3863 result = L2CAP_CR_PEND;
3864 status = L2CAP_CS_AUTHOR_PEND;
3865 chan->ops->defer(chan);
3867 /* Force pending result for AMP controllers.
3868 * The connection will succeed after the
3869 * physical link is up.
3871 if (amp_id == AMP_ID_BREDR) {
3872 l2cap_state_change(chan, BT_CONFIG);
3873 result = L2CAP_CR_SUCCESS;
3875 l2cap_state_change(chan, BT_CONNECT2);
3876 result = L2CAP_CR_PEND;
3878 status = L2CAP_CS_NO_INFO;
3881 l2cap_state_change(chan, BT_CONNECT2);
3882 result = L2CAP_CR_PEND;
3883 status = L2CAP_CS_AUTHEN_PEND;
3886 l2cap_state_change(chan, BT_CONNECT2);
3887 result = L2CAP_CR_PEND;
3888 status = L2CAP_CS_NO_INFO;
3892 l2cap_chan_unlock(pchan);
3893 mutex_unlock(&conn->chan_lock);
3896 rsp.scid = cpu_to_le16(scid);
3897 rsp.dcid = cpu_to_le16(dcid);
3898 rsp.result = cpu_to_le16(result);
3899 rsp.status = cpu_to_le16(status);
3900 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
 /* Pending with no info yet: start feature-mask discovery. */
3902 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3903 struct l2cap_info_req info;
3904 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3906 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3907 conn->info_ident = l2cap_get_ident(conn);
3909 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3911 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3912 sizeof(info), &info);
 /* Connection accepted immediately: send our config request. */
3915 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3916 result == L2CAP_CR_SUCCESS) {
3918 set_bit(CONF_REQ_SENT, &chan->conf_state);
3919 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3920 l2cap_build_conf_req(chan, buf), buf);
3921 chan->num_conf_req++;
/* Signalling handler for a BR/EDR connection request: notify the
 * management interface of the connected device (once per ACL), then
 * delegate to l2cap_connect() with a plain CONN_RSP and no AMP id.
 */
3927 static int l2cap_connect_req(struct l2cap_conn *conn,
3928 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3930 struct hci_dev *hdev = conn->hcon->hdev;
3931 struct hci_conn *hcon = conn->hcon;
 /* Reject malformed (short) packets. */
3933 if (cmd_len < sizeof(struct l2cap_conn_req))
3937 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3938 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3939 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3940 hcon->dst_type, 0, NULL, 0,
3942 hci_dev_unlock(hdev);
3944 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a connection (or create-channel) response.  Look the channel up
 * by our scid (or by command ident for a pending response), then either
 * move to BT_CONFIG and send our config request on success, stay pending,
 * or tear the channel down on refusal.
 */
3948 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3949 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3952 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3953 u16 scid, dcid, result, status;
3954 struct l2cap_chan *chan;
 /* Reject malformed (short) packets. */
3958 if (cmd_len < sizeof(*rsp))
3961 scid = __le16_to_cpu(rsp->scid);
3962 dcid = __le16_to_cpu(rsp->dcid);
3963 result = __le16_to_cpu(rsp->result);
3964 status = __le16_to_cpu(rsp->status);
3966 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3967 dcid, scid, result, status);
3969 mutex_lock(&conn->chan_lock);
3972 chan = __l2cap_get_chan_by_scid(conn, scid);
 /* No scid yet (pending response): match by command ident. */
3978 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3987 l2cap_chan_lock(chan);
3990 case L2CAP_CR_SUCCESS:
3991 l2cap_state_change(chan, BT_CONFIG);
3994 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3996 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3999 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4000 l2cap_build_conf_req(chan, req), req);
4001 chan->num_conf_req++;
4005 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
 /* Any other result: refuse and delete the channel. */
4009 l2cap_chan_del(chan, ECONNREFUSED);
4013 l2cap_chan_unlock(chan);
4016 mutex_unlock(&conn->chan_lock);
4021 static inline void set_default_fcs(struct l2cap_chan *chan)
4023 /* FCS is enabled only in ERTM or streaming mode, if one or both
 * sides did not negotiate it off; otherwise no checksum is used.
4026 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4027 chan->fcs = L2CAP_FCS_NONE;
4028 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4029 chan->fcs = L2CAP_FCS_CRC16;
/* Clear the local pending-config flag, mark our output configuration
 * done, and send a SUCCESS configuration response (used on the EFS
 * negotiation path).
 */
4032 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4033 u8 ident, u16 flags)
4035 struct l2cap_conn *conn = chan->conn;
4037 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4040 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4041 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4043 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4044 l2cap_build_conf_rsp(chan, data,
4045 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the offending
 * source/destination CIDs back to the remote.
 */
4048 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4051 struct l2cap_cmd_rej_cid rej;
4053 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4054 rej.scid = __cpu_to_le16(scid);
4055 rej.dcid = __cpu_to_le16(dcid);
4057 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle a configuration request.  Accumulates (possibly fragmented)
 * option data in chan->conf_req; once complete, parses it, sends the
 * response, and if both directions are configured finishes channel
 * setup (ERTM init + ready).  Sends our own config request if we have
 * not yet done so.
 */
4060 static inline int l2cap_config_req(struct l2cap_conn *conn,
4061 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4064 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4067 struct l2cap_chan *chan;
 /* Reject malformed (short) packets. */
4070 if (cmd_len < sizeof(*req))
4073 dcid = __le16_to_cpu(req->dcid);
4074 flags = __le16_to_cpu(req->flags);
4076 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4078 chan = l2cap_get_chan_by_scid(conn, dcid);
4080 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
 /* Config is only legal while connecting/configuring. */
4084 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4085 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4090 /* Reject if config buffer is too small. */
4091 len = cmd_len - sizeof(*req);
4092 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4093 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4094 l2cap_build_conf_rsp(chan, rsp,
4095 L2CAP_CONF_REJECT, flags), rsp);
 /* Append this fragment to the accumulated request. */
4100 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4101 chan->conf_len += len;
4103 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4104 /* Incomplete config. Send empty response. */
4105 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4106 l2cap_build_conf_rsp(chan, rsp,
4107 L2CAP_CONF_SUCCESS, flags), rsp);
4111 /* Complete config. */
4112 len = l2cap_parse_conf_req(chan, rsp);
4114 l2cap_send_disconn_req(chan, ECONNRESET);
4118 chan->ident = cmd->ident;
4119 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4120 chan->num_conf_rsp++;
4122 /* Reset config buffer. */
4125 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
 /* Both sides configured: finalise FCS and start the channel. */
4128 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4129 set_default_fcs(chan);
4131 if (chan->mode == L2CAP_MODE_ERTM ||
4132 chan->mode == L2CAP_MODE_STREAMING)
4133 err = l2cap_ertm_init(chan);
4136 l2cap_send_disconn_req(chan, -err);
4138 l2cap_chan_ready(chan);
4143 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4145 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4146 l2cap_build_conf_req(chan, buf), buf);
4147 chan->num_conf_req++;
4150 /* Got Conf Rsp PENDING from remote side and assume we sent
4151 Conf Rsp PENDING in the code above */
4152 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4153 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4155 /* check compatibility */
4157 /* Send rsp for BR/EDR channel */
4159 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4161 chan->ident = cmd->ident;
4165 l2cap_chan_unlock(chan);
/* Handle a configuration response.  SUCCESS commits the negotiated RFC
 * parameters; PENDING may trigger an AMP logical link or an EFS config
 * response; UNACCEPT retries with a new request up to the retry limit;
 * anything else disconnects.  When both directions are done, finish
 * channel setup.
 */
4169 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4170 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4173 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4174 u16 scid, flags, result;
4175 struct l2cap_chan *chan;
4176 int len = cmd_len - sizeof(*rsp);
 /* Reject malformed (short) packets. */
4179 if (cmd_len < sizeof(*rsp))
4182 scid = __le16_to_cpu(rsp->scid);
4183 flags = __le16_to_cpu(rsp->flags);
4184 result = __le16_to_cpu(rsp->result);
4186 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4189 chan = l2cap_get_chan_by_scid(conn, scid);
4194 case L2CAP_CONF_SUCCESS:
4195 l2cap_conf_rfc_get(chan, rsp->data, len);
4196 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4199 case L2CAP_CONF_PENDING:
4200 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4202 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4205 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4208 l2cap_send_disconn_req(chan, ECONNRESET);
 /* BR/EDR: answer directly; AMP: bring up the logical
 * link first when EFS checks pass.
 */
4212 if (!chan->hs_hcon) {
4213 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4216 if (l2cap_check_efs(chan)) {
4217 amp_create_logical_link(chan);
4218 chan->ident = cmd->ident;
4224 case L2CAP_CONF_UNACCEPT:
4225 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
 /* Bound the option data against our request buffer. */
4228 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4229 l2cap_send_disconn_req(chan, ECONNRESET);
4233 /* throw out any old stored conf requests */
4234 result = L2CAP_CONF_SUCCESS;
4235 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4238 l2cap_send_disconn_req(chan, ECONNRESET);
4242 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4243 L2CAP_CONF_REQ, len, req);
4244 chan->num_conf_req++;
4245 if (result != L2CAP_CONF_SUCCESS)
 /* Any other result: give up on the channel. */
4251 l2cap_chan_set_err(chan, ECONNRESET);
4253 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4254 l2cap_send_disconn_req(chan, ECONNRESET);
 /* More fragments to come: wait for the rest. */
4258 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4261 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4263 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4264 set_default_fcs(chan);
4266 if (chan->mode == L2CAP_MODE_ERTM ||
4267 chan->mode == L2CAP_MODE_STREAMING)
4268 err = l2cap_ertm_init(chan);
4271 l2cap_send_disconn_req(chan, -err);
4273 l2cap_chan_ready(chan);
4277 l2cap_chan_unlock(chan);
/* Handle a disconnection request: acknowledge with a disconnect response,
 * then shut down and delete the channel.  A hold/put pair keeps the
 * channel alive across the unlocked close callback.
 */
4281 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4282 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4285 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4286 struct l2cap_disconn_rsp rsp;
4288 struct l2cap_chan *chan;
 /* Disconnect requests have a fixed size. */
4290 if (cmd_len != sizeof(*req))
4293 scid = __le16_to_cpu(req->scid);
4294 dcid = __le16_to_cpu(req->dcid);
4296 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4298 mutex_lock(&conn->chan_lock);
 /* The remote's dcid is our local scid. */
4300 chan = __l2cap_get_chan_by_scid(conn, dcid);
4302 mutex_unlock(&conn->chan_lock);
4303 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4307 l2cap_chan_lock(chan);
4309 rsp.dcid = cpu_to_le16(chan->scid);
4310 rsp.scid = cpu_to_le16(chan->dcid);
4311 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4313 chan->ops->set_shutdown(chan);
4315 l2cap_chan_hold(chan);
4316 l2cap_chan_del(chan, ECONNRESET);
4318 l2cap_chan_unlock(chan);
4320 chan->ops->close(chan);
4321 l2cap_chan_put(chan);
4323 mutex_unlock(&conn->chan_lock);
/* Handle a disconnection response to our earlier request: delete the
 * channel and run its close callback.  A hold/put pair keeps the channel
 * alive across the unlocked close.
 */
4328 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4329 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4332 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4334 struct l2cap_chan *chan;
 /* Disconnect responses have a fixed size. */
4336 if (cmd_len != sizeof(*rsp))
4339 scid = __le16_to_cpu(rsp->scid);
4340 dcid = __le16_to_cpu(rsp->dcid);
4342 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4344 mutex_lock(&conn->chan_lock);
4346 chan = __l2cap_get_chan_by_scid(conn, scid);
4348 mutex_unlock(&conn->chan_lock);
4352 l2cap_chan_lock(chan);
4354 l2cap_chan_hold(chan);
4355 l2cap_chan_del(chan, 0);
4357 l2cap_chan_unlock(chan);
4359 chan->ops->close(chan);
4360 l2cap_chan_put(chan);
4362 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request.
 *
 * Three cases: report the local feature mask, report the fixed-channel
 * map (with the A2MP bit tracking conn->hs_enabled), or answer any
 * other type with a NOTSUPP result.
 */
4367 static inline int l2cap_information_req(struct l2cap_conn *conn,
4368 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4371 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4374 if (cmd_len != sizeof(*req))
4377 type = __le16_to_cpu(req->type);
4379 BT_DBG("type 0x%4.4x", type);
4381 if (type == L2CAP_IT_FEAT_MASK) {
4383 u32 feat_mask = l2cap_feat_mask;
4384 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4385 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4386 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4388 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window are only advertised when high-speed
 * (AMP) support is enabled on this connection.
 */
4390 if (conn->hs_enabled)
4391 feat_mask |= L2CAP_FEAT_EXT_FLOW
4392 | L2CAP_FEAT_EXT_WINDOW;
4394 put_unaligned_le32(feat_mask, rsp->data);
4395 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4397 } else if (type == L2CAP_IT_FIXED_CHAN) {
4399 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4401 if (conn->hs_enabled)
4402 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4404 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4406 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4407 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4408 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4409 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4412 struct l2cap_info_rsp rsp;
4413 rsp.type = cpu_to_le16(type);
4414 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4415 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response.
 *
 * Stores the peer's feature mask or fixed-channel map. After the
 * feature mask arrives, a follow-up request for the fixed-channel map
 * is issued if the peer advertises fixed-channel support; otherwise
 * the info exchange is marked done and pending channels are started.
 */
4422 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4423 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4426 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4429 if (cmd_len < sizeof(*rsp))
4432 type = __le16_to_cpu(rsp->type);
4433 result = __le16_to_cpu(rsp->result);
4435 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4437 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4438 if (cmd->ident != conn->info_ident ||
4439 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4442 cancel_delayed_work(&conn->info_timer);
4444 if (result != L2CAP_IR_SUCCESS) {
/* Peer refused the request — treat the exchange as finished so
 * connection setup is not blocked on it.
 */
4445 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4446 conn->info_ident = 0;
4448 l2cap_conn_start(conn);
4454 case L2CAP_IT_FEAT_MASK:
4455 conn->feat_mask = get_unaligned_le32(rsp->data);
4457 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4458 struct l2cap_info_req req;
4459 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4461 conn->info_ident = l2cap_get_ident(conn);
4463 l2cap_send_cmd(conn, conn->info_ident,
4464 L2CAP_INFO_REQ, sizeof(req), &req);
4466 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4467 conn->info_ident = 0;
4469 l2cap_conn_start(conn);
4473 case L2CAP_IT_FIXED_CHAN:
4474 conn->fixed_chan_mask = rsp->data[0];
4475 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4476 conn->info_ident = 0;
4478 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP).
 *
 * amp_id AMP_ID_BREDR falls back to a normal BR/EDR connect. Otherwise
 * the requested AMP controller is validated (must exist, be an AMP
 * device and be up) before the channel is created and bound to the
 * high-speed link; on any failure a BAD_AMP response is returned.
 */
4485 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4486 struct l2cap_cmd_hdr *cmd,
4487 u16 cmd_len, void *data)
4489 struct l2cap_create_chan_req *req = data;
4490 struct l2cap_create_chan_rsp rsp;
4491 struct l2cap_chan *chan;
4492 struct hci_dev *hdev;
4495 if (cmd_len != sizeof(*req))
4498 if (!conn->hs_enabled)
4501 psm = le16_to_cpu(req->psm);
4502 scid = le16_to_cpu(req->scid);
4504 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4506 /* For controller id 0 make BR/EDR connection */
4507 if (req->amp_id == AMP_ID_BREDR) {
4508 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4513 /* Validate AMP controller id */
4514 hdev = hci_dev_get(req->amp_id);
4518 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4523 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4526 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4527 struct hci_conn *hs_hcon;
4529 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4533 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4538 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4540 mgr->bredr_chan = chan;
4541 chan->hs_hcon = hs_hcon;
/* AMP data path does not use an L2CAP FCS; MTU follows the AMP
 * controller's block MTU.
 */
4542 chan->fcs = L2CAP_FCS_NONE;
4543 conn->mtu = hdev->block_mtu;
4552 rsp.scid = cpu_to_le16(scid);
4553 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4554 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4556 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for @chan towards AMP controller
 * @dest_amp_id and arm the move timer. The allocated ident is stored
 * in chan->ident so the response can be matched back to this channel.
 */
4562 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4564 struct l2cap_move_chan_req req;
4567 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4569 ident = l2cap_get_ident(chan->conn);
4570 chan->ident = ident;
4572 req.icid = cpu_to_le16(chan->scid);
4573 req.dest_amp_id = dest_amp_id;
4575 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4578 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with @result, reusing the ident saved
 * from the peer's Move Channel Request.
 */
4581 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4583 struct l2cap_move_chan_rsp rsp;
4585 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4587 rsp.icid = cpu_to_le16(chan->dcid);
4588 rsp.result = cpu_to_le16(result);
4590 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm with @result under a fresh ident and arm
 * the move timer while waiting for the confirm response.
 */
4594 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4596 struct l2cap_move_chan_cfm cfm;
4598 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4600 chan->ident = l2cap_get_ident(chan->conn);
4602 cfm.icid = cpu_to_le16(chan->scid);
4603 cfm.result = cpu_to_le16(result);
4605 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4608 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare @icid — used when
 * no channel object could be located for the move.
 */
4611 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4613 struct l2cap_move_chan_cfm cfm;
4615 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4617 cfm.icid = cpu_to_le16(icid);
4618 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4620 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirm Response for @icid using the peer's
 * @ident — the final message of the channel-move handshake.
 */
4624 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4627 struct l2cap_move_chan_cfm_rsp rsp;
4629 BT_DBG("icid 0x%4.4x", icid);
4631 rsp.icid = cpu_to_le16(icid);
4632 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its high-speed logical link by clearing the
 * hchan/hcon pointers; actual link release is still a placeholder.
 */
4635 static void __release_logical_link(struct l2cap_chan *chan)
4637 chan->hs_hchan = NULL;
4638 chan->hs_hcon = NULL;
4640 /* Placeholder - release the logical link */
/* React to a failed logical-link setup.
 *
 * If the channel never reached BT_CONNECTED the create itself failed
 * and the channel is disconnected. For an already-connected channel a
 * pending move is aborted according to our move role.
 */
4643 static void l2cap_logical_fail(struct l2cap_chan *chan)
4645 /* Logical link setup failed */
4646 if (chan->state != BT_CONNECTED) {
4647 /* Create channel failure, disconnect */
4648 l2cap_send_disconn_req(chan, ECONNRESET);
4652 switch (chan->move_role) {
4653 case L2CAP_MOVE_ROLE_RESPONDER:
4654 l2cap_move_done(chan);
4655 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4657 case L2CAP_MOVE_ROLE_INITIATOR:
4658 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4659 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4660 /* Remote has only sent pending or
4661 * success responses, clean up
4663 l2cap_move_done(chan);
4666 /* Other amp move states imply that the move
4667 * has already aborted
4669 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the logical link is up: bind the
 * hchan, send the EFS config response, and if the peer's config is
 * already done, finish ERTM setup and mark the channel ready.
 */
4674 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4675 struct hci_chan *hchan)
4677 struct l2cap_conf_rsp rsp;
4679 chan->hs_hchan = hchan;
4680 chan->hs_hcon->l2cap_data = chan->conn;
4682 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4684 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4687 set_default_fcs(chan);
4689 err = l2cap_ertm_init(chan);
/* ERTM init failure is fatal for the channel — disconnect with
 * the (negated) error as the reason.
 */
4691 l2cap_send_disconn_req(chan, -err);
4693 l2cap_chan_ready(chan);
/* Advance a channel move after its logical link came up, driven by the
 * current move_state; unexpected states release the link and settle the
 * move as stable.
 */
4697 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4698 struct hci_chan *hchan)
4700 chan->hs_hcon = hchan->conn;
4701 chan->hs_hcon->l2cap_data = chan->conn;
4703 BT_DBG("move_state %d", chan->move_state);
4705 switch (chan->move_state) {
4706 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4707 /* Move confirm will be sent after a success
4708 * response is received
4710 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4712 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local-busy defers the move; otherwise the next message depends
 * on whether we initiated (send confirm) or respond (send rsp).
 */
4713 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4714 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4715 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4716 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4717 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4718 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4719 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4720 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4724 /* Move was not in expected state, free the channel */
4725 __release_logical_link(chan);
4727 chan->move_state = L2CAP_MOVE_STABLE;
4731 /* Call with chan locked */
/* Logical-link confirmation entry point: on failure clean up via
 * l2cap_logical_fail() and drop the link; on success dispatch to the
 * create-completion path (channel not yet connected) or the move path.
 */
4732 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4735 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4738 l2cap_logical_fail(chan);
4739 __release_logical_link(chan);
4743 if (chan->state != BT_CONNECTED) {
4744 /* Ignore logical link if channel is on BR/EDR */
4745 if (chan->local_amp_id != AMP_ID_BREDR)
4746 l2cap_logical_finish_create(chan, hchan);
4748 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel between BR/EDR and AMP as the initiator.
 *
 * From BR/EDR: only proceeds when policy prefers AMP, then waits for
 * physical-link setup. From AMP: sets up the move and immediately sends
 * a Move Channel Request back towards BR/EDR (dest id 0).
 */
4752 void l2cap_move_start(struct l2cap_chan *chan)
4754 BT_DBG("chan %p", chan);
4756 if (chan->local_amp_id == AMP_ID_BREDR) {
4757 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4759 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4760 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4761 /* Placeholder - start physical link setup */
4763 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4764 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4766 l2cap_move_setup(chan);
4767 l2cap_send_move_chan_req(chan, 0);
/* Finish creating an AMP channel after the physical link attempt.
 *
 * Outgoing (BT_CONNECT): on success send a Create Channel Request on
 * the AMP; on failure revert to a plain BR/EDR Connect Request.
 * Incoming: answer the peer's Create Channel Request with success or
 * NO_MEM, then start configuration on success.
 */
4771 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4772 u8 local_amp_id, u8 remote_amp_id)
4774 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4775 local_amp_id, remote_amp_id);
/* AMP channels never use an L2CAP FCS. */
4777 chan->fcs = L2CAP_FCS_NONE;
4779 /* Outgoing channel on AMP */
4780 if (chan->state == BT_CONNECT) {
4781 if (result == L2CAP_CR_SUCCESS) {
4782 chan->local_amp_id = local_amp_id;
4783 l2cap_send_create_chan_req(chan, remote_amp_id);
4785 /* Revert to BR/EDR connect */
4786 l2cap_send_conn_req(chan);
4792 /* Incoming channel on AMP */
4793 if (__l2cap_no_conn_pending(chan)) {
4794 struct l2cap_conn_rsp rsp;
4796 rsp.scid = cpu_to_le16(chan->dcid);
4797 rsp.dcid = cpu_to_le16(chan->scid);
4799 if (result == L2CAP_CR_SUCCESS) {
4800 /* Send successful response */
4801 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4802 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4804 /* Send negative response */
4805 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4806 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4809 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4812 if (result == L2CAP_CR_SUCCESS) {
4813 l2cap_state_change(chan, BT_CONFIG);
4814 set_bit(CONF_REQ_SENT, &chan->conf_state);
4815 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4817 l2cap_build_conf_req(chan, buf), buf);
4818 chan->num_conf_req++;
/* As move initiator: prepare the channel, record the destination AMP id
 * and enter WAIT_RSP before sending the Move Channel Request.
 */
4823 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4826 l2cap_move_setup(chan);
4827 chan->move_id = local_amp_id;
4828 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4830 l2cap_send_move_chan_req(chan, remote_amp_id);
/* As move responder: answer based on logical-link availability — reply
 * SUCCESS if the link is already connected (completing the logical cfm
 * inline), wait for it if pending, or reply NOT_ALLOWED if unavailable.
 * NOTE(review): hchan acquisition is still a placeholder (NULL here).
 */
4833 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4835 struct hci_chan *hchan = NULL;
4837 /* Placeholder - get hci_chan for logical link */
4840 if (hchan->state == BT_CONNECTED) {
4841 /* Logical link is ready to go */
4842 chan->hs_hcon = hchan->conn;
4843 chan->hs_hcon->l2cap_data = chan->conn;
4844 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4845 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4847 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4849 /* Wait for logical link to be ready */
4850 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4853 /* Logical link not available */
4854 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a channel move: as responder, reply BAD_ID (for -EINVAL) or
 * NOT_ALLOWED; then reset the move role/state and restart ERTM
 * transmission that the move had paused.
 */
4858 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4860 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4862 if (result == -EINVAL)
4863 rsp_result = L2CAP_MR_BAD_ID;
4865 rsp_result = L2CAP_MR_NOT_ALLOWED;
4867 l2cap_send_move_chan_rsp(chan, rsp_result);
4870 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4871 chan->move_state = L2CAP_MOVE_STABLE;
4873 /* Restart data transmission */
4874 l2cap_ertm_send(chan);
4877 /* Invoke with locked chan */
/* Physical-link confirmation: dispatch to channel creation (channel not
 * yet connected), move cancellation (failure), or the initiator /
 * responder move paths depending on this channel's move role.
 */
4878 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4880 u8 local_amp_id = chan->local_amp_id;
4881 u8 remote_amp_id = chan->remote_amp_id;
4883 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4884 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away — nothing to do. */
4886 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4887 l2cap_chan_unlock(chan);
4891 if (chan->state != BT_CONNECTED) {
4892 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4893 } else if (result != L2CAP_MR_SUCCESS) {
4894 l2cap_do_move_cancel(chan, result);
4896 switch (chan->move_role) {
4897 case L2CAP_MOVE_ROLE_INITIATOR:
4898 l2cap_do_move_initiate(chan, local_amp_id,
4901 case L2CAP_MOVE_ROLE_RESPONDER:
4902 l2cap_do_move_respond(chan, result);
4905 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.
 *
 * Validates the channel (dynamic CID, ERTM/streaming mode, policy
 * allows AMP), the destination controller id, and detects move
 * collisions (larger bd_addr wins). Depending on the destination a
 * SUCCESS/PEND result is returned and the responder state machine is
 * started; any failure path replies with the matching error result.
 */
4911 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4912 struct l2cap_cmd_hdr *cmd,
4913 u16 cmd_len, void *data)
4915 struct l2cap_move_chan_req *req = data;
4916 struct l2cap_move_chan_rsp rsp;
4917 struct l2cap_chan *chan;
4919 u16 result = L2CAP_MR_NOT_ALLOWED;
4921 if (cmd_len != sizeof(*req))
4924 icid = le16_to_cpu(req->icid);
4926 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4928 if (!conn->hs_enabled)
4931 chan = l2cap_get_chan_by_dcid(conn, icid);
/* No such channel — reply NOT_ALLOWED directly, no state to touch. */
4933 rsp.icid = cpu_to_le16(icid);
4934 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4935 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4940 chan->ident = cmd->ident;
4942 if (chan->scid < L2CAP_CID_DYN_START ||
4943 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4944 (chan->mode != L2CAP_MODE_ERTM &&
4945 chan->mode != L2CAP_MODE_STREAMING)) {
4946 result = L2CAP_MR_NOT_ALLOWED;
4947 goto send_move_response;
4950 if (chan->local_amp_id == req->dest_amp_id) {
4951 result = L2CAP_MR_SAME_ID;
4952 goto send_move_response;
4955 if (req->dest_amp_id != AMP_ID_BREDR) {
4956 struct hci_dev *hdev;
4957 hdev = hci_dev_get(req->dest_amp_id);
4958 if (!hdev || hdev->dev_type != HCI_AMP ||
4959 !test_bit(HCI_UP, &hdev->flags)) {
4963 result = L2CAP_MR_BAD_ID;
4964 goto send_move_response;
4969 /* Detect a move collision. Only send a collision response
4970 * if this side has "lost", otherwise proceed with the move.
4971 * The winner has the larger bd_addr.
4973 if ((__chan_is_moving(chan) ||
4974 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4975 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4976 result = L2CAP_MR_COLLISION;
4977 goto send_move_response;
4980 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4981 l2cap_move_setup(chan);
4982 chan->move_id = req->dest_amp_id;
4985 if (req->dest_amp_id == AMP_ID_BREDR) {
4986 /* Moving to BR/EDR */
4987 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4988 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4989 result = L2CAP_MR_PEND;
4991 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4992 result = L2CAP_MR_SUCCESS;
4995 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4996 /* Placeholder - uncomment when amp functions are available */
4997 /*amp_accept_physical(chan, req->dest_amp_id);*/
4998 result = L2CAP_MR_PEND;
5002 l2cap_send_move_chan_rsp(chan, result);
5004 l2cap_chan_unlock(chan);
/* Continue a channel move after a SUCCESS/PEND Move Channel Response,
 * driving the initiator's state machine forward; an unknown icid gets
 * an UNCONFIRMED confirm, and unexpected states abort the move.
 */
5009 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5011 struct l2cap_chan *chan;
5012 struct hci_chan *hchan = NULL;
5014 chan = l2cap_get_chan_by_scid(conn, icid);
5016 l2cap_send_move_chan_cfm_icid(conn, icid);
/* A PEND result restarts the (extended) move timer. */
5020 __clear_chan_timer(chan);
5021 if (result == L2CAP_MR_PEND)
5022 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5024 switch (chan->move_state) {
5025 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5026 /* Move confirm will be sent when logical link
5029 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5031 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5032 if (result == L2CAP_MR_PEND) {
5034 } else if (test_bit(CONN_LOCAL_BUSY,
5035 &chan->conn_state)) {
5036 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5038 /* Logical link is up or moving to BR/EDR,
5041 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5042 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5045 case L2CAP_MOVE_WAIT_RSP:
5047 if (result == L2CAP_MR_SUCCESS) {
5048 /* Remote is ready, send confirm immediately
5049 * after logical link is ready
5051 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5053 /* Both logical link and move success
5054 * are required to confirm
5056 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5059 /* Placeholder - get hci_chan for logical link */
5061 /* Logical link not available */
5062 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5066 /* If the logical link is not yet connected, do not
5067 * send confirmation.
5069 if (hchan->state != BT_CONNECTED)
5072 /* Logical link is already ready to go */
5074 chan->hs_hcon = hchan->conn;
5075 chan->hs_hcon->l2cap_data = chan->conn;
5077 if (result == L2CAP_MR_SUCCESS) {
5078 /* Can confirm now */
5079 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5081 /* Now only need move success
5084 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5087 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5090 /* Any other amp move state means the move failed. */
5091 chan->move_id = chan->local_amp_id;
5092 l2cap_move_done(chan);
5093 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5096 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response. On COLLISION the initiator
 * switches to responder; otherwise the move is cancelled and an
 * UNCONFIRMED confirm is sent. Channel lookup is by command ident; if
 * it fails, the icid from the response is used for a bare confirm.
 */
5099 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5102 struct l2cap_chan *chan;
5104 chan = l2cap_get_chan_by_ident(conn, ident);
5106 /* Could not locate channel, icid is best guess */
5107 l2cap_send_move_chan_cfm_icid(conn, icid);
5111 __clear_chan_timer(chan);
5113 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5114 if (result == L2CAP_MR_COLLISION) {
5115 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5117 /* Cleanup - cancel move */
5118 chan->move_id = chan->local_amp_id;
5119 l2cap_move_done(chan);
5123 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5125 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Response: SUCCESS/PEND continues the
 * move, anything else routes to the failure path.
 */
5128 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5129 struct l2cap_cmd_hdr *cmd,
5130 u16 cmd_len, void *data)
5132 struct l2cap_move_chan_rsp *rsp = data;
5135 if (cmd_len != sizeof(*rsp))
5138 icid = le16_to_cpu(rsp->icid);
5139 result = le16_to_cpu(rsp->result);
5141 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5143 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5144 l2cap_move_continue(conn, icid, result);
5146 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm: commit (or roll back) the
 * controller switch and always answer with a Confirm Response — the
 * spec requires one even when the icid is unknown.
 */
5151 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5152 struct l2cap_cmd_hdr *cmd,
5153 u16 cmd_len, void *data)
5155 struct l2cap_move_chan_cfm *cfm = data;
5156 struct l2cap_chan *chan;
5159 if (cmd_len != sizeof(*cfm))
5162 icid = le16_to_cpu(cfm->icid);
5163 result = le16_to_cpu(cfm->result);
5165 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5167 chan = l2cap_get_chan_by_dcid(conn, icid);
5169 /* Spec requires a response even if the icid was not found */
5170 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5174 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5175 if (result == L2CAP_MC_CONFIRMED) {
/* Confirmed: adopt the new controller; if we landed back on
 * BR/EDR the high-speed logical link is no longer needed.
 */
5176 chan->local_amp_id = chan->move_id;
5177 if (chan->local_amp_id == AMP_ID_BREDR)
5178 __release_logical_link(chan);
5180 chan->move_id = chan->local_amp_id;
5183 l2cap_move_done(chan);
5186 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5188 l2cap_chan_unlock(chan);
/* Handle the final Move Channel Confirm Response: if we were waiting
 * for it, commit the controller switch, drop a now-unused logical link
 * when back on BR/EDR, and mark the move done.
 */
5193 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5194 struct l2cap_cmd_hdr *cmd,
5195 u16 cmd_len, void *data)
5197 struct l2cap_move_chan_cfm_rsp *rsp = data;
5198 struct l2cap_chan *chan;
5201 if (cmd_len != sizeof(*rsp))
5204 icid = le16_to_cpu(rsp->icid);
5206 BT_DBG("icid 0x%4.4x", icid);
5208 chan = l2cap_get_chan_by_scid(conn, icid);
5212 __clear_chan_timer(chan);
5214 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5215 chan->local_amp_id = chan->move_id;
5217 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5218 __release_logical_link(chan);
5220 l2cap_move_done(chan);
5223 l2cap_chan_unlock(chan);
/* Validate LE connection parameters against the allowed ranges:
 * interval min/max ordering and bounds, supervision timeout bounds,
 * and that the latency fits within the timeout-derived maximum.
 */
5228 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5233 if (min > max || min < 6 || max > 3200)
5236 if (to_multiplier < 10 || to_multiplier > 3200)
/* The supervision timeout must exceed the effective max interval. */
5239 if (max >= to_multiplier * 8)
5242 max_latency = (to_multiplier * 8 / max) - 1;
5243 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed parameters, send an accept/reject response,
 * and on acceptance push the new parameters to the controller.
 */
5249 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5250 struct l2cap_cmd_hdr *cmd,
5251 u16 cmd_len, u8 *data)
5253 struct hci_conn *hcon = conn->hcon;
5254 struct l2cap_conn_param_update_req *req;
5255 struct l2cap_conn_param_update_rsp rsp;
5256 u16 min, max, latency, to_multiplier;
/* Only the LE master may apply a parameter update. */
5259 if (!(hcon->link_mode & HCI_LM_MASTER))
5262 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5265 req = (struct l2cap_conn_param_update_req *) data;
5266 min = __le16_to_cpu(req->min);
5267 max = __le16_to_cpu(req->max);
5268 latency = __le16_to_cpu(req->latency);
5269 to_multiplier = __le16_to_cpu(req->to_multiplier);
5271 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5272 min, max, latency, to_multiplier);
5274 memset(&rsp, 0, sizeof(rsp));
5276 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5278 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5280 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5282 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5286 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an LE Credit Based Connection Response: match the channel by
 * the ident of our request, and on SUCCESS store the peer's parameters
 * and mark the channel ready; otherwise delete it with ECONNREFUSED.
 * MTU and MPS below 23 are rejected per the LE minimum.
 */
5291 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5292 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5295 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5296 u16 dcid, mtu, mps, credits, result;
5297 struct l2cap_chan *chan;
5300 if (cmd_len < sizeof(*rsp))
5303 dcid = __le16_to_cpu(rsp->dcid);
5304 mtu = __le16_to_cpu(rsp->mtu);
5305 mps = __le16_to_cpu(rsp->mps);
5306 credits = __le16_to_cpu(rsp->credits);
5307 result = __le16_to_cpu(rsp->result);
5309 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5312 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5313 dcid, mtu, mps, credits, result);
5315 mutex_lock(&conn->chan_lock);
5317 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5325 l2cap_chan_lock(chan);
5328 case L2CAP_CR_SUCCESS:
5332 chan->remote_mps = mps;
5333 l2cap_chan_ready(chan);
5337 l2cap_chan_del(chan, ECONNREFUSED);
5341 l2cap_chan_unlock(chan);
5344 mutex_unlock(&conn->chan_lock);
/* Dispatch a single BR/EDR signaling command to its handler by opcode.
 * Echo requests are answered inline by mirroring the payload back.
 */
5349 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5350 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5355 switch (cmd->code) {
5356 case L2CAP_COMMAND_REJ:
5357 l2cap_command_rej(conn, cmd, cmd_len, data);
5360 case L2CAP_CONN_REQ:
5361 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share a handler. */
5364 case L2CAP_CONN_RSP:
5365 case L2CAP_CREATE_CHAN_RSP:
5366 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5369 case L2CAP_CONF_REQ:
5370 err = l2cap_config_req(conn, cmd, cmd_len, data);
5373 case L2CAP_CONF_RSP:
5374 l2cap_config_rsp(conn, cmd, cmd_len, data);
5377 case L2CAP_DISCONN_REQ:
5378 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5381 case L2CAP_DISCONN_RSP:
5382 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5385 case L2CAP_ECHO_REQ:
5386 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5389 case L2CAP_ECHO_RSP:
5392 case L2CAP_INFO_REQ:
5393 err = l2cap_information_req(conn, cmd, cmd_len, data);
5396 case L2CAP_INFO_RSP:
5397 l2cap_information_rsp(conn, cmd, cmd_len, data);
5400 case L2CAP_CREATE_CHAN_REQ:
5401 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5404 case L2CAP_MOVE_CHAN_REQ:
5405 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5408 case L2CAP_MOVE_CHAN_RSP:
5409 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5412 case L2CAP_MOVE_CHAN_CFM:
5413 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5416 case L2CAP_MOVE_CHAN_CFM_RSP:
5417 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5421 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request.
 *
 * Validates MTU/MPS minimums, finds a listening channel for the PSM,
 * checks security level and duplicate CIDs, spawns a new child channel
 * from the listener, and replies with the local parameters — or with a
 * pending result when setup is deferred to userspace.
 */
5429 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5430 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5433 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5434 struct l2cap_le_conn_rsp rsp;
5435 struct l2cap_chan *chan, *pchan;
5436 u16 dcid, scid, mtu, mps;
5440 if (cmd_len != sizeof(*req))
5443 scid = __le16_to_cpu(req->scid);
5444 mtu = __le16_to_cpu(req->mtu);
5445 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum legal LE MTU/MPS. */
5449 if (mtu < 23 || mps < 23)
5452 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5455 /* Check if we have socket listening on psm */
5456 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5457 &conn->hcon->dst, LE_LINK);
5459 result = L2CAP_CR_BAD_PSM;
5464 mutex_lock(&conn->chan_lock);
5465 l2cap_chan_lock(pchan);
5467 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5468 result = L2CAP_CR_AUTHENTICATION;
5470 goto response_unlock;
5473 /* Check if we already have channel with that dcid */
5474 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5475 result = L2CAP_CR_NO_MEM;
5477 goto response_unlock;
5480 chan = pchan->ops->new_connection(pchan);
5482 result = L2CAP_CR_NO_MEM;
5483 goto response_unlock;
5486 bacpy(&chan->src, &conn->hcon->src);
5487 bacpy(&chan->dst, &conn->hcon->dst);
5488 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5489 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5493 chan->remote_mps = mps;
5495 __l2cap_chan_add(conn, chan);
5498 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5500 chan->ident = cmd->ident;
/* With deferred setup the response is postponed (PEND) until the
 * listener's owner accepts the connection.
 */
5502 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5503 l2cap_state_change(chan, BT_CONNECT2);
5504 result = L2CAP_CR_PEND;
5505 chan->ops->defer(chan);
5507 l2cap_chan_ready(chan);
5508 result = L2CAP_CR_SUCCESS;
5512 l2cap_chan_unlock(pchan);
5513 mutex_unlock(&conn->chan_lock);
5515 if (result == L2CAP_CR_PEND)
5520 rsp.mtu = cpu_to_le16(chan->imtu);
5521 rsp.mps = __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS);
5527 rsp.dcid = cpu_to_le16(dcid);
5528 rsp.credits = __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS);
5529 rsp.result = cpu_to_le16(result);
5531 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Dispatch a single LE signaling command to its handler by opcode.
 * Command Reject and Connection Parameter Update Response are accepted
 * without further processing.
 */
5536 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5537 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5542 switch (cmd->code) {
5543 case L2CAP_COMMAND_REJ:
5546 case L2CAP_CONN_PARAM_UPDATE_REQ:
5547 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5550 case L2CAP_CONN_PARAM_UPDATE_RSP:
5553 case L2CAP_LE_CONN_RSP:
5554 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5557 case L2CAP_LE_CONN_REQ:
5558 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5561 case L2CAP_DISCONN_REQ:
5562 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5565 case L2CAP_DISCONN_RSP:
5566 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5570 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an incoming skb on the LE signaling channel: sanity-check the
 * link type, the command header and the advertised length, dispatch via
 * l2cap_le_sig_cmd(), and send a Command Reject on handler errors.
 */
5578 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5579 struct sk_buff *skb)
5581 struct hci_conn *hcon = conn->hcon;
5582 struct l2cap_cmd_hdr *cmd;
5586 if (hcon->type != LE_LINK)
5589 if (skb->len < L2CAP_CMD_HDR_SIZE)
5592 cmd = (void *) skb->data;
5593 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5595 len = le16_to_cpu(cmd->len);
5597 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE carries exactly one command per frame; ident 0 is invalid. */
5599 if (len != skb->len || !cmd->ident) {
5600 BT_DBG("corrupted command");
5604 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5606 struct l2cap_cmd_rej_unk rej;
5608 BT_ERR("Wrong link type (%d)", err);
5610 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5611 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an incoming skb on the BR/EDR signaling channel. Unlike LE,
 * one frame may carry several commands, so iterate command headers,
 * dispatching each via l2cap_bredr_sig_cmd() and rejecting on error.
 */
5619 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5620 struct sk_buff *skb)
5622 struct hci_conn *hcon = conn->hcon;
5623 u8 *data = skb->data;
5625 struct l2cap_cmd_hdr cmd;
5628 l2cap_raw_recv(conn, skb);
5630 if (hcon->type != ACL_LINK)
5633 while (len >= L2CAP_CMD_HDR_SIZE) {
5635 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5636 data += L2CAP_CMD_HDR_SIZE;
5637 len -= L2CAP_CMD_HDR_SIZE;
5639 cmd_len = le16_to_cpu(cmd.len);
5641 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* A claimed length past the buffer or a zero ident means the
 * frame is corrupt — stop parsing.
 */
5644 if (cmd_len > len || !cmd.ident) {
5645 BT_DBG("corrupted command");
5649 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5651 struct l2cap_cmd_rej_unk rej;
5653 BT_ERR("Wrong link type (%d)", err);
5655 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5656 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the trailing CRC16 FCS on a received frame when
 * the channel uses L2CAP_FCS_CRC16. The CRC covers the L2CAP header
 * (enhanced or extended size) plus the payload.
 */
5668 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5670 u16 our_fcs, rcv_fcs;
5673 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5674 hdr_size = L2CAP_EXT_HDR_SIZE;
5676 hdr_size = L2CAP_ENH_HDR_SIZE;
5678 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After skb_trim, skb->data + skb->len points at the stripped
 * FCS bytes, which remain readable in the buffer.
 */
5679 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5680 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5681 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5683 if (our_fcs != rcv_fcs)
/* Send the ERTM F-bit reply: RNR when locally busy, otherwise restart
 * the retransmit timer if needed, flush pending I-frames, and fall back
 * to an RR S-frame if no frame has carried the F-bit yet.
 */
5689 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5691 struct l2cap_ctrl control;
5693 BT_DBG("chan %p", chan);
5695 memset(&control, 0, sizeof(control));
5698 control.reqseq = chan->buffer_seq;
5699 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5701 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5702 control.super = L2CAP_SUPER_RNR;
5703 l2cap_send_sframe(chan, &control);
5706 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5707 chan->unacked_frames > 0)
5708 __set_retrans_timer(chan);
5710 /* Send pending iframes */
5711 l2cap_ertm_send(chan);
/* CONN_SEND_FBIT still set means no i/s-frame carried the F-bit;
 * send an explicit RR so the peer gets it.
 */
5713 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5714 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5715 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5718 control.super = L2CAP_SUPER_RR;
5719 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list, tracking the list tail through
 * @last_frag for O(1) appends, and keep skb's aggregate length/size
 * accounting consistent.
 */
5723 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5724 struct sk_buff **last_frag)
5726 /* skb->len reflects data in skb as well as all fragments
5727 * skb->data_len reflects only data in fragments
5729 if (!skb_has_frag_list(skb))
5730 skb_shinfo(skb)->frag_list = new_frag;
5732 new_frag->next = NULL;
5734 (*last_frag)->next = new_frag;
5735 *last_frag = new_frag;
5737 skb->len += new_frag->len;
5738 skb->data_len += new_frag->len;
5739 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM/streaming PDUs according to the SAR bits.
 *
 * Unsegmented frames are delivered directly; START frames read the SDU
 * length prefix and begin accumulation, CONTINUE frames are appended,
 * and the END case delivers the completed SDU — with the accumulated
 * skb freed on any length-mismatch error path.
 */
5742 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5743 struct l2cap_ctrl *control)
5747 switch (control->sar) {
5748 case L2CAP_SAR_UNSEGMENTED:
5752 err = chan->ops->recv(chan, skb);
5755 case L2CAP_SAR_START:
/* The first fragment carries the total SDU length prefix. */
5759 chan->sdu_len = get_unaligned_le16(skb->data);
5760 skb_pull(skb, L2CAP_SDULEN_SIZE);
5762 if (chan->sdu_len > chan->imtu) {
5767 if (skb->len >= chan->sdu_len)
5771 chan->sdu_last_frag = skb;
5777 case L2CAP_SAR_CONTINUE:
5781 append_skb_frag(chan->sdu, skb,
5782 &chan->sdu_last_frag);
5785 if (chan->sdu->len >= chan->sdu_len)
5795 append_skb_frag(chan->sdu, skb,
5796 &chan->sdu_last_frag);
5799 if (chan->sdu->len != chan->sdu_len)
5802 err = chan->ops->recv(chan, chan->sdu);
5805 /* Reassembly complete */
5807 chan->sdu_last_frag = NULL;
5815 kfree_skb(chan->sdu);
5817 chan->sdu_last_frag = NULL;
/* Resegment queued outbound data after a channel move changes the MTU.
 * NOTE(review): the body is elided from this listing; upstream this is
 * effectively a placeholder that returns 0 - confirm before relying on
 * resegmentation actually happening here.
 */
5824 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition into the ERTM TX state machine.  Only
 * meaningful for ERTM channels; other modes bail out (the early return
 * after the mode test is elided from this listing).
 */
5830 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5834 if (chan->mode != L2CAP_MODE_ERTM)
5837 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5838 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ holding queue in sequence order.  While not locally
 * busy, pull the frame matching buffer_seq out of srej_q, advance
 * buffer_seq, and pass the frame to reassembly.  Stops at the first
 * gap (missing txseq).  Once the queue is empty the recovery is done:
 * return to the RECV state and ack the peer.
 */
5841 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5844 /* Pass sequential frames to l2cap_reassemble_sdu()
5845 * until a gap is encountered.
5848 BT_DBG("chan %p", chan);
5850 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5851 struct sk_buff *skb;
5852 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5853 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5855 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5860 skb_unlink(skb, &chan->srej_q);
5861 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5862 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All requested retransmissions received: leave SREJ_SENT. */
5867 if (skb_queue_empty(&chan->srej_q)) {
5868 chan->rx_state = L2CAP_RX_STATE_RECV;
5869 l2cap_send_ack(chan);
/* Handle a received SREJ s-frame: the peer asks for selective
 * retransmission of the frame at control->reqseq.  A reqseq equal to
 * next_tx_seq names a frame never sent - protocol violation, so
 * disconnect.  The retry counter of the requested frame is checked
 * against max_tx before retransmitting.  CONN_SREJ_ACT plus
 * srej_save_reqseq guard against retransmitting the same frame twice
 * when both the P-bit and F-bit copies of the SREJ arrive.
 * NOTE(review): else-branch braces and returns are elided from this
 * listing; the poll/final branch nesting should be confirmed upstream.
 */
5875 static void l2cap_handle_srej(struct l2cap_chan *chan,
5876 struct l2cap_ctrl *control)
5878 struct sk_buff *skb;
5880 BT_DBG("chan %p, control %p", chan, control);
5882 if (control->reqseq == chan->next_tx_seq) {
5883 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5884 l2cap_send_disconn_req(chan, ECONNRESET);
5888 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5891 BT_DBG("Seq %d not available for retransmission",
/* Enforce the negotiated maximum transmit count. */
5896 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5897 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5898 l2cap_send_disconn_req(chan, ECONNRESET);
5902 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5904 if (control->poll) {
5905 l2cap_pass_to_tx(chan, control);
/* P-bit set: respond with F-bit on the retransmission. */
5907 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5908 l2cap_retransmit(chan, control);
5909 l2cap_ertm_send(chan);
5911 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5912 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5913 chan->srej_save_reqseq = control->reqseq;
5916 l2cap_pass_to_tx_fbit(chan, control);
5918 if (control->final) {
/* Skip the retransmit if this SREJ was already acted on. */
5919 if (chan->srej_save_reqseq != control->reqseq ||
5920 !test_and_clear_bit(CONN_SREJ_ACT,
5922 l2cap_retransmit(chan, control);
5924 l2cap_retransmit(chan, control);
5925 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5926 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5927 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ s-frame: the peer requests retransmission of
 * everything from control->reqseq onward.  As with SREJ, a reqseq
 * equal to next_tx_seq is invalid and causes a disconnect, and the
 * per-frame retry limit (max_tx) is enforced.  CONN_REJ_ACT prevents a
 * duplicate retransmit-all when the F-bit copy of the REJ follows the
 * P-bit exchange.
 */
5933 static void l2cap_handle_rej(struct l2cap_chan *chan,
5934 struct l2cap_ctrl *control)
5936 struct sk_buff *skb;
5938 BT_DBG("chan %p, control %p", chan, control);
5940 if (control->reqseq == chan->next_tx_seq) {
5941 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5942 l2cap_send_disconn_req(chan, ECONNRESET);
5946 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5948 if (chan->max_tx && skb &&
5949 bt_cb(skb)->control.retries >= chan->max_tx) {
5950 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5951 l2cap_send_disconn_req(chan, ECONNRESET);
5955 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5957 l2cap_pass_to_tx(chan, control);
5959 if (control->final) {
/* Only retransmit if a previous REJ wasn't already handled. */
5960 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5961 l2cap_retransmit_all(chan, control);
5963 l2cap_retransmit_all(chan, control);
5964 l2cap_ertm_send(chan);
5965 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5966 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame sequence number relative to the receive
 * window and SREJ recovery state.  Returns one of:
 *   EXPECTED / EXPECTED_SREJ    - in-order (or the next SREJ'd frame);
 *   UNEXPECTED / UNEXPECTED_SREJ- a gap: SREJ recovery is needed;
 *   DUPLICATE / DUPLICATE_SREJ  - already received or already queued;
 *   INVALID                     - outside the window, disconnect;
 *   INVALID_IGNORE              - outside the window but safely
 *                                 droppable (see "double poll" note).
 * All comparisons are done as offsets from last_acked_seq so that
 * modular sequence-number wraparound is handled uniformly.
 */
5970 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5972 BT_DBG("chan %p, txseq %d", chan, txseq);
5974 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5975 chan->expected_tx_seq);
/* In SREJ recovery, first weed out frames beyond the tx window. */
5977 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5978 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5980 /* See notes below regarding "double poll" and
5983 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5984 BT_DBG("Invalid/Ignore - after SREJ");
5985 return L2CAP_TXSEQ_INVALID_IGNORE;
5987 BT_DBG("Invalid - in window after SREJ sent");
5988 return L2CAP_TXSEQ_INVALID;
5992 if (chan->srej_list.head == txseq) {
5993 BT_DBG("Expected SREJ");
5994 return L2CAP_TXSEQ_EXPECTED_SREJ;
5997 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5998 BT_DBG("Duplicate SREJ - txseq already stored");
5999 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6002 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6003 BT_DBG("Unexpected SREJ - not requested");
6004 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6008 if (chan->expected_tx_seq == txseq) {
6009 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6011 BT_DBG("Invalid - txseq outside tx window");
6012 return L2CAP_TXSEQ_INVALID;
6015 return L2CAP_TXSEQ_EXPECTED;
6019 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6020 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6021 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6022 return L2CAP_TXSEQ_DUPLICATE;
6025 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6026 /* A source of invalid packets is a "double poll" condition,
6027 * where delays cause us to send multiple poll packets. If
6028 * the remote stack receives and processes both polls,
6029 * sequence numbers can wrap around in such a way that a
6030 * resent frame has a sequence number that looks like new data
6031 * with a sequence gap. This would trigger an erroneous SREJ
6034 * Fortunately, this is impossible with a tx window that's
6035 * less than half of the maximum sequence number, which allows
6036 * invalid frames to be safely ignored.
6038 * With tx window sizes greater than half of the tx window
6039 * maximum, the frame is invalid and cannot be ignored. This
6040 * causes a disconnect.
6043 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6044 BT_DBG("Invalid/Ignore - txseq outside tx window");
6045 return L2CAP_TXSEQ_INVALID_IGNORE;
6047 BT_DBG("Invalid - txseq outside tx window");
6048 return L2CAP_TXSEQ_INVALID;
6051 BT_DBG("Unexpected - txseq indicates missing frames");
6052 return L2CAP_TXSEQ_UNEXPECTED;
/* RECV-state handler of the ERTM receive state machine.  Dispatches on
 * the RX event: in-sequence I-frames are acked and reassembled; a gap
 * queues the frame on srej_q, sends SREJs and moves to SREJ_SENT;
 * RR/RNR/REJ/SREJ s-frames drive retransmission and the remote-busy
 * flag.  Any skb not queued for later (skb_in_use stays false) is
 * freed on exit.
 * NOTE(review): the lines that set skb_in_use = true, the breaks, and
 * the final return are elided from this listing - confirm upstream.
 */
6056 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6057 struct l2cap_ctrl *control,
6058 struct sk_buff *skb, u8 event)
6061 bool skb_in_use = false;
6063 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6067 case L2CAP_EV_RECV_IFRAME:
6068 switch (l2cap_classify_txseq(chan, control->txseq)) {
6069 case L2CAP_TXSEQ_EXPECTED:
6070 l2cap_pass_to_tx(chan, control);
/* While locally busy we drop even expected frames; the peer
 * will retransmit once RNR is cleared. */
6072 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6073 BT_DBG("Busy, discarding expected seq %d",
6078 chan->expected_tx_seq = __next_seq(chan,
6081 chan->buffer_seq = chan->expected_tx_seq;
6084 err = l2cap_reassemble_sdu(chan, skb, control);
6088 if (control->final) {
6089 if (!test_and_clear_bit(CONN_REJ_ACT,
6090 &chan->conn_state)) {
6092 l2cap_retransmit_all(chan, control);
6093 l2cap_ertm_send(chan);
6097 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6098 l2cap_send_ack(chan);
6100 case L2CAP_TXSEQ_UNEXPECTED:
6101 l2cap_pass_to_tx(chan, control);
6103 /* Can't issue SREJ frames in the local busy state.
6104 * Drop this frame, it will be seen as missing
6105 * when local busy is exited.
6107 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6108 BT_DBG("Busy, discarding unexpected seq %d",
6113 /* There was a gap in the sequence, so an SREJ
6114 * must be sent for each missing frame. The
6115 * current frame is stored for later use.
6117 skb_queue_tail(&chan->srej_q, skb);
6119 BT_DBG("Queued %p (queue len %d)", skb,
6120 skb_queue_len(&chan->srej_q));
6122 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6123 l2cap_seq_list_clear(&chan->srej_list);
6124 l2cap_send_srej(chan, control->txseq);
6126 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6128 case L2CAP_TXSEQ_DUPLICATE:
6129 l2cap_pass_to_tx(chan, control);
6131 case L2CAP_TXSEQ_INVALID_IGNORE:
6133 case L2CAP_TXSEQ_INVALID:
6135 l2cap_send_disconn_req(chan, ECONNRESET);
6139 case L2CAP_EV_RECV_RR:
6140 l2cap_pass_to_tx(chan, control);
6141 if (control->final) {
6142 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6144 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6145 !__chan_is_moving(chan)) {
6147 l2cap_retransmit_all(chan, control);
6150 l2cap_ertm_send(chan);
6151 } else if (control->poll) {
6152 l2cap_send_i_or_rr_or_rnr(chan);
6154 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6155 &chan->conn_state) &&
6156 chan->unacked_frames)
6157 __set_retrans_timer(chan);
6159 l2cap_ertm_send(chan);
6162 case L2CAP_EV_RECV_RNR:
6163 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6164 l2cap_pass_to_tx(chan, control);
6165 if (control && control->poll) {
6166 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6167 l2cap_send_rr_or_rnr(chan, 0);
6169 __clear_retrans_timer(chan);
6170 l2cap_seq_list_clear(&chan->retrans_list);
6172 case L2CAP_EV_RECV_REJ:
6173 l2cap_handle_rej(chan, control);
6175 case L2CAP_EV_RECV_SREJ:
6176 l2cap_handle_srej(chan, control);
/* Frame was neither delivered nor queued: release it. */
6182 if (skb && !skb_in_use) {
6183 BT_DBG("Freeing %p", skb);
/* SREJ_SENT-state handler of the ERTM receive state machine: we are
 * waiting for selectively-rejected frames to be retransmitted.  Every
 * usable I-frame is parked on srej_q; the expected SREJ'd frame pops
 * its entry off srej_list and triggers in-order delivery via
 * l2cap_rx_queued_iframes().  New gaps extend the SREJ list.  S-frames
 * are handled much as in the RECV state, with srej-tail polling.
 * NOTE(review): skb_in_use assignments, breaks and the return are
 * elided from this listing - confirm upstream.
 */
6190 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6191 struct l2cap_ctrl *control,
6192 struct sk_buff *skb, u8 event)
6195 u16 txseq = control->txseq;
6196 bool skb_in_use = false;
6198 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6202 case L2CAP_EV_RECV_IFRAME:
6203 switch (l2cap_classify_txseq(chan, txseq)) {
6204 case L2CAP_TXSEQ_EXPECTED:
6205 /* Keep frame for reassembly later */
6206 l2cap_pass_to_tx(chan, control);
6207 skb_queue_tail(&chan->srej_q, skb);
6209 BT_DBG("Queued %p (queue len %d)", skb,
6210 skb_queue_len(&chan->srej_q));
6212 chan->expected_tx_seq = __next_seq(chan, txseq);
6214 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* Retransmission we asked for has arrived: drop its
 * srej_list entry and try to deliver in order. */
6215 l2cap_seq_list_pop(&chan->srej_list);
6217 l2cap_pass_to_tx(chan, control);
6218 skb_queue_tail(&chan->srej_q, skb);
6220 BT_DBG("Queued %p (queue len %d)", skb,
6221 skb_queue_len(&chan->srej_q));
6223 err = l2cap_rx_queued_iframes(chan);
6228 case L2CAP_TXSEQ_UNEXPECTED:
6229 /* Got a frame that can't be reassembled yet.
6230 * Save it for later, and send SREJs to cover
6231 * the missing frames.
6233 skb_queue_tail(&chan->srej_q, skb);
6235 BT_DBG("Queued %p (queue len %d)", skb,
6236 skb_queue_len(&chan->srej_q));
6238 l2cap_pass_to_tx(chan, control);
6239 l2cap_send_srej(chan, control->txseq);
6241 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6242 /* This frame was requested with an SREJ, but
6243 * some expected retransmitted frames are
6244 * missing. Request retransmission of missing
6247 skb_queue_tail(&chan->srej_q, skb);
6249 BT_DBG("Queued %p (queue len %d)", skb,
6250 skb_queue_len(&chan->srej_q));
6252 l2cap_pass_to_tx(chan, control);
6253 l2cap_send_srej_list(chan, control->txseq);
6255 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6256 /* We've already queued this frame. Drop this copy. */
6257 l2cap_pass_to_tx(chan, control);
6259 case L2CAP_TXSEQ_DUPLICATE:
6260 /* Expecting a later sequence number, so this frame
6261 * was already received. Ignore it completely.
6264 case L2CAP_TXSEQ_INVALID_IGNORE:
6266 case L2CAP_TXSEQ_INVALID:
6268 l2cap_send_disconn_req(chan, ECONNRESET);
6272 case L2CAP_EV_RECV_RR:
6273 l2cap_pass_to_tx(chan, control);
6274 if (control->final) {
6275 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6277 if (!test_and_clear_bit(CONN_REJ_ACT,
6278 &chan->conn_state)) {
6280 l2cap_retransmit_all(chan, control);
6283 l2cap_ertm_send(chan);
6284 } else if (control->poll) {
6285 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6286 &chan->conn_state) &&
6287 chan->unacked_frames) {
6288 __set_retrans_timer(chan);
/* Answer the poll by re-sending the tail SREJ with F-bit. */
6291 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6292 l2cap_send_srej_tail(chan);
6294 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6295 &chan->conn_state) &&
6296 chan->unacked_frames)
6297 __set_retrans_timer(chan);
6299 l2cap_send_ack(chan);
6302 case L2CAP_EV_RECV_RNR:
6303 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6304 l2cap_pass_to_tx(chan, control);
6305 if (control->poll) {
6306 l2cap_send_srej_tail(chan);
6308 struct l2cap_ctrl rr_control;
6309 memset(&rr_control, 0, sizeof(rr_control));
6310 rr_control.sframe = 1;
6311 rr_control.super = L2CAP_SUPER_RR;
6312 rr_control.reqseq = chan->buffer_seq;
6313 l2cap_send_sframe(chan, &rr_control);
6317 case L2CAP_EV_RECV_REJ:
6318 l2cap_handle_rej(chan, control);
6320 case L2CAP_EV_RECV_SREJ:
6321 l2cap_handle_srej(chan, control);
/* Frame was neither delivered nor queued: release it. */
6325 if (skb && !skb_in_use) {
6326 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return the RX machine to RECV, adopt
 * the MTU of the new link (block_mtu for a high-speed controller,
 * acl_mtu otherwise - the hs_hcon test line is elided from this
 * listing), and resegment pending outbound data for the new MTU.
 */
6333 static int l2cap_finish_move(struct l2cap_chan *chan)
6335 BT_DBG("chan %p", chan);
6337 chan->rx_state = L2CAP_RX_STATE_RECV;
6340 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6342 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6344 return l2cap_resegment(chan);
/* WAIT_P state (after a channel move): wait for the peer's poll.  On
 * poll receipt, acknowledge up to reqseq, rewind the TX queue and
 * next_tx_seq to the peer's expectation, finish the move, and answer
 * with an F-bit frame.  The triggering event is then re-dispatched to
 * the RECV-state handler (with the skb dropped for I-frames - the
 * event test at 6379 presumably strips the payload; confirm upstream).
 */
6347 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6348 struct l2cap_ctrl *control,
6349 struct sk_buff *skb, u8 event)
6353 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6359 l2cap_process_reqseq(chan, control->reqseq);
6361 if (!skb_queue_empty(&chan->tx_q))
6362 chan->tx_send_head = skb_peek(&chan->tx_q);
6364 chan->tx_send_head = NULL;
6366 /* Rewind next_tx_seq to the point expected
6369 chan->next_tx_seq = control->reqseq;
6370 chan->unacked_frames = 0;
6372 err = l2cap_finish_move(chan);
6376 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6377 l2cap_send_i_or_rr_or_rnr(chan);
6379 if (event == L2CAP_EV_RECV_IFRAME)
6382 return l2cap_rx_state_recv(chan, control, NULL, event);
/* WAIT_F state (after a channel move): wait for the peer's F-bit
 * response.  Non-final frames are ignored (early-exit elided from this
 * listing).  On the final frame: clear remote-busy, return to RECV,
 * rewind TX state to the peer's reqseq, adopt the new link's MTU,
 * resegment, and process the frame through the RECV-state handler.
 */
6385 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6386 struct l2cap_ctrl *control,
6387 struct sk_buff *skb, u8 event)
6391 if (!control->final)
6394 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6396 chan->rx_state = L2CAP_RX_STATE_RECV;
6397 l2cap_process_reqseq(chan, control->reqseq);
6399 if (!skb_queue_empty(&chan->tx_q))
6400 chan->tx_send_head = skb_peek(&chan->tx_q);
6402 chan->tx_send_head = NULL;
6404 /* Rewind next_tx_seq to the point expected
6407 chan->next_tx_seq = control->reqseq;
6408 chan->unacked_frames = 0;
/* hs_hcon test elided: block_mtu for high-speed, else acl_mtu. */
6411 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6413 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6415 err = l2cap_resegment(chan);
6418 err = l2cap_rx_state_recv(chan, control, skb, event);
/* A reqseq is valid iff it acknowledges only frames currently in
 * flight: its modular offset back from next_tx_seq must not exceed the
 * span of unacked sequence numbers.
 */
6423 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6425 /* Make sure reqseq is for a packet that has been sent but not acked */
6428 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6429 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: validate the frame's reqseq, then
 * route the event to the handler for the current RX state.  An invalid
 * reqseq (acknowledging frames never sent) is a protocol violation and
 * tears the channel down.
 */
6432 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6433 struct sk_buff *skb, u8 event)
6437 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6438 control, skb, event, chan->rx_state);
6440 if (__valid_reqseq(chan, control->reqseq)) {
6441 switch (chan->rx_state) {
6442 case L2CAP_RX_STATE_RECV:
6443 err = l2cap_rx_state_recv(chan, control, skb, event);
6445 case L2CAP_RX_STATE_SREJ_SENT:
6446 err = l2cap_rx_state_srej_sent(chan, control, skb,
6449 case L2CAP_RX_STATE_WAIT_P:
6450 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6452 case L2CAP_RX_STATE_WAIT_F:
6453 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6460 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6461 control->reqseq, chan->next_tx_seq,
6462 chan->expected_ack_seq);
6463 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: no retransmission, so only exactly
 * in-sequence frames are reassembled; any out-of-sequence frame drops
 * the partial SDU, resets reassembly state and frees the skb (frame
 * loss is acceptable in this mode).  The tracking sequence variables
 * always advance to follow the received txseq.
 */
6469 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6470 struct sk_buff *skb)
6474 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6477 if (l2cap_classify_txseq(chan, control->txseq) ==
6478 L2CAP_TXSEQ_EXPECTED) {
6479 l2cap_pass_to_tx(chan, control);
6481 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6482 __next_seq(chan, chan->buffer_seq));
6484 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6486 l2cap_reassemble_sdu(chan, skb, control);
/* Out of sequence: abandon any partial SDU. */
6489 kfree_skb(chan->sdu);
6492 chan->sdu_last_frag = NULL;
6496 BT_DBG("Freeing %p", skb);
/* Always track the latest txseq, even for dropped frames. */
6501 chan->last_acked_seq = control->txseq;
6502 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames on a channel.  Unpacks
 * the control field, drops frames failing the FCS check, computes the
 * payload length net of SDU-length and FCS overhead, and enforces the
 * negotiated MPS.  I-frames go through l2cap_rx()/l2cap_stream_rx();
 * s-frames are mapped to RX events via rx_func_to_event (indexed by
 * the 2-bit super field).  Bad F/P-bit combinations and any s-frame in
 * streaming mode cause a disconnect.
 * NOTE(review): drop/return paths and the final kfree are elided from
 * this listing - confirm upstream.
 */
6507 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6509 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6513 __unpack_control(chan, skb);
6518 * We can just drop the corrupted I-frame here.
6519 * Receiver will miss it and start proper recovery
6520 * procedures and ask for retransmission.
6522 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length prefix and the FCS. */
6525 if (!control->sframe && control->sar == L2CAP_SAR_START)
6526 len -= L2CAP_SDULEN_SIZE;
6528 if (chan->fcs == L2CAP_FCS_CRC16)
6529 len -= L2CAP_FCS_SIZE;
6531 if (len > chan->mps) {
6532 l2cap_send_disconn_req(chan, ECONNRESET);
6536 if (!control->sframe) {
6539 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6540 control->sar, control->reqseq, control->final,
6543 /* Validate F-bit - F=0 always valid, F=1 only
6544 * valid in TX WAIT_F
6546 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6549 if (chan->mode != L2CAP_MODE_STREAMING) {
6550 event = L2CAP_EV_RECV_IFRAME;
6551 err = l2cap_rx(chan, control, skb, event);
6553 err = l2cap_stream_rx(chan, control, skb);
6557 l2cap_send_disconn_req(chan, ECONNRESET);
/* S-frame path: super field indexes directly into this table. */
6559 const u8 rx_func_to_event[4] = {
6560 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6561 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6564 /* Only I-frames are expected in streaming mode */
6565 if (chan->mode == L2CAP_MODE_STREAMING)
6568 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6569 control->reqseq, control->final, control->poll,
6573 BT_ERR("Trailing bytes: %d in sframe", len);
6574 l2cap_send_disconn_req(chan, ECONNRESET);
6578 /* Validate F and P bits */
6579 if (control->final && (control->poll ||
6580 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6583 event = rx_func_to_event[control->super];
6584 if (l2cap_rx(chan, control, skb, event))
6585 l2cap_send_disconn_req(chan, ECONNRESET);
/* Deliver an inbound frame to the connection-oriented channel bound to
 * this CID.  Unknown CIDs are dropped (with an A2MP channel-create
 * special case); the channel is processed under l2cap_chan_lock.
 * Basic mode enforces imtu and delivers directly; ERTM/streaming go
 * through l2cap_data_rcv().
 * NOTE(review): the drop/kfree_skb paths and gotos between these lines
 * are elided from this listing - confirm upstream.
 */
6595 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6596 struct sk_buff *skb)
6598 struct l2cap_chan *chan;
6600 chan = l2cap_get_chan_by_scid(conn, cid);
6602 if (cid == L2CAP_CID_A2MP) {
6603 chan = a2mp_channel_create(conn, skb);
6609 l2cap_chan_lock(chan);
6611 BT_DBG("unknown cid 0x%4.4x", cid);
6612 /* Drop packet and return */
6618 BT_DBG("chan %p, len %d", chan, skb->len);
6620 if (chan->state != BT_CONNECTED)
6623 switch (chan->mode) {
6624 case L2CAP_MODE_BASIC:
6625 /* If socket recv buffers overflows we drop data here
6626 * which is *bad* because L2CAP has to be reliable.
6627 * But we don't have any other choice. L2CAP doesn't
6628 * provide flow control mechanism. */
6630 if (chan->imtu < skb->len)
6633 if (!chan->ops->recv(chan, skb))
6637 case L2CAP_MODE_ERTM:
6638 case L2CAP_MODE_STREAMING:
6639 l2cap_data_rcv(chan, skb);
6643 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6651 l2cap_chan_unlock(chan);
/* Deliver a connectionless (UCD) frame to the global channel listening
 * on this PSM.  Only valid on ACL links.  The remote address and PSM
 * are stashed in the skb control block so the socket layer can fill in
 * msg_name on recvmsg.  Oversized or unmatched frames are dropped
 * (drop/free lines elided from this listing).
 */
6654 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6655 struct sk_buff *skb)
6657 struct hci_conn *hcon = conn->hcon;
6658 struct l2cap_chan *chan;
6660 if (hcon->type != ACL_LINK)
6663 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6668 BT_DBG("chan %p, len %d", chan, skb->len);
6670 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6673 if (chan->imtu < skb->len)
6676 /* Store remote BD_ADDR and PSM for msg_name */
6677 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6678 bt_cb(skb)->psm = psm;
6680 if (!chan->ops->recv(chan, skb))
/* Deliver a frame on the fixed ATT channel (LE links only).  Frames
 * from blacklisted remote devices and frames exceeding imtu are
 * dropped (drop/free lines elided from this listing).
 */
6687 static void l2cap_att_channel(struct l2cap_conn *conn,
6688 struct sk_buff *skb)
6690 struct hci_conn *hcon = conn->hcon;
6691 struct l2cap_chan *chan;
6693 if (hcon->type != LE_LINK)
6696 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6697 &hcon->src, &hcon->dst);
6701 BT_DBG("chan %p, len %d", chan, skb->len);
6703 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6706 if (chan->imtu < skb->len)
6709 if (!chan->ops->recv(chan, skb))
/* Parse a complete L2CAP frame: strip the basic header, verify the
 * advertised length matches the skb, and dispatch on CID - signalling,
 * connectionless (PSM-prefixed), ATT, LE signalling, SMP, or a
 * connection-oriented data channel.  Takes ownership of skb.
 */
6716 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6718 struct l2cap_hdr *lh = (void *) skb->data;
6722 skb_pull(skb, L2CAP_HDR_SIZE);
6723 cid = __le16_to_cpu(lh->cid);
6724 len = __le16_to_cpu(lh->len);
/* Header length must match the payload exactly; else drop. */
6726 if (len != skb->len) {
6731 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6734 case L2CAP_CID_SIGNALING:
6735 l2cap_sig_channel(conn, skb);
6738 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM before the payload. */
6739 psm = get_unaligned((__le16 *) skb->data);
6740 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6741 l2cap_conless_channel(conn, psm, skb);
6745 l2cap_att_channel(conn, skb);
6748 case L2CAP_CID_LE_SIGNALING:
6749 l2cap_le_sig_channel(conn, skb);
6753 if (smp_sig_channel(conn, skb))
6754 l2cap_conn_del(conn->hcon, EACCES);
6758 l2cap_data_channel(conn, cid, skb);
6763 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for incoming connections: scan listening channels and
 * compute the link-mode flags (accept / role-switch).  lm1 accumulates
 * flags from channels bound exactly to this adapter's address, lm2
 * from wildcard (BDADDR_ANY) listeners; exact matches take precedence.
 * NOTE(review): the line setting 'exact' is elided from this listing -
 * presumably set alongside lm1; confirm upstream.
 */
6765 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6767 int exact = 0, lm1 = 0, lm2 = 0;
6768 struct l2cap_chan *c;
6770 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6772 /* Find listening sockets and check their link_mode */
6773 read_lock(&chan_list_lock);
6774 list_for_each_entry(c, &chan_list, global_l) {
6775 if (c->state != BT_LISTEN)
6778 if (!bacmp(&c->src, &hdev->bdaddr)) {
6779 lm1 |= HCI_LM_ACCEPT;
6780 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6781 lm1 |= HCI_LM_MASTER;
6783 } else if (!bacmp(&c->src, BDADDR_ANY)) {
6784 lm2 |= HCI_LM_ACCEPT;
6785 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6786 lm2 |= HCI_LM_MASTER;
6789 read_unlock(&chan_list_lock);
6791 return exact ? lm1 : lm2;
/* HCI connect-complete callback: on success create/attach the L2CAP
 * connection and mark it ready; on failure tear it down with the
 * translated errno (the status test between these lines is elided
 * from this listing).
 */
6794 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6796 struct l2cap_conn *conn;
6798 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6801 conn = l2cap_conn_add(hcon);
6803 l2cap_conn_ready(conn);
6805 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI disconnect-indication callback: report the reason this side
 * wants for the disconnect; defaults to remote-user-terminated when no
 * L2CAP connection state exists (the NULL test line is elided).
 */
6809 int l2cap_disconn_ind(struct hci_conn *hcon)
6811 struct l2cap_conn *conn = hcon->l2cap_data;
6813 BT_DBG("hcon %p", hcon);
6816 return HCI_ERROR_REMOTE_USER_TERM;
6817 return conn->disc_reason;
/* HCI disconnect-complete callback: tear down the L2CAP connection,
 * translating the HCI reason code to an errno.
 */
6820 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6822 BT_DBG("hcon %p reason %d", hcon, reason);
6824 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a link-encryption change on a connection-oriented channel.
 * Encryption lost: medium-security channels get a grace timer
 * (L2CAP_ENC_TIMEOUT) to re-encrypt, high-security channels are closed
 * immediately.  Encryption regained: clear the pending timer on
 * medium-security channels.
 */
6827 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6829 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6832 if (encrypt == 0x00) {
6833 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6834 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6835 } else if (chan->sec_level == BT_SECURITY_HIGH)
6836 l2cap_chan_close(chan, ECONNREFUSED);
6838 if (chan->sec_level == BT_SECURITY_MEDIUM)
6839 __clear_chan_timer(chan);
/* HCI security-change callback: walk every channel on the connection
 * and advance its state machine per the new security status.
 * LE links: kick SMP key distribution and cancel the security timer.
 * BR/EDR links, per channel (under chan_lock):
 *  - A2MP fixed channels are skipped;
 *  - the ATT channel becomes ready once encrypted;
 *  - channels with a pending connect are left alone;
 *  - connected/configuring channels resume and re-check encryption;
 *  - BT_CONNECT channels (re)start the connection on success;
 *  - BT_CONNECT2 channels emit the deferred connect response
 *    (pend/success/security-block) and, on success, the first
 *    configure request.
 * NOTE(review): several early returns/branches are elided from this
 * listing - confirm exact flow upstream.
 */
6843 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6845 struct l2cap_conn *conn = hcon->l2cap_data;
6846 struct l2cap_chan *chan;
6851 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6853 if (hcon->type == LE_LINK) {
6854 if (!status && encrypt)
6855 smp_distribute_keys(conn, 0);
6856 cancel_delayed_work(&conn->security_timer);
6859 mutex_lock(&conn->chan_lock);
6861 list_for_each_entry(chan, &conn->chan_l, list) {
6862 l2cap_chan_lock(chan);
6864 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6865 state_to_string(chan->state));
6867 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6868 l2cap_chan_unlock(chan);
6872 if (chan->scid == L2CAP_CID_ATT) {
6873 if (!status && encrypt) {
6874 chan->sec_level = hcon->sec_level;
6875 l2cap_chan_ready(chan);
6878 l2cap_chan_unlock(chan);
6882 if (!__l2cap_no_conn_pending(chan)) {
6883 l2cap_chan_unlock(chan);
6887 if (!status && (chan->state == BT_CONNECTED ||
6888 chan->state == BT_CONFIG)) {
6889 chan->ops->resume(chan);
6890 l2cap_check_encryption(chan, encrypt);
6891 l2cap_chan_unlock(chan);
6895 if (chan->state == BT_CONNECT) {
6897 l2cap_start_connection(chan);
6899 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6900 } else if (chan->state == BT_CONNECT2) {
6901 struct l2cap_conn_rsp rsp;
6905 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6906 res = L2CAP_CR_PEND;
6907 stat = L2CAP_CS_AUTHOR_PEND;
6908 chan->ops->defer(chan);
6910 l2cap_state_change(chan, BT_CONFIG);
6911 res = L2CAP_CR_SUCCESS;
6912 stat = L2CAP_CS_NO_INFO;
/* Security failure on an inbound connect: refuse it. */
6915 l2cap_state_change(chan, BT_DISCONN);
6916 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6917 res = L2CAP_CR_SEC_BLOCK;
6918 stat = L2CAP_CS_NO_INFO;
6921 rsp.scid = cpu_to_le16(chan->dcid);
6922 rsp.dcid = cpu_to_le16(chan->scid);
6923 rsp.result = cpu_to_le16(res);
6924 rsp.status = cpu_to_le16(stat);
6925 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6928 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6929 res == L2CAP_CR_SUCCESS) {
6931 set_bit(CONF_REQ_SENT, &chan->conf_state);
6932 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6934 l2cap_build_conf_req(chan, buf),
6936 chan->num_conf_req++;
6940 l2cap_chan_unlock(chan);
6943 mutex_unlock(&conn->chan_lock);
/* HCI ACL data entry point: reassemble L2CAP frames that may span
 * multiple ACL fragments.  Start fragments must carry at least the
 * basic L2CAP header; if the whole frame fits in one fragment it is
 * dispatched immediately, otherwise an rx_skb of the full frame size
 * is allocated and continuation fragments are appended until rx_len
 * reaches zero.  Malformed sequences (unexpected start/continuation,
 * over-length data) discard state and mark the connection unreliable.
 * NOTE(review): the flags switch header, drop labels, and several
 * returns are elided from this listing - confirm upstream.
 */
6948 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6950 struct l2cap_conn *conn = hcon->l2cap_data;
6951 struct l2cap_hdr *hdr;
6954 /* For AMP controller do not create l2cap conn */
6955 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6959 conn = l2cap_conn_add(hcon);
6964 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6968 case ACL_START_NO_FLUSH:
/* A start while a frame is still pending: discard the
 * half-built frame and flag the stream unreliable. */
6971 BT_ERR("Unexpected start frame (len %d)", skb->len);
6972 kfree_skb(conn->rx_skb);
6973 conn->rx_skb = NULL;
6975 l2cap_conn_unreliable(conn, ECOMM);
6978 /* Start fragment always begin with Basic L2CAP header */
6979 if (skb->len < L2CAP_HDR_SIZE) {
6980 BT_ERR("Frame is too short (len %d)", skb->len);
6981 l2cap_conn_unreliable(conn, ECOMM);
6985 hdr = (struct l2cap_hdr *) skb->data;
6986 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6988 if (len == skb->len) {
6989 /* Complete frame received */
6990 l2cap_recv_frame(conn, skb);
6994 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6996 if (skb->len > len) {
6997 BT_ERR("Frame is too long (len %d, expected len %d)",
6999 l2cap_conn_unreliable(conn, ECOMM);
7003 /* Allocate skb for the complete frame (with header) */
7004 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7008 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7010 conn->rx_len = len - skb->len;
7014 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7016 if (!conn->rx_len) {
7017 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7018 l2cap_conn_unreliable(conn, ECOMM);
7022 if (skb->len > conn->rx_len) {
7023 BT_ERR("Fragment is too long (len %d, expected %d)",
7024 skb->len, conn->rx_len);
7025 kfree_skb(conn->rx_skb);
7026 conn->rx_skb = NULL;
7028 l2cap_conn_unreliable(conn, ECOMM);
7032 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7034 conn->rx_len -= skb->len;
7036 if (!conn->rx_len) {
7037 /* Complete frame received. l2cap_recv_frame
7038 * takes ownership of the skb so set the global
7039 * rx_skb pointer to NULL first.
7041 struct sk_buff *rx_skb = conn->rx_skb;
7042 conn->rx_skb = NULL;
7043 l2cap_recv_frame(conn, rx_skb);
/* debugfs seq_file show: dump one line per global L2CAP channel
 * (addresses, state, psm, CIDs, MTUs, security level, mode) under the
 * channel-list read lock.
 */
7053 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7055 struct l2cap_chan *c;
7057 read_lock(&chan_list_lock);
7059 list_for_each_entry(c, &chan_list, global_l) {
7060 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7062 c->state, __le16_to_cpu(c->psm),
7063 c->scid, c->dcid, c->imtu, c->omtu,
7064 c->sec_level, c->mode);
7067 read_unlock(&chan_list_lock);
/* debugfs open: bind the seq_file single-show handler. */
7072 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7074 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (single_open-based
 * seq_file; the .read = seq_read line is elided from this listing).
 */
7077 static const struct file_operations l2cap_debugfs_fops = {
7078 .open = l2cap_debugfs_open,
7080 .llseek = seq_lseek,
7081 .release = single_release,
/* Dentry for the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit(). */
7084 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and, if the bluetooth
 * debugfs root exists, create the read-only "l2cap" channel-dump file.
 */
7086 int __init l2cap_init(void)
7090 err = l2cap_init_sockets();
7094 if (IS_ERR_OR_NULL(bt_debugfs))
7097 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7098 NULL, &l2cap_debugfs_fops);
/* Module exit: remove the debugfs entry and unregister sockets. */
7103 void l2cap_exit(void)
7105 debugfs_remove(l2cap_debugfs)
7106 l2cap_cleanup_sockets();
/* Runtime knob (0644): disable ERTM, forcing basic-mode channels. */
7109 module_param(disable_ertm, bool, 0644);
7110 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");