2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
/* NOTE(review): this chunk is a garbled extraction — interior lines are
 * missing and the file's original line numbers are fused into the text.
 * Code is kept verbatim; comments only.
 */
/* File-scope L2CAP state: advertised feature mask, fixed-channel bitmap,
 * the global list of registered channels guarded by an rwlock, and
 * forward declarations for signalling helpers defined later in the file.
 */
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations (the l2cap_send_cmd prototype appears truncated
 * by the extraction — its final parameter line is missing).
 */
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
/* Map an HCI address type to the L2CAP/bdaddr representation.
 * For LE links, public maps to BDADDR_LE_PUBLIC, otherwise random.
 * NOTE(review): the non-LE return path and closing braces are missing
 * from this extraction — presumably BR/EDR falls through; confirm
 * against the full source.
 */
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
/* Channel lookup helpers. The double-underscore variants walk
 * conn->chan_l and assume the caller holds conn->chan_lock; the plain
 * variants take the lock themselves. NOTE(review): loop bodies and
 * return statements are partially missing from this extraction; the
 * visible code is kept verbatim.
 */
74 /* ---- L2CAP channels ---- */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
/* Presumably the found channel is locked here before chan_lock is
 * dropped — the intervening lines are missing; verify in full source. */
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
/* Lookup by pending signalling-command identifier (chan->ident). */
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
/* Global lookup: any registered channel bound to this PSM and source
 * address. Caller must hold chan_list_lock.
 */
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM (or auto-allocate an odd dynamic PSM in
 * 0x1001..0x10ff when psm == 0) under chan_list_lock. A non-zero psm
 * that is already bound on this source address is rejected.
 * NOTE(review): error-return lines are missing from this extraction.
 */
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd by spec, hence the step of 2. */
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
/* Record a fixed SCID on the channel (assignment line not visible). */
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic CID on this connection; caller is
 * expected to hold conn->chan_lock for the __ lookup.
 */
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
/* State-change helpers: all funnel through chan->ops->state_change so
 * the socket layer observes every transition; the *_err variants pass
 * an error without changing chan->state at this call site.
 */
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state, 0);
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
239 chan->ops->state_change(chan, chan->state, err);
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
244 chan->ops->state_change(chan, chan->state, err);
/* ERTM timers: the retransmission timer is only armed while the
 * monitor timer is idle (they are mutually exclusive per the ERTM
 * state machine); arming the monitor timer cancels retransmission.
 */
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of a queue for the skb carrying ERTM tx sequence 'seq'. */
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
/* Allocate the backing array; size is rounded up to a power of two so
 * a 14-bit sequence number can be reduced to an index with a mask.
 * NOTE(review): the NULL check after kmalloc and the return statements
 * are missing from this extraction.
 */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove 'seq' from the list: O(1) when it is the head, otherwise a
 * walk to find the predecessor so the node can be unlinked.
 */
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset to empty; the early-out avoids rewriting an already-clear
 * array.
 */
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append at the tail; duplicates are ignored (membership is checked
 * first), and appending to an empty list also sets the head.
 */
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with
 * a reason derived from its current state, then notifies the owner via
 * ops->close and drops the reference the timer held.
 * NOTE(review): the default 'reason' assignment for other states is
 * missing from this extraction.
 */
396 static void l2cap_chan_timeout(struct work_struct *work)
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
400 struct l2cap_conn *conn = chan->conn;
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
416 l2cap_chan_close(chan, reason);
418 l2cap_chan_unlock(chan);
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
423 l2cap_chan_put(chan);
/* Allocate and register a new channel: zeroed, refcounted (kref),
 * added to the global chan_list, timer armed with the timeout handler
 * above, starting in BT_OPEN with CONF_NOT_COMPLETE set.
 */
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
/* kref release callback: unregister from the global list and free
 * (the kfree line is not visible in this extraction).
 */
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount))
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
/* Reset negotiable parameters to spec defaults before configuration. */
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection (caller holds conn->chan_lock):
 * assigns SCID/DCID and MTUs per channel type, seeds the extended
 * flowspec (EFS) locals, takes references on both the channel and the
 * underlying hci_conn, and links into conn->chan_l.
 */
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 if (chan->dcid == L2CAP_CID_ATT)
508 chan->scid = L2CAP_CID_ATT;
510 chan->scid = l2cap_alloc_cid(conn);
512 /* Alloc CID for connection-oriented socket */
513 chan->scid = l2cap_alloc_cid(conn);
514 chan->omtu = L2CAP_DEFAULT_MTU;
518 case L2CAP_CHAN_CONN_LESS:
519 /* Connectionless socket */
520 chan->scid = L2CAP_CID_CONN_LESS;
521 chan->dcid = L2CAP_CID_CONN_LESS;
522 chan->omtu = L2CAP_DEFAULT_MTU;
525 case L2CAP_CHAN_CONN_FIX_A2MP:
526 chan->scid = L2CAP_CID_A2MP;
527 chan->dcid = L2CAP_CID_A2MP;
528 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
529 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
533 /* Raw socket can send/recv signalling messages only */
534 chan->scid = L2CAP_CID_SIGNALING;
535 chan->dcid = L2CAP_CID_SIGNALING;
536 chan->omtu = L2CAP_DEFAULT_MTU;
539 chan->local_id = L2CAP_BESTEFFORT_ID;
540 chan->local_stype = L2CAP_SERV_BESTEFFORT;
541 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
542 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
543 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
544 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
546 l2cap_chan_hold(chan);
548 hci_conn_hold(conn->hcon);
550 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
553 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
555 mutex_lock(&conn->chan_lock);
556 __l2cap_chan_add(conn, chan);
557 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop the channel timer, unlink
 * and drop the references taken in __l2cap_chan_add, disconnect any
 * AMP logical link, notify the owner via ops->teardown, then release
 * mode-specific resources (ERTM timers/queues/seq-lists, streaming
 * tx queue).
 */
560 void l2cap_chan_del(struct l2cap_chan *chan, int err)
562 struct l2cap_conn *conn = chan->conn;
564 __clear_chan_timer(chan);
566 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
569 struct amp_mgr *mgr = conn->hcon->amp_mgr;
570 /* Delete from channel list */
571 list_del(&chan->list);
573 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an hci_conn reference. */
577 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
578 hci_conn_drop(conn->hcon);
580 if (mgr && mgr->bredr_chan == chan)
581 mgr->bredr_chan = NULL;
584 if (chan->hs_hchan) {
585 struct hci_chan *hs_hchan = chan->hs_hchan;
587 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
588 amp_disconnect_logical_link(hs_hchan);
591 chan->ops->teardown(chan, err);
593 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
597 case L2CAP_MODE_BASIC:
600 case L2CAP_MODE_ERTM:
601 __clear_retrans_timer(chan);
602 __clear_monitor_timer(chan);
603 __clear_ack_timer(chan);
605 skb_queue_purge(&chan->srej_q);
607 l2cap_seq_list_free(&chan->srej_list);
608 l2cap_seq_list_free(&chan->retrans_list);
612 case L2CAP_MODE_STREAMING:
613 skb_queue_purge(&chan->tx_q);
/* Close a channel according to its current state (switch labels for
 * BT_LISTEN / BT_CONNECTED / BT_CONNECT2 etc. are missing from this
 * extraction): established ACL channels send a disconnect request and
 * wait with a timer; a half-open incoming connection (BT_CONNECT2) is
 * refused with a connection response before deletion; otherwise the
 * channel is deleted or simply torn down.
 */
620 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
622 struct l2cap_conn *conn = chan->conn;
624 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
626 switch (chan->state) {
628 chan->ops->teardown(chan, 0);
633 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
634 conn->hcon->type == ACL_LINK) {
635 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
636 l2cap_send_disconn_req(chan, reason);
638 l2cap_chan_del(chan, reason);
642 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
643 conn->hcon->type == ACL_LINK) {
644 struct l2cap_conn_rsp rsp;
/* Refusal code depends on whether accept was being deferred to
 * userspace (security block) or the PSM was simply not served. */
647 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
648 result = L2CAP_CR_SEC_BLOCK;
650 result = L2CAP_CR_BAD_PSM;
652 l2cap_state_change(chan, BT_DISCONN);
654 rsp.scid = cpu_to_le16(chan->dcid);
655 rsp.dcid = cpu_to_le16(chan->scid);
656 rsp.result = cpu_to_le16(result);
657 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
658 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
662 l2cap_chan_del(chan, reason);
667 l2cap_chan_del(chan, reason);
671 chan->ops->teardown(chan, 0);
/* Derive the HCI authentication requirement from channel type, PSM and
 * security level. SDP (and 3DSP connectionless) traffic is demoted to
 * BT_SECURITY_SDP so discovery never forces pairing; high security on
 * those PSMs still requests MITM protection without bonding.
 * NOTE(review): several case labels and break statements are missing
 * from this extraction.
 */
676 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
678 switch (chan->chan_type) {
680 switch (chan->sec_level) {
681 case BT_SECURITY_HIGH:
682 return HCI_AT_DEDICATED_BONDING_MITM;
683 case BT_SECURITY_MEDIUM:
684 return HCI_AT_DEDICATED_BONDING;
686 return HCI_AT_NO_BONDING;
689 case L2CAP_CHAN_CONN_LESS:
690 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
691 if (chan->sec_level == BT_SECURITY_LOW)
692 chan->sec_level = BT_SECURITY_SDP;
694 if (chan->sec_level == BT_SECURITY_HIGH)
695 return HCI_AT_NO_BONDING_MITM;
697 return HCI_AT_NO_BONDING;
699 case L2CAP_CHAN_CONN_ORIENTED:
700 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
701 if (chan->sec_level == BT_SECURITY_LOW)
702 chan->sec_level = BT_SECURITY_SDP;
704 if (chan->sec_level == BT_SECURITY_HIGH)
705 return HCI_AT_NO_BONDING_MITM;
707 return HCI_AT_NO_BONDING;
711 switch (chan->sec_level) {
712 case BT_SECURITY_HIGH:
713 return HCI_AT_GENERAL_BONDING_MITM;
714 case BT_SECURITY_MEDIUM:
715 return HCI_AT_GENERAL_BONDING;
717 return HCI_AT_NO_BONDING;
/* Enforce the channel's security level: SMP for LE links, classic
 * HCI authentication (using the auth type above) for BR/EDR.
 */
723 /* Service level security */
724 int l2cap_chan_check_security(struct l2cap_chan *chan)
726 struct l2cap_conn *conn = chan->conn;
729 if (conn->hcon->type == LE_LINK)
730 return smp_conn_security(conn->hcon, chan->sec_level);
732 auth_type = l2cap_get_auth_type(chan);
734 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier under conn->lock;
 * identifiers wrap within the kernel's 1-128 range (the wrap branch is
 * not visible in this extraction).
 */
737 static u8 l2cap_get_ident(struct l2cap_conn *conn)
741 /* Get next available identificator.
742 * 1 - 128 are used by kernel.
743 * 129 - 199 are reserved.
744 * 200 - 254 are used by utilities like l2ping, etc.
747 spin_lock(&conn->lock);
749 if (++conn->tx_ident > 128)
754 spin_unlock(&conn->lock);
/* Build and transmit a signalling command on the connection's HCI
 * channel at maximum priority; non-flushable ACL start is used when
 * the controller supports it.
 */
759 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
762 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
765 BT_DBG("code 0x%2.2x", code);
770 if (lmp_no_flush_capable(conn->hcon->hdev))
771 flags = ACL_START_NO_FLUSH;
775 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
776 skb->priority = HCI_PRIO_MAX;
778 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (anything other than
 * stable or wait-prepare).
 */
781 static bool __chan_is_moving(struct l2cap_chan *chan)
783 return chan->move_state != L2CAP_MOVE_STABLE &&
784 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data frame: routed to the high-speed (AMP) hci_chan when
 * one is attached and no move is in progress, otherwise over BR/EDR
 * with flush/force-active flags derived from the channel flags.
 */
787 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
789 struct hci_conn *hcon = chan->conn->hcon;
792 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
795 if (chan->hs_hcon && !__chan_is_moving(chan)) {
797 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
804 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
805 lmp_no_flush_capable(hcon->hdev))
806 flags = ACL_START_NO_FLUSH;
810 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
811 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into l2cap_ctrl: reqseq and
 * final are common; S-frames carry poll/supervise, I-frames carry
 * sar/txseq (the sframe flag assignments are not visible in this
 * extraction).
 */
814 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
816 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
817 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
819 if (enh & L2CAP_CTRL_FRAME_TYPE) {
822 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
823 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
830 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
831 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decoding for the 32-bit extended control field. */
838 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
840 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
841 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
843 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
846 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
847 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
854 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
855 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Unpack whichever control-field width the channel negotiated
 * (FLAG_EXT_CTRL selects 32-bit) and strip it from the skb.
 */
862 static inline void __unpack_control(struct l2cap_chan *chan,
865 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
866 __unpack_extended_control(get_unaligned_le32(skb->data),
867 &bt_cb(skb)->control);
868 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
870 __unpack_enhanced_control(get_unaligned_le16(skb->data),
871 &bt_cb(skb)->control);
872 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of the unpack helpers: encode l2cap_ctrl into the 32-bit
 * extended control word.
 */
876 static u32 __pack_extended_control(struct l2cap_ctrl *control)
880 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
881 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
883 if (control->sframe) {
884 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
885 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
886 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
888 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
889 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* 16-bit enhanced-control encoder, mirroring the extended one. */
895 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
899 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
900 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
902 if (control->sframe) {
903 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
904 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
905 packed |= L2CAP_CTRL_FRAME_TYPE;
907 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
908 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into the skb just after the basic
 * L2CAP header, at the negotiated width.
 */
914 static inline void __pack_control(struct l2cap_chan *chan,
915 struct l2cap_ctrl *control,
918 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
919 put_unaligned_le32(__pack_extended_control(control),
920 skb->data + L2CAP_HDR_SIZE);
922 put_unaligned_le16(__pack_enhanced_control(control),
923 skb->data + L2CAP_HDR_SIZE);
/* ERTM header size for this channel: extended or enhanced control. */
927 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
929 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
930 return L2CAP_EXT_HDR_SIZE;
932 return L2CAP_ENH_HDR_SIZE;
/* Build an ERTM S-frame PDU: basic header + control field at the
 * negotiated width, with an optional trailing CRC16 FCS computed over
 * header and control.
 */
935 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
939 struct l2cap_hdr *lh;
940 int hlen = __ertm_hdr_size(chan);
942 if (chan->fcs == L2CAP_FCS_CRC16)
943 hlen += L2CAP_FCS_SIZE;
945 skb = bt_skb_alloc(hlen, GFP_KERNEL);
948 return ERR_PTR(-ENOMEM);
950 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
951 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
952 lh->cid = cpu_to_le16(chan->dcid);
954 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
955 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
957 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
959 if (chan->fcs == L2CAP_FCS_CRC16) {
960 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
961 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
964 skb->priority = HCI_PRIO_MAX;
/* Transmit an S-frame: suppressed during an AMP move; RR/RNR maintain
 * the CONN_RNR_SENT bit, and any non-SREJ frame acknowledges reqseq so
 * the pending ack timer can be cleared.
 */
968 static void l2cap_send_sframe(struct l2cap_chan *chan,
969 struct l2cap_ctrl *control)
974 BT_DBG("chan %p, control %p", chan, control);
976 if (!control->sframe)
979 if (__chan_is_moving(chan))
/* A pending F-bit is folded into this frame (the assignment line is
 * not visible in this extraction). */
982 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
986 if (control->super == L2CAP_SUPER_RR)
987 clear_bit(CONN_RNR_SENT, &chan->conn_state);
988 else if (control->super == L2CAP_SUPER_RNR)
989 set_bit(CONN_RNR_SENT, &chan->conn_state);
991 if (control->super != L2CAP_SUPER_SREJ) {
992 chan->last_acked_seq = control->reqseq;
993 __clear_ack_timer(chan);
996 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
997 control->final, control->poll, control->super);
999 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1000 control_field = __pack_extended_control(control);
1002 control_field = __pack_enhanced_control(control);
1004 skb = l2cap_create_sframe_pdu(chan, control_field);
1006 l2cap_do_send(chan, skb);
/* Send RR (ready) or RNR (not ready) depending on local-busy state,
 * acknowledging up to buffer_seq; 'poll' sets the P bit.
 */
1009 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1011 struct l2cap_ctrl control;
1013 BT_DBG("chan %p, poll %d", chan, poll);
1015 memset(&control, 0, sizeof(control));
1017 control.poll = poll;
1019 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1020 control.super = L2CAP_SUPER_RNR;
1022 control.super = L2CAP_SUPER_RR;
1024 control.reqseq = chan->buffer_seq;
1025 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
1028 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1030 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may/should use an AMP controller: needs
 * high-speed enabled on the connection, the remote's A2MP fixed
 * channel, and at least one powered non-BR/EDR AMP controller locally;
 * the final policy decision also depends on chan_policy.
 */
1033 static bool __amp_capable(struct l2cap_chan *chan)
1035 struct l2cap_conn *conn = chan->conn;
1036 struct hci_dev *hdev;
1037 bool amp_available = false;
1039 if (!conn->hs_enabled)
1042 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1045 read_lock(&hci_dev_list_lock);
1046 list_for_each_entry(hdev, &hci_dev_list, list) {
1047 if (hdev->amp_type != AMP_TYPE_BREDR &&
1048 test_bit(HCI_UP, &hdev->flags)) {
1049 amp_available = true;
1053 read_unlock(&hci_dev_list_lock);
1055 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1056 return amp_available;
1061 static bool l2cap_check_efs(struct l2cap_chan *chan)
1063 /* Check EFS parameters */
/* Send an L2CAP Connection Request, remembering the ident so the
 * response can be matched, and marking the connect as pending.
 */
1067 void l2cap_send_conn_req(struct l2cap_chan *chan)
1069 struct l2cap_conn *conn = chan->conn;
1070 struct l2cap_conn_req req;
1072 req.scid = cpu_to_le16(chan->scid);
1073 req.psm = chan->psm;
1075 chan->ident = l2cap_get_ident(conn);
1077 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1079 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant: Create Channel Request carrying the target AMP id. */
1082 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1084 struct l2cap_create_chan_req req;
1085 req.scid = cpu_to_le16(chan->scid);
1086 req.psm = chan->psm;
1087 req.amp_id = amp_id;
1089 chan->ident = l2cap_get_ident(chan->conn);
1091 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, mark every
 * queued frame for (re)transmission on the new link, reset SREJ/REJ
 * bookkeeping, and park the tx/rx state machines in their move states.
 * Non-ERTM channels need none of this.
 */
1095 static void l2cap_move_setup(struct l2cap_chan *chan)
1097 struct sk_buff *skb;
1099 BT_DBG("chan %p", chan);
1101 if (chan->mode != L2CAP_MODE_ERTM)
1104 __clear_retrans_timer(chan);
1105 __clear_monitor_timer(chan);
1106 __clear_ack_timer(chan);
1108 chan->retry_count = 0;
1109 skb_queue_walk(&chan->tx_q, skb) {
1110 if (bt_cb(skb)->control.retries)
1111 bt_cb(skb)->control.retries = 1;
1116 chan->expected_tx_seq = chan->buffer_seq;
1118 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1119 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1120 l2cap_seq_list_clear(&chan->retrans_list);
1121 l2cap_seq_list_clear(&chan->srej_list);
1122 skb_queue_purge(&chan->srej_q);
1124 chan->tx_state = L2CAP_TX_STATE_XMIT;
1125 chan->rx_state = L2CAP_RX_STATE_MOVE;
1127 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to stable state; for ERTM the initiator
 * polls the peer (explicit P-bit) and waits for F, the responder waits
 * for P.
 */
1130 static void l2cap_move_done(struct l2cap_chan *chan)
1132 u8 move_role = chan->move_role;
1133 BT_DBG("chan %p", chan);
1135 chan->move_state = L2CAP_MOVE_STABLE;
1136 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1138 if (chan->mode != L2CAP_MODE_ERTM)
1141 switch (move_role) {
1142 case L2CAP_MOVE_ROLE_INITIATOR:
1143 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1144 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1146 case L2CAP_MOVE_ROLE_RESPONDER:
1147 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Channel is fully configured: clear conf flags (including
 * CONF_NOT_COMPLETE set at creation), cancel the setup timer, mark
 * connected and notify the owner.
 */
1152 static void l2cap_chan_ready(struct l2cap_chan *chan)
1154 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1155 chan->conf_state = 0;
1156 __clear_chan_timer(chan);
1158 chan->state = BT_CONNECTED;
1160 chan->ops->ready(chan);
/* Start the connection either via AMP discovery (when AMP-capable) or
 * a plain L2CAP Connection Request.
 */
1163 static void l2cap_start_connection(struct l2cap_chan *chan)
1165 if (__amp_capable(chan)) {
1166 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1167 a2mp_discover_amp(chan);
1169 l2cap_send_conn_req(chan);
/* Drive an outgoing channel forward: LE channels are ready
 * immediately; on BR/EDR, once the remote feature mask has been
 * fetched and security passes, the connection is started — otherwise
 * an Information Request for the feature mask is issued first, under
 * the info timer.
 */
1173 static void l2cap_do_start(struct l2cap_chan *chan)
1175 struct l2cap_conn *conn = chan->conn;
1177 if (conn->hcon->type == LE_LINK) {
1178 l2cap_chan_ready(chan);
1182 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1183 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1186 if (l2cap_chan_check_security(chan) &&
1187 __l2cap_no_conn_pending(chan)) {
1188 l2cap_start_connection(chan);
1191 struct l2cap_info_req req;
1192 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1194 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1195 conn->info_ident = l2cap_get_ident(conn);
1197 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1199 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Is 'mode' usable given both the remote feature mask and our own?
 * ERTM/streaming additionally require local support (the condition
 * guarding the local_feat_mask OR is missing from this extraction).
 */
1204 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1206 u32 local_feat_mask = l2cap_feat_mask;
1208 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1211 case L2CAP_MODE_ERTM:
1212 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1213 case L2CAP_MODE_STREAMING:
1214 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnection Request (or, for the A2MP fixed channel, just
 * transition state locally), stopping ERTM timers first, then report
 * BT_DISCONN with the given error to the owner.
 */
1220 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1222 struct l2cap_conn *conn = chan->conn;
1223 struct l2cap_disconn_req req;
1228 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1229 __clear_retrans_timer(chan);
1230 __clear_monitor_timer(chan);
1231 __clear_ack_timer(chan);
1234 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1235 l2cap_state_change(chan, BT_DISCONN);
1239 req.dcid = cpu_to_le16(chan->dcid);
1240 req.scid = cpu_to_le16(chan->scid);
1241 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1244 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1247 /* ---- L2CAP connections ---- */
/* Kick every connection-oriented channel on this conn once the remote
 * feature mask is known: BT_CONNECT channels either start (security
 * permitting) or are closed when their mode is unsupported and
 * state-2 fallback is disabled; BT_CONNECT2 channels get a connection
 * response (pending/success/authentication-pending) and, on success,
 * an initial configuration request.
 */
1248 static void l2cap_conn_start(struct l2cap_conn *conn)
1250 struct l2cap_chan *chan, *tmp;
1252 BT_DBG("conn %p", conn);
1254 mutex_lock(&conn->chan_lock);
1256 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1257 l2cap_chan_lock(chan);
1259 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1260 l2cap_chan_unlock(chan);
1264 if (chan->state == BT_CONNECT) {
1265 if (!l2cap_chan_check_security(chan) ||
1266 !__l2cap_no_conn_pending(chan)) {
1267 l2cap_chan_unlock(chan);
1271 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1272 && test_bit(CONF_STATE2_DEVICE,
1273 &chan->conf_state)) {
1274 l2cap_chan_close(chan, ECONNRESET);
1275 l2cap_chan_unlock(chan);
1279 l2cap_start_connection(chan);
1281 } else if (chan->state == BT_CONNECT2) {
1282 struct l2cap_conn_rsp rsp;
1284 rsp.scid = cpu_to_le16(chan->dcid);
1285 rsp.dcid = cpu_to_le16(chan->scid);
1287 if (l2cap_chan_check_security(chan)) {
1288 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1289 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1290 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1291 chan->ops->defer(chan);
1294 l2cap_state_change(chan, BT_CONFIG);
1295 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1296 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1299 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1300 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1303 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first config request once, and only on success. */
1306 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1307 rsp.result != L2CAP_CR_SUCCESS) {
1308 l2cap_chan_unlock(chan);
1312 set_bit(CONF_REQ_SENT, &chan->conf_state);
1313 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1314 l2cap_build_conf_req(chan, buf), buf);
1315 chan->num_conf_req++;
1318 l2cap_chan_unlock(chan);
1321 mutex_unlock(&conn->chan_lock);
1324 /* Find socket with cid and source/destination bdaddr.
1325 * Returns closest match, locked.
/* Exact src+dst match returns immediately; otherwise the best
 * wildcard (BDADDR_ANY) candidate seen so far is remembered in c1 and
 * returned after the scan (the c1 assignment and final return are not
 * visible in this extraction).
 */
1327 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1331 struct l2cap_chan *c, *c1 = NULL;
1333 read_lock(&chan_list_lock);
1335 list_for_each_entry(c, &chan_list, global_l) {
1336 if (state && c->state != state)
1339 if (c->scid == cid) {
1340 int src_match, dst_match;
1341 int src_any, dst_any;
1344 src_match = !bacmp(&c->src, src);
1345 dst_match = !bacmp(&c->dst, dst);
1346 if (src_match && dst_match) {
1347 read_unlock(&chan_list_lock);
1352 src_any = !bacmp(&c->src, BDADDR_ANY);
1353 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1354 if ((src_match && dst_any) || (src_any && dst_match) ||
1355 (src_any && dst_any))
1360 read_unlock(&chan_list_lock);
1365 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1367 struct hci_conn *hcon = conn->hcon;
1368 struct l2cap_chan *chan, *pchan;
1373 /* Check if we have socket listening on cid */
1374 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1375 &hcon->src, &hcon->dst);
1379 /* Client ATT sockets should override the server one */
1380 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1383 dst_type = bdaddr_type(hcon, hcon->dst_type);
1385 /* If device is blocked, do not create a channel for it */
1386 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1389 l2cap_chan_lock(pchan);
1391 chan = pchan->ops->new_connection(pchan);
1395 chan->dcid = L2CAP_CID_ATT;
1397 bacpy(&chan->src, &hcon->src);
1398 bacpy(&chan->dst, &hcon->dst);
1399 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1400 chan->dst_type = dst_type;
1402 __l2cap_chan_add(conn, chan);
1405 l2cap_chan_unlock(pchan);
/* Link-ready notification: walk every channel on this connection and move
 * it forward — request SMP security on LE, mark non-connection-oriented
 * channels ready, and start the connect procedure for channels that were
 * waiting in BT_CONNECT.
 */
1408 static void l2cap_conn_ready(struct l2cap_conn *conn)
1410 struct l2cap_chan *chan;
1411 struct hci_conn *hcon = conn->hcon;
1413 BT_DBG("conn %p", conn);
1415 /* For outgoing pairing which doesn't necessarily have an
1416 * associated socket (e.g. mgmt_pair_device).
1418 if (hcon->out && hcon->type == LE_LINK)
1419 smp_conn_security(hcon, hcon->pending_sec_level);
1421 mutex_lock(&conn->chan_lock);
/* LE links may need an ATT channel created first */
1423 if (hcon->type == LE_LINK)
1424 l2cap_le_conn_ready(conn);
1426 list_for_each_entry(chan, &conn->chan_l, list) {
1428 l2cap_chan_lock(chan);
/* A2MP fixed channels manage their own state; skip them */
1430 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1431 l2cap_chan_unlock(chan);
1435 if (hcon->type == LE_LINK) {
/* Channel becomes ready only once SMP security is satisfied */
1436 if (smp_conn_security(hcon, chan->sec_level))
1437 l2cap_chan_ready(chan);
1439 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1440 l2cap_chan_ready(chan);
1442 } else if (chan->state == BT_CONNECT) {
1443 l2cap_do_start(chan);
1446 l2cap_chan_unlock(chan);
1449 mutex_unlock(&conn->chan_lock);
1452 /* Notify sockets that we cannot guarantee reliability anymore */
1453 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1455 struct l2cap_chan *chan;
1457 BT_DBG("conn %p", conn);
1459 mutex_lock(&conn->chan_lock);
/* Only channels that explicitly demanded reliable service get the error */
1461 list_for_each_entry(chan, &conn->chan_l, list) {
1462 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1463 l2cap_chan_set_err(chan, err);
1466 mutex_unlock(&conn->chan_lock);
/* Info-request timer expired: give up waiting for the peer's information
 * response, mark the feature-mask exchange as done anyway, and proceed
 * with starting queued channels.
 */
1469 static void l2cap_info_timeout(struct work_struct *work)
1471 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1474 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1475 conn->info_ident = 0;
1477 l2cap_conn_start(conn);
1482 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1483 * callback is called during registration. The ->remove callback is called
1484 * during unregistration.
1485 * An l2cap_user object can either be explicitly unregistered or when the
1486 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1487 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1488 * External modules must own a reference to the l2cap_conn object if they intend
1489 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1490 * any time if they don't.
/* Register an external user on a live l2cap_conn.
 * Returns 0 on success; error paths (already registered, dead conn, probe
 * failure) are elided in this excerpt.
 */
1493 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1495 struct hci_dev *hdev = conn->hcon->hdev;
1498 /* We need to check whether l2cap_conn is registered. If it is not, we
1499 * must not register the l2cap_user. l2cap_conn_del() unregisters
1500 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1501 * relies on the parent hci_conn object to be locked. This itself relies
1502 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean this user object is already registered */
1507 if (user->list.next || user->list.prev) {
1512 /* conn->hchan is NULL after l2cap_conn_del() was called */
1518 ret = user->probe(conn, user);
1522 list_add(&user->list, &conn->users);
1526 hci_dev_unlock(hdev);
1529 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister a previously registered l2cap_user.  Clearing the
 * list pointers marks the object "not registered" for the check in
 * l2cap_register_user(); ->remove() is invoked under the hci_dev lock.
 */
1531 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1533 struct hci_dev *hdev = conn->hcon->hdev;
/* Not on the list — nothing to do */
1537 if (!user->list.next || !user->list.prev)
1540 list_del(&user->list);
1541 user->list.next = NULL;
1542 user->list.prev = NULL;
1543 user->remove(conn, user);
1546 hci_dev_unlock(hdev);
1548 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach and notify every registered l2cap_user; called when the
 * underlying connection is being torn down (see l2cap_conn_del()).
 */
1550 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1552 struct l2cap_user *user;
1554 while (!list_empty(&conn->users)) {
1555 user = list_first_entry(&conn->users, struct l2cap_user, list);
1556 list_del(&user->list);
/* NULL pointers mark the user as unregistered (see l2cap_register_user) */
1557 user->list.next = NULL;
1558 user->list.prev = NULL;
1559 user->remove(conn, user);
/* Tear down an l2cap_conn: close every channel with the given error,
 * release the HCI channel, cancel pending timers, and drop the conn's
 * reference.  Caller is expected to hold the hci_conn/hci_dev locks
 * (see the locking discussion in l2cap_register_user()).
 */
1563 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1565 struct l2cap_conn *conn = hcon->l2cap_data;
1566 struct l2cap_chan *chan, *l;
1571 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled frame */
1573 kfree_skb(conn->rx_skb);
1575 l2cap_unregister_all_users(conn);
1577 mutex_lock(&conn->chan_lock);
/* Hold each chan so ops->close() can run after it is unlinked/unlocked */
1580 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1581 l2cap_chan_hold(chan);
1582 l2cap_chan_lock(chan);
1584 l2cap_chan_del(chan, err);
1586 l2cap_chan_unlock(chan);
1588 chan->ops->close(chan);
1589 l2cap_chan_put(chan);
1592 mutex_unlock(&conn->chan_lock);
/* Releasing hchan also marks this conn unregistered (hchan == NULL) */
1594 hci_chan_del(conn->hchan);
1596 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1597 cancel_delayed_work_sync(&conn->info_timer);
1599 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1600 cancel_delayed_work_sync(&conn->security_timer)
1601 smp_chan_destroy(conn);
1604 hcon->l2cap_data = NULL;
/* Drop the initial reference taken in l2cap_conn_add() */
1606 l2cap_conn_put(conn);
/* LE SMP security procedure timed out: destroy the SMP context and tear
 * down the whole connection with ETIMEDOUT.
 */
1609 static void security_timeout(struct work_struct *work)
1611 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1612 security_timer.work);
1614 BT_DBG("conn %p", conn);
/* test_and_clear avoids racing with l2cap_conn_del() doing the same */
1616 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1617 smp_chan_destroy(conn);
1618 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialise an l2cap_conn for an hci_conn, creating the
 * backing hci_chan and choosing the MTU from the controller (LE vs ACL).
 * NOTE(review): early-return paths (existing conn, alloc failures) are
 * elided from this excerpt.
 */
1622 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1624 struct l2cap_conn *conn = hcon->l2cap_data;
1625 struct hci_chan *hchan;
1630 hchan = hci_chan_create(hcon);
1634 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Undo hchan creation if the conn allocation failed */
1636 hci_chan_del(hchan);
1640 kref_init(&conn->ref);
1641 hcon->l2cap_data = conn;
/* The conn holds a reference on its hci_conn until l2cap_conn_free() */
1643 hci_conn_get(conn->hcon);
1644 conn->hchan = hchan;
1646 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1648 switch (hcon->type) {
/* Prefer the LE-specific MTU when the controller advertises one */
1650 if (hcon->hdev->le_mtu) {
1651 conn->mtu = hcon->hdev->le_mtu;
1656 conn->mtu = hcon->hdev->acl_mtu;
1660 conn->feat_mask = 0;
1662 if (hcon->type == ACL_LINK)
1663 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1664 &hcon->hdev->dev_flags);
1666 spin_lock_init(&conn->lock);
1667 mutex_init(&conn->chan_lock);
1669 INIT_LIST_HEAD(&conn->chan_l);
1670 INIT_LIST_HEAD(&conn->users);
/* LE links time out security, BR/EDR links time out the info exchange */
1672 if (hcon->type == LE_LINK)
1673 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1675 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1677 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the conn (free itself elided in excerpt).
 */
1682 static void l2cap_conn_free(struct kref *ref)
1684 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1686 hci_conn_put(conn->hcon);
/* Take a reference on the connection object. */
1690 void l2cap_conn_get(struct l2cap_conn *conn)
1692 kref_get(&conn->ref);
1694 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; the last put frees via l2cap_conn_free(). */
1696 void l2cap_conn_put(struct l2cap_conn *conn)
1698 kref_put(&conn->ref, l2cap_conn_free);
1700 EXPORT_SYMBOL(l2cap_conn_put);
1702 /* ---- Socket interface ---- */
1704 /* Find socket with psm and source / destination bdaddr.
1705 * Returns closest match.
/* Exact src+dst matches win immediately; otherwise the best wildcard
 * (BDADDR_ANY) match found during the walk is returned.  Channels whose
 * address type does not suit the link type (BR/EDR vs LE) are skipped.
 */
1707 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1712 struct l2cap_chan *c, *c1 = NULL;
1714 read_lock(&chan_list_lock);
1716 list_for_each_entry(c, &chan_list, global_l) {
1717 if (state && c->state != state)
1720 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1723 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1726 if (c->psm == psm) {
1727 int src_match, dst_match;
1728 int src_any, dst_any;
1731 src_match = !bacmp(&c->src, src);
1732 dst_match = !bacmp(&c->dst, dst);
/* Perfect match: return immediately (unlock elided path) */
1733 if (src_match && dst_match) {
1734 read_unlock(&chan_list_lock);
1739 src_any = !bacmp(&c->src, BDADDR_ANY);
1740 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1741 if ((src_match && dst_any) || (src_any && dst_match) ||
1742 (src_any && dst_any))
1747 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * identified by @psm (connection-oriented) or @cid (fixed channel).
 * Creates/reuses the underlying hci_conn and l2cap_conn, binds the
 * channel, and either completes immediately (link already up) or arms
 * the channel timer and waits for l2cap_conn_ready().
 * Returns 0 or a negative errno; error/early-exit paths are elided in
 * this excerpt.
 */
1752 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1753 bdaddr_t *dst, u8 dst_type)
1755 struct l2cap_conn *conn;
1756 struct hci_conn *hcon;
1757 struct hci_dev *hdev;
1761 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1762 dst_type, __le16_to_cpu(psm));
1764 hdev = hci_get_route(dst, &chan->src);
1766 return -EHOSTUNREACH;
1770 l2cap_chan_lock(chan);
1772 /* PSM must be odd and lsb of upper byte must be 0 */
1773 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1774 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID */
1779 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1784 switch (chan->mode) {
1785 case L2CAP_MODE_BASIC:
1787 case L2CAP_MODE_ERTM:
1788 case L2CAP_MODE_STREAMING:
1797 switch (chan->state) {
1801 /* Already connecting */
1806 /* Already connected */
1820 /* Set destination address and psm */
1821 bacpy(&chan->dst, dst);
1822 chan->dst_type = dst_type;
1827 auth_type = l2cap_get_auth_type(chan);
/* Address type decides whether this goes out over LE or BR/EDR */
1829 if (bdaddr_type_is_le(dst_type))
1830 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1831 chan->sec_level, auth_type);
1833 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1834 chan->sec_level, auth_type);
1837 err = PTR_ERR(hcon);
1841 conn = l2cap_conn_add(hcon);
1843 hci_conn_drop(hcon);
/* Refuse a second channel on the same destination CID */
1848 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1849 hci_conn_drop(hcon);
1854 /* Update source addr of the socket */
1855 bacpy(&chan->src, &hcon->src);
1856 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* l2cap_chan_add() takes conn->chan_lock, so release the chan lock
 * around it to preserve lock ordering
 */
1858 l2cap_chan_unlock(chan);
1859 l2cap_chan_add(conn, chan);
1860 l2cap_chan_lock(chan);
1862 /* l2cap_chan_add takes its own ref so we can drop this one */
1863 hci_conn_drop(hcon);
1865 l2cap_state_change(chan, BT_CONNECT);
1866 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* If the ACL/LE link is already up, short-circuit the connect */
1868 if (hcon->state == BT_CONNECTED) {
1869 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1870 __clear_chan_timer(chan);
1871 if (l2cap_chan_check_security(chan))
1872 l2cap_state_change(chan, BT_CONNECTED);
1874 l2cap_do_start(chan);
1880 l2cap_chan_unlock(chan);
1881 hci_dev_unlock(hdev);
/* ERTM monitor timer: fires while waiting for the peer's F-bit response
 * and feeds a MONITOR_TO event into the TX state machine.
 * NOTE(review): the guard between lines 1893 and 1896 is elided — the
 * unlock/put pair at 1896-1897 appears to be an early-exit path; confirm
 * its condition against the full source.
 */
1886 static void l2cap_monitor_timeout(struct work_struct *work)
1888 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1889 monitor_timer.work);
1891 BT_DBG("chan %p", chan);
1893 l2cap_chan_lock(chan);
1896 l2cap_chan_unlock(chan);
1897 l2cap_chan_put(chan);
1901 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
/* Drop the reference taken when the timer was armed */
1903 l2cap_chan_unlock(chan);
1904 l2cap_chan_put(chan);
/* ERTM retransmission timer: no ack arrived in time, feed RETRANS_TO into
 * the TX state machine (which will poll the peer).
 * NOTE(review): as with the monitor timer, the early-exit condition
 * before lines 1917-1918 is elided from this excerpt.
 */
1907 static void l2cap_retrans_timeout(struct work_struct *work)
1909 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1910 retrans_timer.work);
1912 BT_DBG("chan %p", chan);
1914 l2cap_chan_lock(chan);
1917 l2cap_chan_unlock(chan);
1918 l2cap_chan_put(chan);
1922 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
/* Drop the reference taken when the timer was armed */
1923 l2cap_chan_unlock(chan);
1924 l2cap_chan_put(chan);
/* Streaming-mode transmit: drain @skbs plus anything already queued,
 * stamping each frame with the next tx sequence number and an optional
 * FCS.  No retransmission state is kept in this mode.
 */
1927 static void l2cap_streaming_send(struct l2cap_chan *chan,
1928 struct sk_buff_head *skbs)
1930 struct sk_buff *skb;
1931 struct l2cap_ctrl *control;
1933 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Hold off while an AMP channel move is in progress */
1935 if (__chan_is_moving(chan))
1938 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1940 while (!skb_queue_empty(&chan->tx_q)) {
1942 skb = skb_dequeue(&chan->tx_q);
1944 bt_cb(skb)->control.retries = 1;
1945 control = &bt_cb(skb)->control;
1947 control->reqseq = 0;
1948 control->txseq = chan->next_tx_seq;
1950 __pack_control(chan, control, skb);
/* Append CRC16 over header+payload when FCS is negotiated */
1952 if (chan->fcs == L2CAP_FCS_CRC16) {
1953 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1954 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1957 l2cap_do_send(chan, skb);
1959 BT_DBG("Sent txseq %u", control->txseq);
1961 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1962 chan->frames_sent++;
/* ERTM transmit: send queued I-frames while the remote window has room
 * and the TX state machine is in XMIT.  Each frame is cloned before
 * sending so the original stays queued for retransmission.
 * Returns the number of frames sent (returns elided in excerpt).
 */
1966 static int l2cap_ertm_send(struct l2cap_chan *chan)
1968 struct sk_buff *skb, *tx_skb;
1969 struct l2cap_ctrl *control;
1972 BT_DBG("chan %p", chan);
1974 if (chan->state != BT_CONNECTED)
/* Peer declared itself busy (RNR) — do not transmit */
1977 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1980 if (__chan_is_moving(chan))
1983 while (chan->tx_send_head &&
1984 chan->unacked_frames < chan->remote_tx_win &&
1985 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1987 skb = chan->tx_send_head;
1989 bt_cb(skb)->control.retries = 1;
1990 control = &bt_cb(skb)->control;
/* Piggyback the F-bit if one is owed to the peer */
1992 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame acks everything received so far */
1995 control->reqseq = chan->buffer_seq;
1996 chan->last_acked_seq = chan->buffer_seq;
1997 control->txseq = chan->next_tx_seq;
1999 __pack_control(chan, control, skb);
2001 if (chan->fcs == L2CAP_FCS_CRC16) {
2002 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2003 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2006 /* Clone after data has been modified. Data is assumed to be
2007 read-only (for locking purposes) on cloned sk_buffs.
2009 tx_skb = skb_clone(skb, GFP_KERNEL);
2014 __set_retrans_timer(chan);
2016 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2017 chan->unacked_frames++;
2018 chan->frames_sent++;
/* Advance tx_send_head, or clear it at the end of the queue */
2021 if (skb_queue_is_last(&chan->tx_q, skb))
2022 chan->tx_send_head = NULL;
2024 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2026 l2cap_do_send(chan, tx_skb);
2027 BT_DBG("Sent txseq %u", control->txseq);
2030 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2031 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * Frames past max_tx retries force a disconnect; cloned frames must be
 * skb_copy()'d because their data is updated in place below.
 */
2038 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2039 struct l2cap_ctrl control;
2040 struct sk_buff *skb;
2041 struct sk_buff *tx_skb;
2043 BT_DBG("chan %p", chan);
2045 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2048 if (__chan_is_moving(chan))
2051 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2052 seq = l2cap_seq_list_pop(&chan->retrans_list);
2054 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
/* Frame may have been acked and freed since the SREJ arrived */
2056 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2061 bt_cb(skb)->control.retries++;
2062 control = bt_cb(skb)->control;
/* Give up and tear the channel down once the retry budget is spent */
2064 if (chan->max_tx != 0 &&
2065 bt_cb(skb)->control.retries > chan->max_tx) {
2066 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2067 l2cap_send_disconn_req(chan, ECONNRESET);
2068 l2cap_seq_list_clear(&chan->retrans_list);
2072 control.reqseq = chan->buffer_seq;
2073 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2078 if (skb_cloned(skb)) {
2079 /* Cloned sk_buffs are read-only, so we need a
2082 tx_skb = skb_copy(skb, GFP_KERNEL);
2084 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon the whole retransmit pass */
2088 l2cap_seq_list_clear(&chan->retrans_list);
2092 /* Update skb contents */
2093 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2094 put_unaligned_le32(__pack_extended_control(&control),
2095 tx_skb->data + L2CAP_HDR_SIZE);
2097 put_unaligned_le16(__pack_enhanced_control(&control),
2098 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS since the control field just changed */
2101 if (chan->fcs == L2CAP_FCS_CRC16) {
2102 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2103 put_unaligned_le16(fcs, skb_put(tx_skb,
2107 l2cap_do_send(chan, tx_skb);
2109 BT_DBG("Resent txseq %d", control.txseq);
2111 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame named by control->reqseq (SREJ handling). */
2115 static void l2cap_retransmit(struct l2cap_chan *chan,
2116 struct l2cap_ctrl *control)
2118 BT_DBG("chan %p, control %p", chan, control);
2120 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2121 l2cap_ertm_resend(chan);
/* REJ handling: queue every unacked frame from control->reqseq up to (but
 * not including) tx_send_head for retransmission, then resend.
 */
2124 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2125 struct l2cap_ctrl *control)
2127 struct sk_buff *skb;
2129 BT_DBG("chan %p, control %p", chan, control);
2132 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Start from a clean list; stale entries would resend acked frames */
2134 l2cap_seq_list_clear(&chan->retrans_list);
2136 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2139 if (chan->unacked_frames) {
/* First walk: find the frame where retransmission should begin */
2140 skb_queue_walk(&chan->tx_q, skb) {
2141 if (bt_cb(skb)->control.txseq == control->reqseq ||
2142 skb == chan->tx_send_head)
/* Second walk: collect txseqs up to the unsent portion of the queue */
2146 skb_queue_walk_from(&chan->tx_q, skb) {
2147 if (skb == chan->tx_send_head)
2150 l2cap_seq_list_append(&chan->retrans_list,
2151 bt_cb(skb)->control.txseq);
2154 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, piggyback
 * acks on pending I-frames when possible, send an explicit RR once the
 * receive window is 3/4 consumed, otherwise just (re)arm the ack timer.
 */
2158 static void l2cap_send_ack(struct l2cap_chan *chan)
2160 struct l2cap_ctrl control;
2161 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2162 chan->last_acked_seq);
2165 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2166 chan, chan->last_acked_seq, chan->buffer_seq);
2168 memset(&control, 0, sizeof(control));
/* Locally busy: tell the peer to stop with an RNR S-frame */
2171 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2172 chan->rx_state == L2CAP_RX_STATE_RECV) {
2173 __clear_ack_timer(chan);
2174 control.super = L2CAP_SUPER_RNR;
2175 control.reqseq = chan->buffer_seq;
2176 l2cap_send_sframe(chan, &control);
2178 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2179 l2cap_ertm_send(chan);
2180 /* If any i-frames were sent, they included an ack */
2181 if (chan->buffer_seq == chan->last_acked_seq)
2185 /* Ack now if the window is 3/4ths full.
2186 * Calculate without mul or div
2188 threshold = chan->ack_win;
/* threshold = ack_win * 3 (shifted down later in elided lines) */
2189 threshold += threshold << 1;
2192 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2195 if (frames_to_ack >= threshold) {
2196 __clear_ack_timer(chan);
2197 control.super = L2CAP_SUPER_RR;
2198 control.reqseq = chan->buffer_seq;
2199 l2cap_send_sframe(chan, &control);
/* Otherwise delay: the ack timer will flush the ack later */
2204 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb, chaining extra fragments
 * (each at most conn->mtu bytes) on skb's frag_list.  @count is how much
 * fits in the head skb.  Returns 0 or a negative errno (error returns
 * elided in this excerpt).
 */
2208 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2209 struct msghdr *msg, int len,
2210 int count, struct sk_buff *skb)
2212 struct l2cap_conn *conn = chan->conn;
2213 struct sk_buff **frag;
2216 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2222 /* Continuation fragments (no L2CAP header) */
2223 frag = &skb_shinfo(skb)->frag_list;
2225 struct sk_buff *tmp;
2227 count = min_t(unsigned int, conn->mtu, len);
/* Fragment allocation goes through the channel backend (socket) */
2229 tmp = chan->ops->alloc_skb(chan, count,
2230 msg->msg_flags & MSG_DONTWAIT);
2232 return PTR_ERR(tmp);
2236 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2239 (*frag)->priority = skb->priority;
/* Keep the head skb's accounting in sync with the chained data */
2244 skb->len += (*frag)->len;
2245 skb->data_len += (*frag)->len;
2247 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload
 * copied from the user's msghdr.  Returns the skb or an ERR_PTR.
 */
2253 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2254 struct msghdr *msg, size_t len,
2257 struct l2cap_conn *conn = chan->conn;
2258 struct sk_buff *skb;
2259 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2260 struct l2cap_hdr *lh;
2262 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2263 __le16_to_cpu(chan->psm), len, priority);
/* Head skb carries at most one HCI fragment; the rest is chained */
2265 count = min_t(unsigned int, (conn->mtu - hlen), len);
2267 skb = chan->ops->alloc_skb(chan, count + hlen,
2268 msg->msg_flags & MSG_DONTWAIT);
2272 skb->priority = priority;
2274 /* Create L2CAP header */
2275 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2276 lh->cid = cpu_to_le16(chan->dcid);
2277 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2278 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2280 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2281 if (unlikely(err < 0)) {
2283 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload.
 * Returns the skb or an ERR_PTR.
 */
2288 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2289 struct msghdr *msg, size_t len,
2292 struct l2cap_conn *conn = chan->conn;
2293 struct sk_buff *skb;
2295 struct l2cap_hdr *lh;
2297 BT_DBG("chan %p len %zu", chan, len);
2299 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2301 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2302 msg->msg_flags & MSG_DONTWAIT);
2306 skb->priority = priority;
2308 /* Create L2CAP header */
2309 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2310 lh->cid = cpu_to_le16(chan->dcid);
2311 lh->len = cpu_to_le16(len);
2313 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2314 if (unlikely(err < 0)) {
2316 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control field
 * (filled in at transmit time), optional SDU-length field for the first
 * segment, payload, with room reserved for an FCS.  Returns the skb or
 * an ERR_PTR.
 */
2321 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2322 struct msghdr *msg, size_t len,
2325 struct l2cap_conn *conn = chan->conn;
2326 struct sk_buff *skb;
2327 int err, count, hlen;
2328 struct l2cap_hdr *lh;
2330 BT_DBG("chan %p len %zu", chan, len);
2333 return ERR_PTR(-ENOTCONN);
/* Header size depends on extended vs enhanced control field */
2335 hlen = __ertm_hdr_size(chan);
/* First segment of a segmented SDU carries the total SDU length */
2338 hlen += L2CAP_SDULEN_SIZE;
2340 if (chan->fcs == L2CAP_FCS_CRC16)
2341 hlen += L2CAP_FCS_SIZE;
2343 count = min_t(unsigned int, (conn->mtu - hlen), len);
2345 skb = chan->ops->alloc_skb(chan, count + hlen,
2346 msg->msg_flags & MSG_DONTWAIT);
2350 /* Create L2CAP header */
2351 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2352 lh->cid = cpu_to_le16(chan->dcid);
2353 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2355 /* Control header is populated later */
2356 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2357 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2359 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2362 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2364 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2365 if (unlikely(err < 0)) {
2367 return ERR_PTR(err);
2370 bt_cb(skb)->control.fcs = chan->fcs;
2371 bt_cb(skb)->control.retries = 0;
/* Split one SDU of @len bytes into I-frame PDUs sized to fit a single
 * HCI fragment (and the remote MPS), tagging each with the proper SAR
 * value, and queue them on @seg_queue.  Returns 0 or a negative errno.
 */
2375 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2376 struct sk_buff_head *seg_queue,
2377 struct msghdr *msg, size_t len)
2379 struct sk_buff *skb;
2384 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2386 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2387 * so fragmented skbs are not used. The HCI layer's handling
2388 * of fragmented skbs is not compatible with ERTM's queueing.
2391 /* PDU size is derived from the HCI MTU */
2392 pdu_len = chan->conn->mtu;
2394 /* Constrain PDU size for BR/EDR connections */
2396 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2398 /* Adjust for largest possible L2CAP overhead. */
2400 pdu_len -= L2CAP_FCS_SIZE;
2402 pdu_len -= __ertm_hdr_size(chan);
2404 /* Remote device may have requested smaller PDUs */
2405 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* SDU fits in one PDU: no segmentation needed */
2407 if (len <= pdu_len) {
2408 sar = L2CAP_SAR_UNSEGMENTED;
2412 sar = L2CAP_SAR_START;
/* The START segment also carries the SDU-length field */
2414 pdu_len -= L2CAP_SDULEN_SIZE;
2418 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
/* On failure, free everything queued so far */
2421 __skb_queue_purge(seg_queue);
2422 return PTR_ERR(skb);
2425 bt_cb(skb)->control.sar = sar;
2426 __skb_queue_tail(seg_queue, skb);
2431 pdu_len += L2CAP_SDULEN_SIZE;
/* Last remaining chunk becomes the END segment */
2434 if (len <= pdu_len) {
2435 sar = L2CAP_SAR_END;
2438 sar = L2CAP_SAR_CONTINUE;
/* Entry point for sending user data on a channel.  Dispatches on channel
 * type/mode: connectionless, basic, or ERTM/streaming (segment first,
 * then hand to the TX machinery).  Returns bytes sent or negative errno
 * (return statements elided in this excerpt).
 */
2445 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2448 struct sk_buff *skb;
2450 struct sk_buff_head seg_queue;
2455 /* Connectionless channel */
2456 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2457 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2459 return PTR_ERR(skb);
2461 l2cap_do_send(chan, skb);
2465 switch (chan->mode) {
2466 case L2CAP_MODE_BASIC:
2467 /* Check outgoing MTU */
2468 if (len > chan->omtu)
2471 /* Create a basic PDU */
2472 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2474 return PTR_ERR(skb);
2476 l2cap_do_send(chan, skb);
2480 case L2CAP_MODE_ERTM:
2481 case L2CAP_MODE_STREAMING:
2482 /* Check outgoing MTU */
2483 if (len > chan->omtu) {
2488 __skb_queue_head_init(&seg_queue);
2490 /* Do segmentation before calling in to the state machine,
2491 * since it's possible to block while waiting for memory
2494 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2496 /* The channel could have been closed while segmenting,
2497 * check that it is still connected.
2499 if (chan->state != BT_CONNECTED) {
2500 __skb_queue_purge(&seg_queue);
/* ERTM goes through the TX state machine; streaming sends directly */
2507 if (chan->mode == L2CAP_MODE_ERTM)
2508 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2510 l2cap_streaming_send(chan, &seg_queue);
2514 /* If the skbs were not queued for sending, they'll still be in
2515 * seg_queue and need to be purged.
2517 __skb_queue_purge(&seg_queue);
2521 BT_DBG("bad state %1.1x", chan->mode);
/* A gap was detected on receive: send an SREJ S-frame for every missing
 * sequence number between expected_tx_seq and @txseq that is not already
 * buffered in srej_q, remembering each on srej_list.
 */
2528 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2530 struct l2cap_ctrl control;
2533 BT_DBG("chan %p, txseq %u", chan, txseq);
2535 memset(&control, 0, sizeof(control));
2537 control.super = L2CAP_SUPER_SREJ;
2539 for (seq = chan->expected_tx_seq; seq != txseq;
2540 seq = __next_seq(chan, seq)) {
2541 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2542 control.reqseq = seq;
2543 l2cap_send_sframe(chan, &control);
2544 l2cap_seq_list_append(&chan->srej_list, seq);
2548 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-request only the newest outstanding SREJ (the tail of srej_list). */
2551 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2553 struct l2cap_ctrl control;
2555 BT_DBG("chan %p", chan);
/* Nothing outstanding — list is empty */
2557 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2560 memset(&control, 0, sizeof(control));
2562 control.super = L2CAP_SUPER_SREJ;
2563 control.reqseq = chan->srej_list.tail;
2564 l2cap_send_sframe(chan, &control);
/* Resend SREJs for every outstanding missing frame up to @txseq, cycling
 * each popped entry back onto srej_list (pop + re-append rotates the
 * list, so the initial head is used as the stop marker).
 */
2567 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2569 struct l2cap_ctrl control;
2573 BT_DBG("chan %p, txseq %u", chan, txseq);
2575 memset(&control, 0, sizeof(control));
2577 control.super = L2CAP_SUPER_SREJ;
2579 /* Capture initial list head to allow only one pass through the list. */
2580 initial_head = chan->srej_list.head;
2583 seq = l2cap_seq_list_pop(&chan->srej_list);
2584 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2587 control.reqseq = seq;
2588 l2cap_send_sframe(chan, &control);
2589 l2cap_seq_list_append(&chan->srej_list, seq);
2590 } while (chan->srej_list.head != initial_head);
/* The peer acked everything up to (but not including) @reqseq: free the
 * acked frames from tx_q, update the ack bookkeeping, and stop the
 * retransmission timer once nothing is outstanding.
 */
2593 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2595 struct sk_buff *acked_skb;
2598 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing new acknowledged */
2600 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2603 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2604 chan->expected_ack_seq, chan->unacked_frames);
2606 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2607 ackseq = __next_seq(chan, ackseq)) {
2609 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2611 skb_unlink(acked_skb, &chan->tx_q);
2612 kfree_skb(acked_skb);
2613 chan->unacked_frames--;
2617 chan->expected_ack_seq = reqseq;
2619 if (chan->unacked_frames == 0)
2620 __clear_retrans_timer(chan);
2622 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon selective-reject recovery: drop all out-of-order buffered
 * frames, forget pending SREJs, and fall back to plain RECV state.
 */
2625 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2627 BT_DBG("chan %p", chan);
2629 chan->expected_tx_seq = chan->buffer_seq;
2630 l2cap_seq_list_clear(&chan->srej_list);
2631 skb_queue_purge(&chan->srej_q);
2632 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit state machine, XMIT state: normal transmission is
 * allowed.  Data requests are queued and sent immediately; local-busy
 * transitions, acks, explicit polls and retransmission timeouts are
 * handled per the ERTM spec, with polls moving the machine to WAIT_F.
 */
2635 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2636 struct l2cap_ctrl *control,
2637 struct sk_buff_head *skbs, u8 event)
2639 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2643 case L2CAP_EV_DATA_REQUEST:
/* Start sending from the first newly queued frame if idle */
2644 if (chan->tx_send_head == NULL)
2645 chan->tx_send_head = skb_peek(skbs);
2647 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2648 l2cap_ertm_send(chan);
2650 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2651 BT_DBG("Enter LOCAL_BUSY");
2652 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2654 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2655 /* The SREJ_SENT state must be aborted if we are to
2656 * enter the LOCAL_BUSY state.
2658 l2cap_abort_rx_srej_sent(chan);
2661 l2cap_send_ack(chan);
2664 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2665 BT_DBG("Exit LOCAL_BUSY");
2666 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* We previously sent RNR: poll the peer with RR(P=1) to resync */
2668 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2669 struct l2cap_ctrl local_control;
2671 memset(&local_control, 0, sizeof(local_control));
2672 local_control.sframe = 1;
2673 local_control.super = L2CAP_SUPER_RR;
2674 local_control.poll = 1;
2675 local_control.reqseq = chan->buffer_seq;
2676 l2cap_send_sframe(chan, &local_control);
2678 chan->retry_count = 1;
2679 __set_monitor_timer(chan);
2680 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2683 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2684 l2cap_process_reqseq(chan, control->reqseq);
2686 case L2CAP_EV_EXPLICIT_POLL:
2687 l2cap_send_rr_or_rnr(chan, 1);
2688 chan->retry_count = 1;
2689 __set_monitor_timer(chan);
2690 __clear_ack_timer(chan);
2691 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
/* Retransmission timeout: poll the peer and wait for its F-bit */
2693 case L2CAP_EV_RETRANS_TO:
2694 l2cap_send_rr_or_rnr(chan, 1);
2695 chan->retry_count = 1;
2696 __set_monitor_timer(chan);
2697 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2699 case L2CAP_EV_RECV_FBIT:
2700 /* Nothing to process */
2707 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2708 struct l2cap_ctrl *control,
2709 struct sk_buff_head *skbs, u8 event)
2711 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2715 case L2CAP_EV_DATA_REQUEST:
2716 if (chan->tx_send_head == NULL)
2717 chan->tx_send_head = skb_peek(skbs);
2718 /* Queue data, but don't send. */
2719 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2721 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2722 BT_DBG("Enter LOCAL_BUSY");
2723 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2725 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2726 /* The SREJ_SENT state must be aborted if we are to
2727 * enter the LOCAL_BUSY state.
2729 l2cap_abort_rx_srej_sent(chan);
2732 l2cap_send_ack(chan);
2735 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2736 BT_DBG("Exit LOCAL_BUSY");
2737 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2739 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2740 struct l2cap_ctrl local_control;
2741 memset(&local_control, 0, sizeof(local_control));
2742 local_control.sframe = 1;
2743 local_control.super = L2CAP_SUPER_RR;
2744 local_control.poll = 1;
2745 local_control.reqseq = chan->buffer_seq;
2746 l2cap_send_sframe(chan, &local_control);
2748 chan->retry_count = 1;
2749 __set_monitor_timer(chan);
2750 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2753 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2754 l2cap_process_reqseq(chan, control->reqseq);
2758 case L2CAP_EV_RECV_FBIT:
2759 if (control && control->final) {
2760 __clear_monitor_timer(chan);
2761 if (chan->unacked_frames > 0)
2762 __set_retrans_timer(chan);
2763 chan->retry_count = 0;
2764 chan->tx_state = L2CAP_TX_STATE_XMIT;
2765 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2768 case L2CAP_EV_EXPLICIT_POLL:
2771 case L2CAP_EV_MONITOR_TO:
2772 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2773 l2cap_send_rr_or_rnr(chan, 1);
2774 __set_monitor_timer(chan);
2775 chan->retry_count++;
2777 l2cap_send_disconn_req(chan, ECONNABORTED);
/* ERTM TX state machine dispatcher: route @event (plus optional control
 * info and outgoing frames) to the handler for the current tx_state.
 */
2785 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2786 struct sk_buff_head *skbs, u8 event)
2788 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2789 chan, control, skbs, event, chan->tx_state);
2791 switch (chan->tx_state) {
2792 case L2CAP_TX_STATE_XMIT:
2793 l2cap_tx_state_xmit(chan, control, skbs, event);
2795 case L2CAP_TX_STATE_WAIT_F:
2796 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's reqseq/F-bit into the TX state machine. */
2804 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2805 struct l2cap_ctrl *control)
2807 BT_DBG("chan %p, control %p", chan, control);
2808 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only a received F-bit into the TX state machine. */
2811 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2812 struct l2cap_ctrl *control)
2814 BT_DBG("chan %p, control %p", chan, control);
2815 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2818 /* Copy frame to all raw sockets on that connection */
2819 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2821 struct sk_buff *nskb;
2822 struct l2cap_chan *chan;
2824 BT_DBG("conn %p", conn);
2826 mutex_lock(&conn->chan_lock);
2828 list_for_each_entry(chan, &conn->chan_l, list) {
2829 if (chan->chan_type != L2CAP_CHAN_RAW)
2832 /* Don't send frame to the channel it came from */
2833 if (bt_cb(skb)->chan == chan)
/* Each raw channel gets its own clone of the frame */
2836 nskb = skb_clone(skb, GFP_KERNEL);
/* recv() takes ownership on success; elided path frees on failure */
2839 if (chan->ops->recv(chan, nskb))
2843 mutex_unlock(&conn->chan_lock);
2846 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID 0x0001 for
 * BR/EDR or 0x0005 for LE), command header, then @dlen bytes of @data,
 * chained into MTU-sized fragments when needed.  Returns the skb or NULL
 * (failure returns elided in this excerpt).
 */
2847 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2848 u8 ident, u16 dlen, void *data)
2850 struct sk_buff *skb, **frag;
2851 struct l2cap_cmd_hdr *cmd;
2852 struct l2cap_hdr *lh;
2855 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2856 conn, code, ident, dlen);
/* MTU too small to even hold the headers */
2858 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2861 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2862 count = min_t(unsigned int, conn->mtu, len);
2864 skb = bt_skb_alloc(count, GFP_KERNEL);
2868 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2869 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2871 if (conn->hcon->type == LE_LINK)
2872 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2874 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2876 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2879 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits in the head skb */
2882 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2883 memcpy(skb_put(skb, count), data, count);
2889 /* Continuation fragments (no L2CAP header) */
2890 frag = &skb_shinfo(skb)->frag_list;
2892 count = min_t(unsigned int, conn->mtu, len);
2894 *frag = bt_skb_alloc(count, GFP_KERNEL);
2898 memcpy(skb_put(*frag, count), data, count);
2903 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: report its type/length and
 * decode the value by size (1/2/4 bytes, or a pointer for larger
 * variable-length options).  Returns the total option length consumed.
 */
2913 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2916 struct l2cap_conf_opt *opt = *ptr;
2919 len = L2CAP_CONF_OPT_SIZE + opt->len;
2927 *val = *((u8 *) opt->val);
2931 *val = get_unaligned_le16(opt->val);
2935 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw value bytes */
2939 *val = (unsigned long) opt->val;
2943 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr, encoding the
 * value by size — @val is a pointer for options wider than 4 bytes —
 * then advance *ptr past the option.
 */
2947 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2949 struct l2cap_conf_opt *opt = *ptr;
2951 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2958 *((u8 *) opt->val) = val;
2962 put_unaligned_le16(val, opt->val);
2966 put_unaligned_le32(val, opt->val);
/* Wide options: val carries a pointer to the data to copy */
2970 memcpy(opt->val, (void *) val, len);
2974 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters.  ERTM channels advertise their local
 * service type with default access latency and flush timeout;
 * streaming channels advertise best-effort service.
 * NOTE(review): the streaming branch's remaining fields and any default
 * case are not visible in this excerpt.
 */
2977 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2979 struct l2cap_conf_efs efs;
2981 switch (chan->mode) {
2982 case L2CAP_MODE_ERTM:
2983 efs.id = chan->local_id;
2984 efs.stype = chan->local_stype;
2985 efs.msdu = cpu_to_le16(chan->local_msdu);
2986 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2987 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2988 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2991 case L2CAP_MODE_STREAMING:
2993 efs.stype = L2CAP_SERV_BESTEFFORT;
2994 efs.msdu = cpu_to_le16(chan->local_msdu);
2995 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3004 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3005 (unsigned long) &efs);
/* Delayed-work handler for the ERTM acknowledgement timer.  Under the
 * channel lock, compute how many received frames still need acking and
 * send an RR/RNR S-frame, then drop the reference that was taken when
 * the timer was armed.
 */
3008 static void l2cap_ack_timeout(struct work_struct *work)
3010 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3014 BT_DBG("chan %p", chan);
3016 l2cap_chan_lock(chan);
/* Distance from last acked sequence to current buffer position */
3018 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3019 chan->last_acked_seq);
3022 l2cap_send_rr_or_rnr(chan, 0);
3024 l2cap_chan_unlock(chan);
3025 l2cap_chan_put(chan);
/* Reset ERTM/streaming state for a freshly configured channel: zero all
 * sequence counters, reset the AMP move state to BR/EDR stable, and —
 * for ERTM only — start the rx/tx state machines, set up the
 * retransmission/monitor/ack work items and the SREJ/retransmission
 * sequence lists.  If allocating retrans_list fails, srej_list is freed
 * again.  NOTE(review): the early return for non-ERTM modes and the
 * final return are not visible in this excerpt.
 */
3028 int l2cap_ertm_init(struct l2cap_chan *chan)
3032 chan->next_tx_seq = 0;
3033 chan->expected_tx_seq = 0;
3034 chan->expected_ack_seq = 0;
3035 chan->unacked_frames = 0;
3036 chan->buffer_seq = 0;
3037 chan->frames_sent = 0;
3038 chan->last_acked_seq = 0;
3040 chan->sdu_last_frag = NULL;
3043 skb_queue_head_init(&chan->tx_q);
/* Channel starts on the BR/EDR controller with no AMP move pending */
3045 chan->local_amp_id = AMP_ID_BREDR;
3046 chan->move_id = AMP_ID_BREDR;
3047 chan->move_state = L2CAP_MOVE_STABLE;
3048 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM machinery below */
3050 if (chan->mode != L2CAP_MODE_ERTM)
3053 chan->rx_state = L2CAP_RX_STATE_RECV;
3054 chan->tx_state = L2CAP_TX_STATE_XMIT;
3056 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3057 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3058 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3060 skb_queue_head_init(&chan->srej_q);
3062 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3066 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the first list if the second allocation failed */
3068 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep ERTM/streaming only when the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 * NOTE(review): the switch statement line and the supported-mode return
 * are not visible in this excerpt.
 */
3073 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3076 case L2CAP_MODE_STREAMING:
3077 case L2CAP_MODE_ERTM:
3078 if (l2cap_mode_supported(mode, remote_feat_mask))
3082 return L2CAP_MODE_BASIC;
/* Extended window size is usable only when high-speed (AMP) is enabled
 * on this connection and the remote advertises EXT_WINDOW support.
 */
3086 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3088 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow spec is usable only when high-speed (AMP) is enabled on
 * this connection and the remote advertises EXT_FLOW support.
 */
3091 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3093 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts.  When
 * the channel lives on an AMP controller, the timeouts are derived from
 * the controller's best-effort flush timeout (converted to ms, scaled
 * by the class-2 formula, clamped to 16 bits); otherwise the spec
 * default values are used.
 */
3096 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3097 struct l2cap_conf_rfc *rfc)
3099 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3100 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3102 /* Class 1 devices have must have ERTM timeouts
3103 * exceeding the Link Supervision Timeout. The
3104 * default Link Supervision Timeout for AMP
3105 * controllers is 10 seconds.
3107 * Class 1 devices use 0xffffffff for their
3108 * best-effort flush timeout, so the clamping logic
3109 * will result in a timeout that meets the above
3110 * requirement. ERTM timeouts are 16-bit values, so
3111 * the maximum timeout is 65.535 seconds.
3114 /* Convert timeout to milliseconds and round */
3115 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3117 /* This is the recommended formula for class 2 devices
3118 * that start ERTM timers when packets are sent to the
3121 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit on-the-wire field before storing */
3123 if (ertm_to > 0xffff)
3126 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3127 rfc->monitor_timeout = rfc->retrans_timeout;
3129 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3130 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the transmit window size: enable the extended control field
 * when a window larger than the spec default is requested and extended
 * windows are supported, otherwise clamp tx_win to the default.  The
 * ack window mirrors the chosen tx window.
 */
3134 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3136 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3137 __l2cap_ews_supported(chan->conn)) {
3138 /* use extended control field */
3139 set_bit(FLAG_EXT_CTRL, &chan->flags);
3140 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3142 chan->tx_win = min_t(u16, chan->tx_win,
3143 L2CAP_DEFAULT_TX_WINDOW);
3144 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3146 chan->ack_win = chan->tx_win;
/* Build the outgoing Configure Request for a channel into 'data'.  On
 * the first request the mode may be downgraded via l2cap_select_mode;
 * then MTU, RFC, EFS, EWS and FCS options are appended as appropriate
 * for basic / ERTM / streaming mode.
 * NOTE(review): several lines (fall-through breaks, the returned
 * request length) are not visible in this excerpt.
 */
3149 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3151 struct l2cap_conf_req *req = data;
3152 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3153 void *ptr = req->data;
3156 BT_DBG("chan %p", chan);
/* Mode selection below only happens before any config exchange */
3158 if (chan->num_conf_req || chan->num_conf_rsp)
3161 switch (chan->mode) {
3162 case L2CAP_MODE_STREAMING:
3163 case L2CAP_MODE_ERTM:
3164 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3167 if (__l2cap_efs_supported(chan->conn))
3168 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3172 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it differs from the spec default */
3177 if (chan->imtu != L2CAP_DEFAULT_MTU)
3178 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3180 switch (chan->mode) {
3181 case L2CAP_MODE_BASIC:
/* Omit the RFC option entirely for peers without ERTM/streaming */
3182 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3183 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3186 rfc.mode = L2CAP_MODE_BASIC;
3188 rfc.max_transmit = 0;
3189 rfc.retrans_timeout = 0;
3190 rfc.monitor_timeout = 0;
3191 rfc.max_pdu_size = 0;
3193 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3194 (unsigned long) &rfc);
3197 case L2CAP_MODE_ERTM:
3198 rfc.mode = L2CAP_MODE_ERTM;
3199 rfc.max_transmit = chan->max_tx;
3201 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size bounded by both the spec default and the ACL MTU */
3203 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3204 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3206 rfc.max_pdu_size = cpu_to_le16(size);
3208 l2cap_txwin_setup(chan);
3210 rfc.txwin_size = min_t(u16, chan->tx_win,
3211 L2CAP_DEFAULT_TX_WINDOW);
3213 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3214 (unsigned long) &rfc);
3216 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3217 l2cap_add_opt_efs(&ptr, chan);
/* The full (extended) window goes in a separate EWS option */
3219 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3220 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3223 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3224 if (chan->fcs == L2CAP_FCS_NONE ||
3225 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3226 chan->fcs = L2CAP_FCS_NONE;
3227 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3232 case L2CAP_MODE_STREAMING:
3233 l2cap_txwin_setup(chan);
3234 rfc.mode = L2CAP_MODE_STREAMING;
3236 rfc.max_transmit = 0;
3237 rfc.retrans_timeout = 0;
3238 rfc.monitor_timeout = 0;
3240 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3241 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3243 rfc.max_pdu_size = cpu_to_le16(size);
3245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3246 (unsigned long) &rfc);
3248 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3249 l2cap_add_opt_efs(&ptr, chan);
3251 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3252 if (chan->fcs == L2CAP_FCS_NONE ||
3253 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3254 chan->fcs = L2CAP_FCS_NONE;
3255 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3261 req->dcid = cpu_to_le16(chan->dcid);
3262 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (chan->conf_req /
 * chan->conf_len) and build our Configure Response into 'data'.  The
 * first loop walks the options (MTU, flush timeout, QoS, RFC, FCS,
 * EFS, EWS), recording values and flagging unknown non-hint options;
 * then the requested mode is validated/negotiated and per-mode output
 * options are appended.  Returns -ECONNREFUSED when negotiation cannot
 * proceed.  NOTE(review): the returned length and some branch
 * scaffolding are not visible in this excerpt.
 */
3267 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3269 struct l2cap_conf_rsp *rsp = data;
3270 void *ptr = rsp->data;
3271 void *req = chan->conf_req;
3272 int len = chan->conf_len;
3273 int type, hint, olen;
3275 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3276 struct l2cap_conf_efs efs;
3278 u16 mtu = L2CAP_DEFAULT_MTU;
3279 u16 result = L2CAP_CONF_SUCCESS;
3282 BT_DBG("chan %p", chan);
/* First pass: consume every option in the request buffer */
3284 while (len >= L2CAP_CONF_OPT_SIZE) {
3285 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit: unknown hints are ignored, unknown options rejected */
3287 hint = type & L2CAP_CONF_HINT;
3288 type &= L2CAP_CONF_MASK;
3291 case L2CAP_CONF_MTU:
3295 case L2CAP_CONF_FLUSH_TO:
3296 chan->flush_to = val;
3299 case L2CAP_CONF_QOS:
3302 case L2CAP_CONF_RFC:
3303 if (olen == sizeof(rfc))
3304 memcpy(&rfc, (void *) val, olen);
3307 case L2CAP_CONF_FCS:
3308 if (val == L2CAP_FCS_NONE)
3309 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3312 case L2CAP_CONF_EFS:
3314 if (olen == sizeof(efs))
3315 memcpy(&efs, (void *) val, olen);
3318 case L2CAP_CONF_EWS:
/* EWS is only legal when high-speed support is enabled */
3319 if (!chan->conn->hs_enabled)
3320 return -ECONNREFUSED;
3322 set_bit(FLAG_EXT_CTRL, &chan->flags);
3323 set_bit(CONF_EWS_RECV, &chan->conf_state);
3324 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3325 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN */
3332 result = L2CAP_CONF_UNKNOWN;
3333 *((u8 *) ptr++) = type;
/* Mode negotiation happens only on the first req/rsp exchange */
3338 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3341 switch (chan->mode) {
3342 case L2CAP_MODE_STREAMING:
3343 case L2CAP_MODE_ERTM:
3344 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3345 chan->mode = l2cap_select_mode(rfc.mode,
3346 chan->conn->feat_mask);
3351 if (__l2cap_efs_supported(chan->conn))
3352 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3354 return -ECONNREFUSED;
3357 if (chan->mode != rfc.mode)
3358 return -ECONNREFUSED;
/* Peer asked for a mode we won't use: propose ours instead */
3364 if (chan->mode != rfc.mode) {
3365 result = L2CAP_CONF_UNACCEPT;
3366 rfc.mode = chan->mode;
3368 if (chan->num_conf_rsp == 1)
3369 return -ECONNREFUSED;
3371 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3372 (unsigned long) &rfc);
3375 if (result == L2CAP_CONF_SUCCESS) {
3376 /* Configure output options and let the other side know
3377 * which ones we don't like. */
3379 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3380 result = L2CAP_CONF_UNACCEPT;
3383 set_bit(CONF_MTU_DONE, &chan->conf_state);
3385 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* Incompatible EFS service types cannot be reconciled */
3388 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3389 efs.stype != L2CAP_SERV_NOTRAFIC &&
3390 efs.stype != chan->local_stype) {
3392 result = L2CAP_CONF_UNACCEPT;
3394 if (chan->num_conf_req >= 1)
3395 return -ECONNREFUSED;
3397 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3399 (unsigned long) &efs);
3401 /* Send PENDING Conf Rsp */
3402 result = L2CAP_CONF_PENDING;
3403 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3408 case L2CAP_MODE_BASIC:
3409 chan->fcs = L2CAP_FCS_NONE;
3410 set_bit(CONF_MODE_DONE, &chan->conf_state);
3413 case L2CAP_MODE_ERTM:
/* RFC window is only authoritative without an EWS option */
3414 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3415 chan->remote_tx_win = rfc.txwin_size;
3417 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3419 chan->remote_max_tx = rfc.max_transmit;
3421 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3422 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3423 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3424 rfc.max_pdu_size = cpu_to_le16(size);
3425 chan->remote_mps = size;
3427 __l2cap_set_ertm_timeouts(chan, &rfc);
3429 set_bit(CONF_MODE_DONE, &chan->conf_state);
3431 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3432 sizeof(rfc), (unsigned long) &rfc);
3434 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3435 chan->remote_id = efs.id;
3436 chan->remote_stype = efs.stype;
3437 chan->remote_msdu = le16_to_cpu(efs.msdu);
3438 chan->remote_flush_to =
3439 le32_to_cpu(efs.flush_to);
3440 chan->remote_acc_lat =
3441 le32_to_cpu(efs.acc_lat);
3442 chan->remote_sdu_itime =
3443 le32_to_cpu(efs.sdu_itime);
3444 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3446 (unsigned long) &efs);
3450 case L2CAP_MODE_STREAMING:
3451 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3452 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3453 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3454 rfc.max_pdu_size = cpu_to_le16(size);
3455 chan->remote_mps = size;
3457 set_bit(CONF_MODE_DONE, &chan->conf_state);
3459 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3460 (unsigned long) &rfc);
/* Any other mode: reject and report our mode back */
3465 result = L2CAP_CONF_UNACCEPT;
3467 memset(&rfc, 0, sizeof(rfc));
3468 rfc.mode = chan->mode;
3471 if (result == L2CAP_CONF_SUCCESS)
3472 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3474 rsp->scid = cpu_to_le16(chan->dcid);
3475 rsp->result = cpu_to_le16(result);
3476 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the peer and build the follow-up
 * Configure Request into 'data', echoing adjusted MTU / flush-timeout /
 * RFC / EWS / EFS options.  On SUCCESS or PENDING results, latch the
 * negotiated ERTM or streaming parameters into the channel.  Returns
 * -ECONNREFUSED on irreconcilable settings.
 * NOTE(review): the returned length is not visible in this excerpt.
 */
3481 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3482 void *data, u16 *result)
3484 struct l2cap_conf_req *req = data;
3485 void *ptr = req->data;
3488 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3489 struct l2cap_conf_efs efs;
3491 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3493 while (len >= L2CAP_CONF_OPT_SIZE) {
3494 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3497 case L2CAP_CONF_MTU:
/* Peer offered an MTU below the minimum: refuse, use the minimum */
3498 if (val < L2CAP_DEFAULT_MIN_MTU) {
3499 *result = L2CAP_CONF_UNACCEPT;
3500 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3503 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3506 case L2CAP_CONF_FLUSH_TO:
3507 chan->flush_to = val;
3508 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3512 case L2CAP_CONF_RFC:
3513 if (olen == sizeof(rfc))
3514 memcpy(&rfc, (void *)val, olen);
/* State-2 devices never change mode once chosen */
3516 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3517 rfc.mode != chan->mode)
3518 return -ECONNREFUSED;
3522 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3523 sizeof(rfc), (unsigned long) &rfc);
3526 case L2CAP_CONF_EWS:
3527 chan->ack_win = min_t(u16, val, chan->ack_win);
3528 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3532 case L2CAP_CONF_EFS:
3533 if (olen == sizeof(efs))
3534 memcpy(&efs, (void *)val, olen);
3536 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3537 efs.stype != L2CAP_SERV_NOTRAFIC &&
3538 efs.stype != chan->local_stype)
3539 return -ECONNREFUSED;
3541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3542 (unsigned long) &efs);
3545 case L2CAP_CONF_FCS:
3546 if (*result == L2CAP_CONF_PENDING)
3547 if (val == L2CAP_FCS_NONE)
3548 set_bit(CONF_RECV_NO_FCS,
/* Basic-mode channels cannot be re-negotiated to another mode */
3554 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3555 return -ECONNREFUSED;
3557 chan->mode = rfc.mode;
3559 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3561 case L2CAP_MODE_ERTM:
3562 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3563 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3564 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3565 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3566 chan->ack_win = min_t(u16, chan->ack_win,
3569 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3570 chan->local_msdu = le16_to_cpu(efs.msdu);
3571 chan->local_sdu_itime =
3572 le32_to_cpu(efs.sdu_itime);
3573 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3574 chan->local_flush_to =
3575 le32_to_cpu(efs.flush_to);
3579 case L2CAP_MODE_STREAMING:
3580 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3584 req->dcid = cpu_to_le16(chan->dcid);
3585 req->flags = __constant_cpu_to_le16(0);
/* Build a minimal Configure Response (scid, result, flags) into 'data'.
 * NOTE(review): the returned length is not visible in this excerpt.
 */
3590 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3591 u16 result, u16 flags)
3593 struct l2cap_conf_rsp *rsp = data;
3594 void *ptr = rsp->data;
3596 BT_DBG("chan %p", chan);
/* Our response's scid field carries the peer's channel id (dcid) */
3598 rsp->scid = cpu_to_le16(chan->dcid);
3599 rsp->result = cpu_to_le16(result);
3600 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connection (or Create Channel) Response with a
 * SUCCESS result, then — unless a Configure Request was already sent —
 * kick off configuration and bump the request counter.
 */
3605 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3607 struct l2cap_conn_rsp rsp;
3608 struct l2cap_conn *conn = chan->conn;
3612 rsp.scid = cpu_to_le16(chan->dcid);
3613 rsp.dcid = cpu_to_le16(chan->scid);
3614 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3615 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP-created channels answer with CREATE_CHAN_RSP instead */
3618 rsp_code = L2CAP_CREATE_CHAN_RSP;
3620 rsp_code = L2CAP_CONN_RSP;
3622 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3624 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the first Configure Request once */
3626 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3629 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3630 l2cap_build_conf_req(chan, buf), buf);
3631 chan->num_conf_req++;
/* Extract the final RFC (and extended window) parameters from a
 * successful Configure Response and latch them into the channel.  Sane
 * defaults are pre-loaded so a response missing the RFC/EWS options
 * still yields usable values.
 */
3634 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3638 /* Use sane default values in case a misbehaving remote device
3639 * did not send an RFC or extended window size option.
3641 u16 txwin_ext = chan->ack_win;
3642 struct l2cap_conf_rfc rfc = {
3644 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3645 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3646 .max_pdu_size = cpu_to_le16(chan->imtu),
3647 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3650 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming channels carry RFC parameters */
3652 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3655 while (len >= L2CAP_CONF_OPT_SIZE) {
3656 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3659 case L2CAP_CONF_RFC:
3660 if (olen == sizeof(rfc))
3661 memcpy(&rfc, (void *)val, olen);
3663 case L2CAP_CONF_EWS:
3670 case L2CAP_MODE_ERTM:
3671 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3672 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3673 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control: ack window bounded by the EWS value */
3674 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3675 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3677 chan->ack_win = min_t(u16, chan->ack_win,
3680 case L2CAP_MODE_STREAMING:
3681 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject.  If the rejected command was our pending
 * Information Request (matching ident, NOT_UNDERSTOOD reason), cancel
 * the info timer, mark feature discovery done and start any channels
 * that were waiting on it.
 */
3685 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3686 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3689 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated reject payloads */
3691 if (cmd_len < sizeof(*rej))
3694 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3697 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3698 cmd->ident == conn->info_ident) {
3699 cancel_delayed_work(&conn->info_timer);
3701 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3702 conn->info_ident = 0;
3704 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (shared by BR/EDR connect and
 * AMP create-channel).  Look up a listening channel by PSM, verify link
 * security (unless SDP), reject duplicate source CIDs, create the child
 * channel and add it to the connection, then choose result/status based
 * on security and feature-discovery state.  Sends the response and, if
 * appropriate, an Information Request or the first Configure Request.
 * Returns the new channel (or NULL).  NOTE(review): several error-path
 * lines and labels are not visible in this excerpt.
 */
3710 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3711 struct l2cap_cmd_hdr *cmd,
3712 u8 *data, u8 rsp_code, u8 amp_id)
3714 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3715 struct l2cap_conn_rsp rsp;
3716 struct l2cap_chan *chan = NULL, *pchan;
3717 int result, status = L2CAP_CS_NO_INFO;
3719 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3720 __le16 psm = req->psm;
3722 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3724 /* Check if we have socket listening on psm */
3725 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3726 &conn->hcon->dst, ACL_LINK);
3728 result = L2CAP_CR_BAD_PSM;
3732 mutex_lock(&conn->chan_lock);
3733 l2cap_chan_lock(pchan);
3735 /* Check if the ACL is secure enough (if not SDP) */
3736 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3737 !hci_conn_check_link_mode(conn->hcon)) {
3738 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3739 result = L2CAP_CR_SEC_BLOCK;
3743 result = L2CAP_CR_NO_MEM;
3745 /* Check if we already have channel with that dcid */
3746 if (__l2cap_get_chan_by_dcid(conn, scid))
3749 chan = pchan->ops->new_connection(pchan);
3753 /* For certain devices (ex: HID mouse), support for authentication,
3754 * pairing and bonding is optional. For such devices, inorder to avoid
3755 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3756 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3758 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3760 bacpy(&chan->src, &conn->hcon->src);
3761 bacpy(&chan->dst, &conn->hcon->dst);
3762 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3763 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3766 chan->local_amp_id = amp_id;
3768 __l2cap_chan_add(conn, chan);
3772 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3774 chan->ident = cmd->ident;
/* Result depends on whether feature discovery already completed */
3776 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3777 if (l2cap_chan_check_security(chan)) {
3778 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3779 l2cap_state_change(chan, BT_CONNECT2);
3780 result = L2CAP_CR_PEND;
3781 status = L2CAP_CS_AUTHOR_PEND;
3782 chan->ops->defer(chan);
3784 /* Force pending result for AMP controllers.
3785 * The connection will succeed after the
3786 * physical link is up.
3788 if (amp_id == AMP_ID_BREDR) {
3789 l2cap_state_change(chan, BT_CONFIG);
3790 result = L2CAP_CR_SUCCESS;
3792 l2cap_state_change(chan, BT_CONNECT2);
3793 result = L2CAP_CR_PEND;
3795 status = L2CAP_CS_NO_INFO;
3798 l2cap_state_change(chan, BT_CONNECT2);
3799 result = L2CAP_CR_PEND;
3800 status = L2CAP_CS_AUTHEN_PEND;
3803 l2cap_state_change(chan, BT_CONNECT2);
3804 result = L2CAP_CR_PEND;
3805 status = L2CAP_CS_NO_INFO;
3809 l2cap_chan_unlock(pchan);
3810 mutex_unlock(&conn->chan_lock);
3813 rsp.scid = cpu_to_le16(scid);
3814 rsp.dcid = cpu_to_le16(dcid);
3815 rsp.result = cpu_to_le16(result);
3816 rsp.status = cpu_to_le16(status);
3817 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Feature discovery not yet done: start it now */
3819 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3820 struct l2cap_info_req info;
3821 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3823 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3824 conn->info_ident = l2cap_get_ident(conn);
3826 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3828 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3829 sizeof(info), &info);
3832 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3833 result == L2CAP_CR_SUCCESS) {
3835 set_bit(CONF_REQ_SENT, &chan->conf_state);
3836 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3837 l2cap_build_conf_req(chan, buf), buf);
3838 chan->num_conf_req++;
/* Connection Request handler: validate the length, notify the
 * management interface of the device connection (once), then delegate
 * to l2cap_connect() for the BR/EDR case.
 */
3844 static int l2cap_connect_req(struct l2cap_conn *conn,
3845 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3847 struct hci_dev *hdev = conn->hcon->hdev;
3848 struct hci_conn *hcon = conn->hcon;
/* Reject truncated requests */
3850 if (cmd_len < sizeof(struct l2cap_conn_req))
3854 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3855 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3856 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3857 hcon->dst_type, 0, NULL, 0,
3859 hci_dev_unlock(hdev);
3861 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection / Create Channel Response.  Locate the channel by
 * its scid (or, while still pending, by the request ident), then either
 * move it to BT_CONFIG and send the first Configure Request (SUCCESS),
 * mark it connect-pending (PEND), or tear it down (any other result).
 */
3865 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3866 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3869 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3870 u16 scid, dcid, result, status;
3871 struct l2cap_chan *chan;
3875 if (cmd_len < sizeof(*rsp))
3878 scid = __le16_to_cpu(rsp->scid);
3879 dcid = __le16_to_cpu(rsp->dcid);
3880 result = __le16_to_cpu(rsp->result);
3881 status = __le16_to_cpu(rsp->status);
3883 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3884 dcid, scid, result, status);
3886 mutex_lock(&conn->chan_lock);
/* scid is only valid once assigned; fall back to lookup by ident */
3889 chan = __l2cap_get_chan_by_scid(conn, scid);
3895 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3904 l2cap_chan_lock(chan);
3907 case L2CAP_CR_SUCCESS:
3908 l2cap_state_change(chan, BT_CONFIG);
3911 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Only send the first Configure Request once */
3913 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3916 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3917 l2cap_build_conf_req(chan, req), req);
3918 chan->num_conf_req++;
3922 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any failure result: delete the channel */
3926 l2cap_chan_del(chan, ECONNREFUSED);
3930 l2cap_chan_unlock(chan);
3933 mutex_unlock(&conn->chan_lock);
/* Apply the default FCS policy once configuration settles: no FCS
 * outside ERTM/streaming, CRC16 otherwise unless the peer negotiated
 * FCS off.
 */
3938 static inline void set_default_fcs(struct l2cap_chan *chan)
3940 /* FCS is enabled only in ERTM or streaming mode, if one or both
3943 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3944 chan->fcs = L2CAP_FCS_NONE;
3945 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3946 chan->fcs = L2CAP_FCS_CRC16;
/* Send a SUCCESS Configure Response that resolves a locally pending EFS
 * configuration: clear the local-pending flag, mark our output done,
 * then transmit the response built into 'data'.
 */
3949 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3950 u8 ident, u16 flags)
3952 struct l2cap_conn *conn = chan->conn;
3954 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3957 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3958 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3960 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3961 l2cap_build_conf_rsp(chan, data,
3962 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * offending source/destination CIDs back to the peer.
 */
3965 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3968 struct l2cap_cmd_rej_cid rej;
3970 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3971 rej.scid = __cpu_to_le16(scid);
3972 rej.dcid = __cpu_to_le16(dcid);
3974 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle a Configure Request.  Accumulate (possibly fragmented) option
 * data into chan->conf_req, send an empty response for continuation
 * fragments, and once complete parse the options, send our response,
 * and finish channel setup (FCS defaults, ERTM init, channel-ready)
 * when both directions are configured.
 * NOTE(review): several intermediate lines (error labels, unlock paths,
 * conf_len reset) are not visible in this excerpt.
 */
3977 static inline int l2cap_config_req(struct l2cap_conn *conn,
3978 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3981 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3984 struct l2cap_chan *chan;
3987 if (cmd_len < sizeof(*req))
3990 dcid = __le16_to_cpu(req->dcid);
3991 flags = __le16_to_cpu(req->flags);
3993 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3995 chan = l2cap_get_chan_by_scid(conn, dcid);
3997 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal in BT_CONFIG / BT_CONNECT2 states */
4001 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4002 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4007 /* Reject if config buffer is too small. */
4008 len = cmd_len - sizeof(*req);
4009 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4010 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4011 l2cap_build_conf_rsp(chan, rsp,
4012 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request */
4017 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4018 chan->conf_len += len;
4020 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4021 /* Incomplete config. Send empty response. */
4022 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4023 l2cap_build_conf_rsp(chan, rsp,
4024 L2CAP_CONF_SUCCESS, flags), rsp);
4028 /* Complete config. */
4029 len = l2cap_parse_conf_req(chan, rsp);
4031 l2cap_send_disconn_req(chan, ECONNRESET);
4035 chan->ident = cmd->ident;
4036 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4037 chan->num_conf_rsp++;
4039 /* Reset config buffer. */
4042 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalize the channel */
4045 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4046 set_default_fcs(chan);
4048 if (chan->mode == L2CAP_MODE_ERTM ||
4049 chan->mode == L2CAP_MODE_STREAMING)
4050 err = l2cap_ertm_init(chan);
4053 l2cap_send_disconn_req(chan, -err);
4055 l2cap_chan_ready(chan);
4060 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4062 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4063 l2cap_build_conf_req(chan, buf), buf);
4064 chan->num_conf_req++;
4067 /* Got Conf Rsp PENDING from remote side and asume we sent
4068 Conf Rsp PENDING in the code above */
4069 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4070 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4072 /* check compatibility */
4074 /* Send rsp for BR/EDR channel */
4076 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4078 chan->ident = cmd->ident;
4082 l2cap_chan_unlock(chan);
/* Handle a Configure Response.  SUCCESS latches the negotiated RFC
 * parameters; PENDING may trigger an EFS response or AMP logical-link
 * creation; UNACCEPT (within the retry limit) re-parses the response
 * and sends an amended Configure Request; anything else disconnects.
 * When not a continuation fragment and both sides are done, finish
 * channel setup.  NOTE(review): error labels, unlock paths and some
 * scaffolding lines are not visible in this excerpt.
 */
4086 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4087 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4090 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4091 u16 scid, flags, result;
4092 struct l2cap_chan *chan;
4093 int len = cmd_len - sizeof(*rsp);
4096 if (cmd_len < sizeof(*rsp))
4099 scid = __le16_to_cpu(rsp->scid);
4100 flags = __le16_to_cpu(rsp->flags);
4101 result = __le16_to_cpu(rsp->result);
4103 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4106 chan = l2cap_get_chan_by_scid(conn, scid);
4111 case L2CAP_CONF_SUCCESS:
4112 l2cap_conf_rfc_get(chan, rsp->data, len);
4113 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4116 case L2CAP_CONF_PENDING:
4117 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4119 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4122 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4125 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR answers immediately; AMP waits for the logical link */
4129 if (!chan->hs_hcon) {
4130 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4133 if (l2cap_check_efs(chan)) {
4134 amp_create_logical_link(chan);
4135 chan->ident = cmd->ident;
4141 case L2CAP_CONF_UNACCEPT:
4142 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Bail out if the amended options cannot fit our buffer */
4145 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4146 l2cap_send_disconn_req(chan, ECONNRESET);
4150 /* throw out any old stored conf requests */
4151 result = L2CAP_CONF_SUCCESS;
4152 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4155 l2cap_send_disconn_req(chan, ECONNRESET);
4159 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4160 L2CAP_CONF_REQ, len, req);
4161 chan->num_conf_req++;
4162 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: error the channel and disconnect */
4168 l2cap_chan_set_err(chan, ECONNRESET);
4170 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4171 l2cap_send_disconn_req(chan, ECONNRESET);
4175 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4178 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4180 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4181 set_default_fcs(chan);
4183 if (chan->mode == L2CAP_MODE_ERTM ||
4184 chan->mode == L2CAP_MODE_STREAMING)
4185 err = l2cap_ertm_init(chan);
4188 l2cap_send_disconn_req(chan, -err);
4190 l2cap_chan_ready(chan);
4194 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: find the channel by our CID (the
 * request's dcid), acknowledge with a Disconnection Response, then
 * shut down and delete the channel.  A reference is held across
 * l2cap_chan_del so the ops->close call remains safe.
 */
4198 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4199 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4202 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4203 struct l2cap_disconn_rsp rsp;
4205 struct l2cap_chan *chan;
4207 if (cmd_len != sizeof(*req))
4210 scid = __le16_to_cpu(req->scid);
4211 dcid = __le16_to_cpu(req->dcid);
4213 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4215 mutex_lock(&conn->chan_lock);
4217 chan = __l2cap_get_chan_by_scid(conn, dcid);
4219 mutex_unlock(&conn->chan_lock);
4220 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4224 l2cap_chan_lock(chan);
4226 rsp.dcid = cpu_to_le16(chan->scid);
4227 rsp.scid = cpu_to_le16(chan->dcid);
4228 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4230 chan->ops->set_shutdown(chan);
/* Hold a ref so the channel survives until ops->close below */
4232 l2cap_chan_hold(chan);
4233 l2cap_chan_del(chan, ECONNRESET);
4235 l2cap_chan_unlock(chan);
4237 chan->ops->close(chan);
4238 l2cap_chan_put(chan);
4240 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response: look up the channel by our scid and
 * finish tearing it down.  As in the request path, a reference is held
 * across l2cap_chan_del so ops->close stays safe.
 */
4245 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4246 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4249 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4251 struct l2cap_chan *chan;
4253 if (cmd_len != sizeof(*rsp))
4256 scid = __le16_to_cpu(rsp->scid);
4257 dcid = __le16_to_cpu(rsp->dcid);
4259 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4261 mutex_lock(&conn->chan_lock);
4263 chan = __l2cap_get_chan_by_scid(conn, scid);
4265 mutex_unlock(&conn->chan_lock);
4269 l2cap_chan_lock(chan);
4271 l2cap_chan_hold(chan);
/* err == 0: this is a clean, locally initiated disconnect */
4272 l2cap_chan_del(chan, 0);
4274 l2cap_chan_unlock(chan);
4276 chan->ops->close(chan);
4277 l2cap_chan_put(chan);
4279 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request: answer feature-mask queries with our
 * feature set (extended flow/window only when high speed is enabled),
 * fixed-channel queries with the fixed channel map (A2MP bit toggled by
 * hs_enabled), and anything else with NOTSUPP.
 */
4284 static inline int l2cap_information_req(struct l2cap_conn *conn,
4285 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4288 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4291 if (cmd_len != sizeof(*req))
4294 type = __le16_to_cpu(req->type);
4296 BT_DBG("type 0x%4.4x", type);
4298 if (type == L2CAP_IT_FEAT_MASK) {
4300 u32 feat_mask = l2cap_feat_mask;
4301 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4302 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4303 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4305 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4307 if (conn->hs_enabled)
4308 feat_mask |= L2CAP_FEAT_EXT_FLOW
4309 | L2CAP_FEAT_EXT_WINDOW;
4311 put_unaligned_le32(feat_mask, rsp->data);
4312 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4314 } else if (type == L2CAP_IT_FIXED_CHAN) {
4316 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only with high-speed support */
4318 if (conn->hs_enabled)
4319 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4321 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4323 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4324 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4325 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4326 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4329 struct l2cap_info_rsp rsp;
4330 rsp.type = cpu_to_le16(type);
4331 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4332 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an Information Response.  Only accepted if it matches our
 * pending request ident and discovery is not already done.  A feature-
 * mask answer may chain into a fixed-channel query; otherwise mark
 * discovery done and start the pending channels.
 */
4339 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4340 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4343 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4346 if (cmd_len < sizeof(*rsp))
4349 type = __le16_to_cpu(rsp->type);
4350 result = __le16_to_cpu(rsp->result);
4352 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4354 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4355 if (cmd->ident != conn->info_ident ||
4356 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4359 cancel_delayed_work(&conn->info_timer);
/* On failure, treat discovery as complete and proceed anyway */
4361 if (result != L2CAP_IR_SUCCESS) {
4362 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4363 conn->info_ident = 0;
4365 l2cap_conn_start(conn);
4371 case L2CAP_IT_FEAT_MASK:
4372 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: follow up with that query */
4374 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4375 struct l2cap_info_req req;
4376 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4378 conn->info_ident = l2cap_get_ident(conn);
4380 l2cap_send_cmd(conn, conn->info_ident,
4381 L2CAP_INFO_REQ, sizeof(req), &req);
4383 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4384 conn->info_ident = 0;
4386 l2cap_conn_start(conn);
4390 case L2CAP_IT_FIXED_CHAN:
4391 conn->fixed_chan_mask = rsp->data[0];
4392 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4393 conn->info_ident = 0;
4395 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  amp_id 0 (BR/EDR) degrades to a
 * normal connect; otherwise the AMP controller id is validated and, on
 * success, the new channel is tied to the high-speed link.
 * NOTE(review): elided dump — error paths and returns are not shown.
 */
4402 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4403 struct l2cap_cmd_hdr *cmd,
4404 u16 cmd_len, void *data)
4406 struct l2cap_create_chan_req *req = data;
4407 struct l2cap_create_chan_rsp rsp;
4408 struct l2cap_chan *chan;
4409 struct hci_dev *hdev;
4412 if (cmd_len != sizeof(*req))
/* AMP channel creation is only legal when high speed is enabled. */
4415 if (!conn->hs_enabled)
4418 psm = le16_to_cpu(req->psm);
4419 scid = le16_to_cpu(req->scid);
4421 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4423 /* For controller id 0 make BR/EDR connection */
4424 if (req->amp_id == AMP_ID_BREDR) {
4425 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4430 /* Validate AMP controller id */
4431 hdev = hci_dev_get(req->amp_id);
/* The referenced controller must be an AMP and be powered up. */
4435 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4440 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4443 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4444 struct hci_conn *hs_hcon;
4446 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No AMP link to the peer: reject with an invalid-CID command reject. */
4450 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4455 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4457 mgr->bredr_chan = chan;
4458 chan->hs_hcon = hs_hcon;
/* Disable the L2CAP FCS for the channel carried on the AMP link. */
4459 chan->fcs = L2CAP_FCS_NONE;
4460 conn->mtu = hdev->block_mtu;
/* Failure path: report a bad AMP controller id to the initiator. */
4469 rsp.scid = cpu_to_le16(scid);
4470 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4471 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4473 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4479 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4481 struct l2cap_move_chan_req req;
4484 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4486 ident = l2cap_get_ident(chan->conn);
4487 chan->ident = ident;
4489 req.icid = cpu_to_le16(chan->scid);
4490 req.dest_amp_id = dest_amp_id;
4492 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4495 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4498 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4500 struct l2cap_move_chan_rsp rsp;
4502 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4504 rsp.icid = cpu_to_le16(chan->dcid);
4505 rsp.result = cpu_to_le16(result);
4507 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4511 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4513 struct l2cap_move_chan_cfm cfm;
4515 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4517 chan->ident = l2cap_get_ident(chan->conn);
4519 cfm.icid = cpu_to_le16(chan->scid);
4520 cfm.result = cpu_to_le16(result);
4522 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4525 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an "unconfirmed" Move Channel Confirm when no channel context is
 * available — only the icid is known. */
4528 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4530 struct l2cap_move_chan_cfm cfm;
4532 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4534 cfm.icid = cpu_to_le16(icid);
4535 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4537 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Acknowledge a Move Channel Confirm for the given icid, echoing the
 * ident of the confirm being answered. */
4541 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4544 struct l2cap_move_chan_cfm_rsp rsp;
4546 BT_DBG("icid 0x%4.4x", icid);
4548 rsp.icid = cpu_to_le16(icid);
4549 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link; the
 * actual link teardown is still a placeholder. */
4552 static void __release_logical_link(struct l2cap_chan *chan)
4554 chan->hs_hchan = NULL;
4555 chan->hs_hcon = NULL;
4557 /* Placeholder - release the logical link */
/* Handle failure to bring up an AMP logical link.  A channel still being
 * created is disconnected; an established channel aborts any in-progress
 * move according to its role.
 * NOTE(review): elided dump — some statements are not shown. */
4560 static void l2cap_logical_fail(struct l2cap_chan *chan)
4562 /* Logical link setup failed */
4563 if (chan->state != BT_CONNECTED) {
4564 /* Create channel failure, disconnect */
4565 l2cap_send_disconn_req(chan, ECONNRESET);
4569 switch (chan->move_role) {
4570 case L2CAP_MOVE_ROLE_RESPONDER:
4571 l2cap_move_done(chan);
/* Tell the initiator we cannot host the channel on this controller. */
4572 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4574 case L2CAP_MOVE_ROLE_INITIATOR:
4575 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4576 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4577 /* Remote has only sent pending or
4578 * success responses, clean up
4580 l2cap_move_done(chan);
4583 /* Other amp move states imply that the move
4584 * has already aborted
4586 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: record the
 * hci_chan, answer the deferred EFS config response, and finish
 * configuration if both directions are done.
 * NOTE(review): elided dump — some statements are not shown. */
4591 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4592 struct hci_chan *hchan)
4594 struct l2cap_conf_rsp rsp;
4596 chan->hs_hchan = hchan;
4597 chan->hs_hcon->l2cap_data = chan->conn;
/* The config response was held back until the logical link came up. */
4599 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4601 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4604 set_default_fcs(chan);
4606 err = l2cap_ertm_init(chan);
4608 l2cap_send_disconn_req(chan, -err);
4610 l2cap_chan_ready(chan);
/* Continue a channel move once the AMP logical link is up, advancing the
 * move state machine based on what is still outstanding. */
4614 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4615 struct hci_chan *hchan)
4617 chan->hs_hcon = hchan->conn;
4618 chan->hs_hcon->l2cap_data = chan->conn;
4620 BT_DBG("move_state %d", chan->move_state);
4622 switch (chan->move_state) {
4623 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4624 /* Move confirm will be sent after a success
4625 * response is received
4627 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4629 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Defer while locally busy; otherwise confirm (initiator) or report
 * success (responder). */
4630 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4631 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4632 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4633 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4634 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4635 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4636 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4637 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4641 /* Move was not in expected state, free the channel */
4642 __release_logical_link(chan);
4644 chan->move_state = L2CAP_MOVE_STABLE;
4648 /* Call with chan locked */
/* AMP logical-link completion entry point: dispatch to the failure,
 * create-finish, or move-finish path.
 * NOTE(review): elided dump — the status test and returns are not shown. */
4649 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4652 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4655 l2cap_logical_fail(chan);
4656 __release_logical_link(chan);
/* Channel not yet connected == still being created on the AMP. */
4660 if (chan->state != BT_CONNECTED) {
4661 /* Ignore logical link if channel is on BR/EDR */
4662 if (chan->local_amp_id != AMP_ID_BREDR)
4663 l2cap_logical_finish_create(chan, hchan);
4665 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel toward the preferred controller.  From BR/EDR
 * this first requires bringing up a physical AMP link; from an AMP the
 * move back to BR/EDR can start immediately.
 * NOTE(review): elided dump — some statements are not shown. */
4669 void l2cap_move_start(struct l2cap_chan *chan)
4671 BT_DBG("chan %p", chan);
4673 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Only move off BR/EDR when policy actually prefers AMP. */
4674 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4676 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4677 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4678 /* Placeholder - start physical link setup */
4680 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4681 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4683 l2cap_move_setup(chan);
/* Destination 0 == the BR/EDR controller. */
4684 l2cap_send_move_chan_req(chan, 0);
/* Finish channel creation after a physical AMP link attempt.  For an
 * outgoing channel, proceed on the AMP or fall back to BR/EDR; for an
 * incoming channel, send the Create Channel response and start
 * configuration on success.
 * NOTE(review): elided dump — some statements are not shown. */
4688 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4689 u8 local_amp_id, u8 remote_amp_id)
4691 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4692 local_amp_id, remote_amp_id);
4694 chan->fcs = L2CAP_FCS_NONE;
4696 /* Outgoing channel on AMP */
4697 if (chan->state == BT_CONNECT) {
4698 if (result == L2CAP_CR_SUCCESS) {
4699 chan->local_amp_id = local_amp_id;
4700 l2cap_send_create_chan_req(chan, remote_amp_id);
4702 /* Revert to BR/EDR connect */
4703 l2cap_send_conn_req(chan);
4709 /* Incoming channel on AMP */
4710 if (__l2cap_no_conn_pending(chan)) {
4711 struct l2cap_conn_rsp rsp;
4713 rsp.scid = cpu_to_le16(chan->dcid);
4714 rsp.dcid = cpu_to_le16(chan->scid);
4716 if (result == L2CAP_CR_SUCCESS) {
4717 /* Send successful response */
4718 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4719 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4721 /* Send negative response */
4722 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4723 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4726 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4729 if (result == L2CAP_CR_SUCCESS) {
/* Kick off configuration immediately after accepting. */
4730 l2cap_state_change(chan, BT_CONFIG);
4731 set_bit(CONF_REQ_SENT, &chan->conf_state);
4732 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4734 l2cap_build_conf_req(chan, buf), buf);
4735 chan->num_conf_req++;
4740 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4743 l2cap_move_setup(chan);
4744 chan->move_id = local_amp_id;
4745 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4747 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side of a move: if the logical link is already connected,
 * report success now; otherwise wait for it, or reject when no link is
 * available.
 * NOTE(review): elided dump — some statements are not shown. */
4750 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4752 struct hci_chan *hchan = NULL;
4754 /* Placeholder - get hci_chan for logical link */
4757 if (hchan->state == BT_CONNECTED) {
4758 /* Logical link is ready to go */
4759 chan->hs_hcon = hchan->conn;
4760 chan->hs_hcon->l2cap_data = chan->conn;
4761 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4762 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4764 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4766 /* Wait for logical link to be ready */
4767 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4770 /* Logical link not available */
4771 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move, answering the peer if we are responder,
 * then return the channel to stable operation and resume data flow.
 * NOTE(review): elided dump — some statements are not shown. */
4775 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4777 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
/* -EINVAL maps to "bad controller id"; everything else is refused. */
4779 if (result == -EINVAL)
4780 rsp_result = L2CAP_MR_BAD_ID;
4782 rsp_result = L2CAP_MR_NOT_ALLOWED;
4784 l2cap_send_move_chan_rsp(chan, rsp_result);
4787 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4788 chan->move_state = L2CAP_MOVE_STABLE;
4790 /* Restart data transmission */
4791 l2cap_ertm_send(chan);
4794 /* Invoke with locked chan */
/* Physical AMP link result: route to channel creation, move
 * cancellation, or the initiator/responder move path. */
4795 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4797 u8 local_amp_id = chan->local_amp_id;
4798 u8 remote_amp_id = chan->remote_amp_id;
4800 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4801 chan, result, local_amp_id, remote_amp_id);
/* Channel already going down — nothing to move or create. */
4803 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4804 l2cap_chan_unlock(chan);
4808 if (chan->state != BT_CONNECTED) {
4809 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4810 } else if (result != L2CAP_MR_SUCCESS) {
4811 l2cap_do_move_cancel(chan, result);
4813 switch (chan->move_role) {
4814 case L2CAP_MOVE_ROLE_INITIATOR:
4815 l2cap_do_move_initiate(chan, local_amp_id,
4818 case L2CAP_MOVE_ROLE_RESPONDER:
4819 l2cap_do_move_respond(chan, result);
4822 l2cap_do_move_cancel(chan, result);
/* Handle a Move Channel Request from the peer: validate the channel and
 * destination controller, resolve move collisions, and start acting as
 * move responder.
 * NOTE(review): elided dump — some statements are not shown. */
4828 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4829 struct l2cap_cmd_hdr *cmd,
4830 u16 cmd_len, void *data)
4832 struct l2cap_move_chan_req *req = data;
4833 struct l2cap_move_chan_rsp rsp;
4834 struct l2cap_chan *chan;
4836 u16 result = L2CAP_MR_NOT_ALLOWED;
4838 if (cmd_len != sizeof(*req))
4841 icid = le16_to_cpu(req->icid);
4843 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4845 if (!conn->hs_enabled)
/* Unknown icid: reply NOT_ALLOWED without a channel context. */
4848 chan = l2cap_get_chan_by_dcid(conn, icid);
4850 rsp.icid = cpu_to_le16(icid);
4851 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4852 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4857 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels not pinned to BR/EDR may move. */
4859 if (chan->scid < L2CAP_CID_DYN_START ||
4860 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4861 (chan->mode != L2CAP_MODE_ERTM &&
4862 chan->mode != L2CAP_MODE_STREAMING)) {
4863 result = L2CAP_MR_NOT_ALLOWED;
4864 goto send_move_response;
4867 if (chan->local_amp_id == req->dest_amp_id) {
4868 result = L2CAP_MR_SAME_ID;
4869 goto send_move_response;
/* A non-BR/EDR destination must name a powered-up local AMP. */
4872 if (req->dest_amp_id != AMP_ID_BREDR) {
4873 struct hci_dev *hdev;
4874 hdev = hci_dev_get(req->dest_amp_id);
4875 if (!hdev || hdev->dev_type != HCI_AMP ||
4876 !test_bit(HCI_UP, &hdev->flags)) {
4880 result = L2CAP_MR_BAD_ID;
4881 goto send_move_response;
4886 /* Detect a move collision. Only send a collision response
4887 * if this side has "lost", otherwise proceed with the move.
4888 * The winner has the larger bd_addr.
4890 if ((__chan_is_moving(chan) ||
4891 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4892 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4893 result = L2CAP_MR_COLLISION;
4894 goto send_move_response;
4897 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4898 l2cap_move_setup(chan);
4899 chan->move_id = req->dest_amp_id;
4902 if (req->dest_amp_id == AMP_ID_BREDR) {
4903 /* Moving to BR/EDR */
4904 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4905 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4906 result = L2CAP_MR_PEND;
4908 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4909 result = L2CAP_MR_SUCCESS;
4912 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4913 /* Placeholder - uncomment when amp functions are available */
4914 /*amp_accept_physical(chan, req->dest_amp_id);*/
4915 result = L2CAP_MR_PEND;
4919 l2cap_send_move_chan_rsp(chan, result);
4921 l2cap_chan_unlock(chan);
/* Handle a successful/pending Move Channel Response as initiator,
 * advancing the move state machine and confirming once both the peer's
 * response and the logical link are ready.
 * NOTE(review): elided dump — some statements are not shown. */
4926 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4928 struct l2cap_chan *chan;
4929 struct hci_chan *hchan = NULL;
4931 chan = l2cap_get_chan_by_scid(conn, icid);
/* Unknown channel: best-effort "unconfirmed" confirm by icid. */
4933 l2cap_send_move_chan_cfm_icid(conn, icid);
4937 __clear_chan_timer(chan);
4938 if (result == L2CAP_MR_PEND)
/* Peer asked for more time: re-arm with the extended timeout. */
4939 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4941 switch (chan->move_state) {
4942 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4943 /* Move confirm will be sent when logical link
4946 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4948 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4949 if (result == L2CAP_MR_PEND) {
4951 } else if (test_bit(CONN_LOCAL_BUSY,
4952 &chan->conn_state)) {
4953 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4955 /* Logical link is up or moving to BR/EDR,
4958 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4959 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4962 case L2CAP_MOVE_WAIT_RSP:
4964 if (result == L2CAP_MR_SUCCESS) {
4965 /* Remote is ready, send confirm immediately
4966 * after logical link is ready
4968 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4970 /* Both logical link and move success
4971 * are required to confirm
4973 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4976 /* Placeholder - get hci_chan for logical link */
4978 /* Logical link not available */
4979 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4983 /* If the logical link is not yet connected, do not
4984 * send confirmation.
4986 if (hchan->state != BT_CONNECTED)
4989 /* Logical link is already ready to go */
4991 chan->hs_hcon = hchan->conn;
4992 chan->hs_hcon->l2cap_data = chan->conn;
4994 if (result == L2CAP_MR_SUCCESS) {
4995 /* Can confirm now */
4996 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4998 /* Now only need move success
5001 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5004 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5007 /* Any other amp move state means the move failed. */
5008 chan->move_id = chan->local_amp_id;
5009 l2cap_move_done(chan);
5010 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5013 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response.  On collision the initiator
 * becomes responder; otherwise the move is cancelled and an
 * "unconfirmed" confirm is sent.
 * NOTE(review): elided dump — some statements are not shown. */
5016 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5019 struct l2cap_chan *chan;
5021 chan = l2cap_get_chan_by_ident(conn, ident);
5023 /* Could not locate channel, icid is best guess */
5024 l2cap_send_move_chan_cfm_icid(conn, icid);
5028 __clear_chan_timer(chan);
5030 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5031 if (result == L2CAP_MR_COLLISION) {
/* We lost the collision: let the peer drive the move instead. */
5032 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5034 /* Cleanup - cancel move */
5035 chan->move_id = chan->local_amp_id;
5036 l2cap_move_done(chan);
5040 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5042 l2cap_chan_unlock(chan);
/* Dispatch a Move Channel Response to the continue or fail path based
 * on its result code.
 * NOTE(review): elided dump — declarations and returns are not shown. */
5045 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5046 struct l2cap_cmd_hdr *cmd,
5047 u16 cmd_len, void *data)
5049 struct l2cap_move_chan_rsp *rsp = data;
5052 if (cmd_len != sizeof(*rsp))
5055 icid = le16_to_cpu(rsp->icid);
5056 result = le16_to_cpu(rsp->result);
5058 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5060 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5061 l2cap_move_continue(conn, icid, result);
5063 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirm: commit (or roll back) the controller
 * switch, and always acknowledge with a confirm response — even for
 * unknown icids, as the spec requires.
 * NOTE(review): elided dump — some statements are not shown. */
5068 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5069 struct l2cap_cmd_hdr *cmd,
5070 u16 cmd_len, void *data)
5072 struct l2cap_move_chan_cfm *cfm = data;
5073 struct l2cap_chan *chan;
5076 if (cmd_len != sizeof(*cfm))
5079 icid = le16_to_cpu(cfm->icid);
5080 result = le16_to_cpu(cfm->result);
5082 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5084 chan = l2cap_get_chan_by_dcid(conn, icid);
5086 /* Spec requires a response even if the icid was not found */
5087 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5091 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5092 if (result == L2CAP_MC_CONFIRMED) {
5093 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR: the AMP logical link is no longer needed. */
5094 if (chan->local_amp_id == AMP_ID_BREDR)
5095 __release_logical_link(chan);
5097 chan->move_id = chan->local_amp_id;
5100 l2cap_move_done(chan);
5103 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5105 l2cap_chan_unlock(chan);
/* Handle the final Move Channel Confirm Response: adopt the new
 * controller id and finish the move.
 * NOTE(review): elided dump — some statements are not shown. */
5110 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5111 struct l2cap_cmd_hdr *cmd,
5112 u16 cmd_len, void *data)
5114 struct l2cap_move_chan_cfm_rsp *rsp = data;
5115 struct l2cap_chan *chan;
5118 if (cmd_len != sizeof(*rsp))
5121 icid = le16_to_cpu(rsp->icid);
5123 BT_DBG("icid 0x%4.4x", icid);
5125 chan = l2cap_get_chan_by_scid(conn, icid);
5129 __clear_chan_timer(chan);
5131 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5132 chan->local_amp_id = chan->move_id;
/* Moved back to BR/EDR: drop the now-unused logical link. */
5134 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5135 __release_logical_link(chan);
5137 l2cap_move_done(chan);
5140 l2cap_chan_unlock(chan);
5145 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5150 if (min > max || min < 6 || max > 3200)
5153 if (to_multiplier < 10 || to_multiplier > 3200)
5156 if (max >= to_multiplier * 8)
5159 max_latency = (to_multiplier * 8 / max) - 1;
5160 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (slave -> master):
 * validate the proposed parameters, answer accept/reject, and apply
 * them to the LE link on accept.
 * NOTE(review): elided dump — some statements are not shown. */
5166 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5167 struct l2cap_cmd_hdr *cmd,
5170 struct hci_conn *hcon = conn->hcon;
5171 struct l2cap_conn_param_update_req *req;
5172 struct l2cap_conn_param_update_rsp rsp;
5173 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master can be asked to update connection parameters. */
5176 if (!(hcon->link_mode & HCI_LM_MASTER))
5179 cmd_len = __le16_to_cpu(cmd->len);
5180 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5183 req = (struct l2cap_conn_param_update_req *) data;
5184 min = __le16_to_cpu(req->min);
5185 max = __le16_to_cpu(req->max);
5186 latency = __le16_to_cpu(req->latency);
5187 to_multiplier = __le16_to_cpu(req->to_multiplier);
5189 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5190 min, max, latency, to_multiplier);
5192 memset(&rsp, 0, sizeof(rsp));
5194 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5196 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5198 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5200 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters were valid: push the update down to the controller. */
5204 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler.
 * NOTE(review): elided dump — break statements and returns are not shown. */
5209 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5210 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5215 switch (cmd->code) {
5216 case L2CAP_COMMAND_REJ:
5217 l2cap_command_rej(conn, cmd, cmd_len, data);
5220 case L2CAP_CONN_REQ:
5221 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share one handler. */
5224 case L2CAP_CONN_RSP:
5225 case L2CAP_CREATE_CHAN_RSP:
5226 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5229 case L2CAP_CONF_REQ:
5230 err = l2cap_config_req(conn, cmd, cmd_len, data);
5233 case L2CAP_CONF_RSP:
5234 l2cap_config_rsp(conn, cmd, cmd_len, data);
5237 case L2CAP_DISCONN_REQ:
5238 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5241 case L2CAP_DISCONN_RSP:
5242 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5245 case L2CAP_ECHO_REQ:
/* Echo simply reflects the payload back with the same ident. */
5246 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5249 case L2CAP_ECHO_RSP:
5252 case L2CAP_INFO_REQ:
5253 err = l2cap_information_req(conn, cmd, cmd_len, data);
5256 case L2CAP_INFO_RSP:
5257 l2cap_information_rsp(conn, cmd, cmd_len, data);
5260 case L2CAP_CREATE_CHAN_REQ:
5261 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5264 case L2CAP_MOVE_CHAN_REQ:
5265 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5268 case L2CAP_MOVE_CHAN_RSP:
5269 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5272 case L2CAP_MOVE_CHAN_CFM:
5273 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5276 case L2CAP_MOVE_CHAN_CFM_RSP:
5277 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5281 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command; only the connection parameter
 * update request needs real handling here.
 * NOTE(review): elided dump — the per-case returns are not shown. */
5289 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5290 struct l2cap_cmd_hdr *cmd, u8 *data)
5292 switch (cmd->code) {
5293 case L2CAP_COMMAND_REJ:
5296 case L2CAP_CONN_PARAM_UPDATE_REQ:
5297 return l2cap_conn_param_update_req(conn, cmd, data);
5299 case L2CAP_CONN_PARAM_UPDATE_RSP:
5303 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the single command carried on the LE signalling channel and
 * send a command reject for anything malformed or unsupported.
 * NOTE(review): elided dump — some statements are not shown. */
5308 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5309 struct sk_buff *skb)
5311 struct hci_conn *hcon = conn->hcon;
5312 struct l2cap_cmd_hdr *cmd;
5316 if (hcon->type != LE_LINK)
5319 if (skb->len < L2CAP_CMD_HDR_SIZE)
5322 cmd = (void *) skb->data;
5323 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5325 len = le16_to_cpu(cmd->len);
5327 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Ident 0 is reserved and the length must match the payload exactly. */
5329 if (len != skb->len || !cmd->ident) {
5330 BT_DBG("corrupted command");
5334 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5336 struct l2cap_cmd_rej_unk rej;
5338 BT_ERR("Wrong link type (%d)", err);
5340 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5341 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Walk every command packed into a BR/EDR signalling PDU, dispatching
 * each one and sending a command reject when a handler fails.
 * NOTE(review): elided dump — some statements are not shown. */
5349 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5350 struct sk_buff *skb)
5352 struct hci_conn *hcon = conn->hcon;
5353 u8 *data = skb->data;
5355 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signalling traffic first. */
5358 l2cap_raw_recv(conn, skb);
5360 if (hcon->type != ACL_LINK)
5363 while (len >= L2CAP_CMD_HDR_SIZE) {
5365 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5366 data += L2CAP_CMD_HDR_SIZE;
5367 len -= L2CAP_CMD_HDR_SIZE;
5369 cmd_len = le16_to_cpu(cmd.len);
5371 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Stop on truncated commands or the reserved ident 0. */
5374 if (cmd_len > len || !cmd.ident) {
5375 BT_DBG("corrupted command");
5379 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5381 struct l2cap_cmd_rej_unk rej;
5383 BT_ERR("Wrong link type (%d)", err);
5385 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5386 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify and strip the trailing CRC16 FCS of an ERTM/streaming frame.
 * NOTE(review): elided dump — declarations and returns are not shown. */
5398 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5400 u16 our_fcs, rcv_fcs;
/* Header size depends on whether extended control fields are in use. */
5403 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5404 hdr_size = L2CAP_EXT_HDR_SIZE;
5406 hdr_size = L2CAP_ENH_HDR_SIZE;
5408 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new tail. */
5409 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5410 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* The CRC also covers the L2CAP header preceding skb->data. */
5411 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5413 if (our_fcs != rcv_fcs)
/* Report the local-busy state to the peer: RNR when busy, otherwise
 * resume transmission and make sure the F-bit gets delivered.
 * NOTE(review): elided dump — some statements are not shown. */
5419 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5421 struct l2cap_ctrl control;
5423 BT_DBG("chan %p", chan);
5425 memset(&control, 0, sizeof(control));
5428 control.reqseq = chan->buffer_seq;
5429 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5431 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5432 control.super = L2CAP_SUPER_RNR;
5433 l2cap_send_sframe(chan, &control);
/* Remote just cleared busy: restart retransmission for unacked data. */
5436 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5437 chan->unacked_frames > 0)
5438 __set_retrans_timer(chan);
5440 /* Send pending iframes */
5441 l2cap_ertm_send(chan);
5443 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5444 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5445 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5448 control.super = L2CAP_SUPER_RR;
5449 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list and update the aggregate length
 * accounting; *last_frag tracks the list tail for O(1) appends. */
5453 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5454 struct sk_buff **last_frag)
5456 /* skb->len reflects data in skb as well as all fragments
5457 * skb->data_len reflects only data in fragments
5459 if (!skb_has_frag_list(skb))
5460 skb_shinfo(skb)->frag_list = new_frag;
5462 new_frag->next = NULL;
5464 (*last_frag)->next = new_frag;
5465 *last_frag = new_frag;
5467 skb->len += new_frag->len;
5468 skb->data_len += new_frag->len;
5469 skb->truesize += new_frag->truesize;
/* Reassemble a segmented SDU from ERTM I-frames according to the SAR
 * bits and deliver the complete SDU via chan->ops->recv().
 * NOTE(review): heavily elided dump — error paths and state checks are
 * not shown. */
5472 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5473 struct l2cap_ctrl *control)
5477 switch (control->sar) {
5478 case L2CAP_SAR_UNSEGMENTED:
5482 err = chan->ops->recv(chan, skb);
5485 case L2CAP_SAR_START:
/* The first segment carries the total SDU length up front. */
5489 chan->sdu_len = get_unaligned_le16(skb->data);
5490 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* The SDU may not exceed our advertised incoming MTU. */
5492 if (chan->sdu_len > chan->imtu) {
5497 if (skb->len >= chan->sdu_len)
5501 chan->sdu_last_frag = skb;
5507 case L2CAP_SAR_CONTINUE:
5511 append_skb_frag(chan->sdu, skb,
5512 &chan->sdu_last_frag);
/* A middle segment must not already complete the SDU. */
5515 if (chan->sdu->len >= chan->sdu_len)
5525 append_skb_frag(chan->sdu, skb,
5526 &chan->sdu_last_frag);
/* Final segment: the accumulated length must match exactly. */
5529 if (chan->sdu->len != chan->sdu_len)
5532 err = chan->ops->recv(chan, chan->sdu);
5535 /* Reassembly complete */
5537 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU. */
5545 kfree_skb(chan->sdu);
5547 chan->sdu_last_frag = NULL;
/* Resegment queued outgoing data after a channel move; the body is
 * elided in this dump (placeholder in the original source). */
5554 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy state change into the ERTM transmit state machine;
 * other channel modes have no flow control and are ignored. */
5560 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5564 if (chan->mode != L2CAP_MODE_ERTM)
5567 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5568 l2cap_tx(chan, NULL, NULL, event);
/* Drain in-sequence frames from the SREJ queue into SDU reassembly
 * until a gap (or local busy) is hit; ack and return to normal receive
 * state once the queue is empty.
 * NOTE(review): elided dump — some statements are not shown. */
5571 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5574 /* Pass sequential frames to l2cap_reassemble_sdu()
5575 * until a gap is encountered.
5578 BT_DBG("chan %p", chan);
/* Stop draining as soon as local busy is asserted again. */
5580 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5581 struct sk_buff *skb;
5582 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5583 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5585 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5590 skb_unlink(skb, &chan->srej_q);
5591 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5592 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* Queue fully drained: return to normal receive state and ack. */
5597 if (skb_queue_empty(&chan->srej_q)) {
5598 chan->rx_state = L2CAP_RX_STATE_RECV;
5599 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the requested I-frame,
 * guarding against an invalid reqseq, retry-limit overrun, and
 * duplicate retransmissions caused by poll/final exchanges.
 * NOTE(review): elided dump — some statements are not shown. */
5605 static void l2cap_handle_srej(struct l2cap_chan *chan,
5606 struct l2cap_ctrl *control)
5608 struct sk_buff *skb;
5610 BT_DBG("chan %p, control %p", chan, control);
/* reqseq == next_tx_seq asks for a frame never sent — protocol error. */
5612 if (control->reqseq == chan->next_tx_seq) {
5613 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5614 l2cap_send_disconn_req(chan, ECONNRESET);
5618 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5621 BT_DBG("Seq %d not available for retransmission",
5626 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5627 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5628 l2cap_send_disconn_req(chan, ECONNRESET);
5632 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5634 if (control->poll) {
5635 l2cap_pass_to_tx(chan, control);
5637 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5638 l2cap_retransmit(chan, control);
5639 l2cap_ertm_send(chan);
5641 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5642 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5643 chan->srej_save_reqseq = control->reqseq;
5646 l2cap_pass_to_tx_fbit(chan, control);
5648 if (control->final) {
/* Skip the retransmit only when this exact SREJ was already acted on. */
5649 if (chan->srej_save_reqseq != control->reqseq ||
5650 !test_and_clear_bit(CONN_SREJ_ACT,
5652 l2cap_retransmit(chan, control);
5654 l2cap_retransmit(chan, control);
5655 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5656 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5657 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit all unacked I-frames from
 * reqseq onward, subject to validity and retry-limit checks.
 * NOTE(review): elided dump — some statements are not shown. */
5663 static void l2cap_handle_rej(struct l2cap_chan *chan,
5664 struct l2cap_ctrl *control)
5666 struct sk_buff *skb;
5668 BT_DBG("chan %p, control %p", chan, control);
5670 if (control->reqseq == chan->next_tx_seq) {
5671 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5672 l2cap_send_disconn_req(chan, ECONNRESET);
5676 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5678 if (chan->max_tx && skb &&
5679 bt_cb(skb)->control.retries >= chan->max_tx) {
5680 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5681 l2cap_send_disconn_req(chan, ECONNRESET);
5685 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5687 l2cap_pass_to_tx(chan, control);
5689 if (control->final) {
/* Retransmit only if this REJ wasn't already handled via poll/final. */
5690 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5691 l2cap_retransmit_all(chan, control);
5693 l2cap_retransmit_all(chan, control);
5694 l2cap_ertm_send(chan);
5695 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5696 set_bit(CONN_REJ_ACT, &chan->conn_state);
5700 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5702 BT_DBG("chan %p, txseq %d", chan, txseq);
5704 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5705 chan->expected_tx_seq);
5707 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5708 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5710 /* See notes below regarding "double poll" and
5713 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5714 BT_DBG("Invalid/Ignore - after SREJ");
5715 return L2CAP_TXSEQ_INVALID_IGNORE;
5717 BT_DBG("Invalid - in window after SREJ sent");
5718 return L2CAP_TXSEQ_INVALID;
5722 if (chan->srej_list.head == txseq) {
5723 BT_DBG("Expected SREJ");
5724 return L2CAP_TXSEQ_EXPECTED_SREJ;
5727 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5728 BT_DBG("Duplicate SREJ - txseq already stored");
5729 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5732 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5733 BT_DBG("Unexpected SREJ - not requested");
5734 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5738 if (chan->expected_tx_seq == txseq) {
5739 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5741 BT_DBG("Invalid - txseq outside tx window");
5742 return L2CAP_TXSEQ_INVALID;
5745 return L2CAP_TXSEQ_EXPECTED;
5749 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5750 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5751 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5752 return L2CAP_TXSEQ_DUPLICATE;
5755 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5756 /* A source of invalid packets is a "double poll" condition,
5757 * where delays cause us to send multiple poll packets. If
5758 * the remote stack receives and processes both polls,
5759 * sequence numbers can wrap around in such a way that a
5760 * resent frame has a sequence number that looks like new data
5761 * with a sequence gap. This would trigger an erroneous SREJ
5764 * Fortunately, this is impossible with a tx window that's
5765 * less than half of the maximum sequence number, which allows
5766 * invalid frames to be safely ignored.
5768 * With tx window sizes greater than half of the tx window
5769 * maximum, the frame is invalid and cannot be ignored. This
5770 * causes a disconnect.
5773 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5774 BT_DBG("Invalid/Ignore - txseq outside tx window");
5775 return L2CAP_TXSEQ_INVALID_IGNORE;
5777 BT_DBG("Invalid - txseq outside tx window");
5778 return L2CAP_TXSEQ_INVALID;
5781 BT_DBG("Unexpected - txseq indicates missing frames");
5782 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive-side state machine handler for the default RECV state.
 * Dispatches on 'event' (received I-frame, RR, RNR, REJ or SREJ) and
 * returns 0 or a negative errno from reassembly.
 *
 * NOTE(review): this listing is a sampled excerpt -- the outer switch
 * header, braces, 'break' statements and the 'err' declaration are
 * missing between the numbered lines.  Comments describe only what the
 * visible lines show; confirm details against the full source.
 */
5786 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5787 struct l2cap_ctrl *control,
5788 struct sk_buff *skb, u8 event)
5791 bool skb_in_use = false;
5793 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
/* I-frame received: classify txseq relative to the expected sequence */
5797 case L2CAP_EV_RECV_IFRAME:
5798 switch (l2cap_classify_txseq(chan, control->txseq)) {
5799 case L2CAP_TXSEQ_EXPECTED:
5800 l2cap_pass_to_tx(chan, control);
/* In local-busy state the in-order frame is dropped; it will be
 * recovered as "missing" once local busy is exited. */
5802 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5803 BT_DBG("Busy, discarding expected seq %d",
5808 chan->expected_tx_seq = __next_seq(chan,
5811 chan->buffer_seq = chan->expected_tx_seq;
5814 err = l2cap_reassemble_sdu(chan, skb, control);
/* F-bit set: retransmit outstanding frames unless a REJ was
 * already acted upon (CONN_REJ_ACT set). */
5818 if (control->final) {
5819 if (!test_and_clear_bit(CONN_REJ_ACT,
5820 &chan->conn_state)) {
5822 l2cap_retransmit_all(chan, control);
5823 l2cap_ertm_send(chan);
5827 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5828 l2cap_send_ack(chan);
/* Sequence gap: queue this frame and SREJ the missing ones */
5830 case L2CAP_TXSEQ_UNEXPECTED:
5831 l2cap_pass_to_tx(chan, control);
5833 /* Can't issue SREJ frames in the local busy state.
5834 * Drop this frame, it will be seen as missing
5835 * when local busy is exited.
5837 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5838 BT_DBG("Busy, discarding unexpected seq %d",
5843 /* There was a gap in the sequence, so an SREJ
5844 * must be sent for each missing frame. The
5845 * current frame is stored for later use.
5847 skb_queue_tail(&chan->srej_q, skb);
5849 BT_DBG("Queued %p (queue len %d)", skb,
5850 skb_queue_len(&chan->srej_q));
5852 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5853 l2cap_seq_list_clear(&chan->srej_list);
5854 l2cap_send_srej(chan, control->txseq);
/* Switch to SREJ_SENT until the requested frames arrive */
5856 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5858 case L2CAP_TXSEQ_DUPLICATE:
5859 l2cap_pass_to_tx(chan, control);
5861 case L2CAP_TXSEQ_INVALID_IGNORE:
/* Unignorable invalid txseq: tear the channel down */
5863 case L2CAP_TXSEQ_INVALID:
5865 l2cap_send_disconn_req(chan, ECONNRESET);
5869 case L2CAP_EV_RECV_RR:
5870 l2cap_pass_to_tx(chan, control);
5871 if (control->final) {
5872 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Retransmit only if no REJ already acted on and no AMP move
 * is in progress. */
5874 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5875 !__chan_is_moving(chan)) {
5877 l2cap_retransmit_all(chan, control);
5880 l2cap_ertm_send(chan);
5881 } else if (control->poll) {
/* P-bit set: answer with an F-bit frame */
5882 l2cap_send_i_or_rr_or_rnr(chan);
5884 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5885 &chan->conn_state) &&
5886 chan->unacked_frames)
5887 __set_retrans_timer(chan);
5889 l2cap_ertm_send(chan);
/* Peer signalled busy: stop retransmissions until it recovers */
5892 case L2CAP_EV_RECV_RNR:
5893 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5894 l2cap_pass_to_tx(chan, control);
5895 if (control && control->poll) {
5896 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5897 l2cap_send_rr_or_rnr(chan, 0);
5899 __clear_retrans_timer(chan);
5900 l2cap_seq_list_clear(&chan->retrans_list);
5902 case L2CAP_EV_RECV_REJ:
5903 l2cap_handle_rej(chan, control);
5905 case L2CAP_EV_RECV_SREJ:
5906 l2cap_handle_srej(chan, control);
/* Frames not stored on a queue above are freed here; presumably
 * skb_in_use is set on the queueing paths (lines missing). */
5912 if (skb && !skb_in_use) {
5913 BT_DBG("Freeing %p", skb);
/* ERTM receive handler for the SREJ_SENT state: one or more SREJs are
 * outstanding, so in-window frames are buffered on srej_q until the
 * requested retransmissions arrive and reassembly can resume.
 *
 * NOTE(review): sampled listing -- intervening braces, 'break's and
 * declarations are missing between the numbered lines.
 */
5920 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5921 struct l2cap_ctrl *control,
5922 struct sk_buff *skb, u8 event)
5925 u16 txseq = control->txseq;
5926 bool skb_in_use = false;
5928 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5932 case L2CAP_EV_RECV_IFRAME:
5933 switch (l2cap_classify_txseq(chan, txseq)) {
5934 case L2CAP_TXSEQ_EXPECTED:
5935 /* Keep frame for reassembly later */
5936 l2cap_pass_to_tx(chan, control);
5937 skb_queue_tail(&chan->srej_q, skb);
5939 BT_DBG("Queued %p (queue len %d)", skb,
5940 skb_queue_len(&chan->srej_q));
5942 chan->expected_tx_seq = __next_seq(chan, txseq);
/* A frame we SREJ'd for has arrived: pop it off the SREJ list
 * and try to drain the buffered queue into the SDU reassembler. */
5944 case L2CAP_TXSEQ_EXPECTED_SREJ:
5945 l2cap_seq_list_pop(&chan->srej_list);
5947 l2cap_pass_to_tx(chan, control);
5948 skb_queue_tail(&chan->srej_q, skb);
5950 BT_DBG("Queued %p (queue len %d)", skb,
5951 skb_queue_len(&chan->srej_q));
5953 err = l2cap_rx_queued_iframes(chan);
5958 case L2CAP_TXSEQ_UNEXPECTED:
5959 /* Got a frame that can't be reassembled yet.
5960 * Save it for later, and send SREJs to cover
5961 * the missing frames.
5963 skb_queue_tail(&chan->srej_q, skb);
5965 BT_DBG("Queued %p (queue len %d)", skb,
5966 skb_queue_len(&chan->srej_q));
5968 l2cap_pass_to_tx(chan, control);
5969 l2cap_send_srej(chan, control->txseq);
5971 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5972 /* This frame was requested with an SREJ, but
5973 * some expected retransmitted frames are
5974 * missing. Request retransmission of missing
5977 skb_queue_tail(&chan->srej_q, skb);
5979 BT_DBG("Queued %p (queue len %d)", skb,
5980 skb_queue_len(&chan->srej_q));
5982 l2cap_pass_to_tx(chan, control);
5983 l2cap_send_srej_list(chan, control->txseq);
5985 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5986 /* We've already queued this frame. Drop this copy. */
5987 l2cap_pass_to_tx(chan, control);
5989 case L2CAP_TXSEQ_DUPLICATE:
5990 /* Expecting a later sequence number, so this frame
5991 * was already received. Ignore it completely.
5994 case L2CAP_TXSEQ_INVALID_IGNORE:
/* Unignorable invalid txseq: disconnect */
5996 case L2CAP_TXSEQ_INVALID:
5998 l2cap_send_disconn_req(chan, ECONNRESET);
6002 case L2CAP_EV_RECV_RR:
6003 l2cap_pass_to_tx(chan, control);
6004 if (control->final) {
6005 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6007 if (!test_and_clear_bit(CONN_REJ_ACT,
6008 &chan->conn_state)) {
6010 l2cap_retransmit_all(chan, control);
6013 l2cap_ertm_send(chan);
6014 } else if (control->poll) {
6015 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6016 &chan->conn_state) &&
6017 chan->unacked_frames) {
6018 __set_retrans_timer(chan);
/* Answer the poll with the tail SREJ (carries the F-bit) */
6021 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6022 l2cap_send_srej_tail(chan);
6024 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6025 &chan->conn_state) &&
6026 chan->unacked_frames)
6027 __set_retrans_timer(chan);
6029 l2cap_send_ack(chan);
6032 case L2CAP_EV_RECV_RNR:
6033 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6034 l2cap_pass_to_tx(chan, control);
6035 if (control->poll) {
6036 l2cap_send_srej_tail(chan);
/* No poll: acknowledge the RNR with a plain RR S-frame */
6038 struct l2cap_ctrl rr_control;
6039 memset(&rr_control, 0, sizeof(rr_control));
6040 rr_control.sframe = 1;
6041 rr_control.super = L2CAP_SUPER_RR;
6042 rr_control.reqseq = chan->buffer_seq;
6043 l2cap_send_sframe(chan, &rr_control);
6047 case L2CAP_EV_RECV_REJ:
6048 l2cap_handle_rej(chan, control);
6050 case L2CAP_EV_RECV_SREJ:
6051 l2cap_handle_srej(chan, control);
/* Free frames that were not stored on srej_q above */
6055 if (skb && !skb_in_use) {
6056 BT_DBG("Freeing %p", skb);
/* Restore the normal receive state after a channel move completes and
 * refresh the connection MTU for the new link, then re-segment queued
 * outbound data for that MTU.
 * NOTE(review): the conditional selecting between the two mtu
 * assignments is missing from this sampled listing; presumably it
 * tests chan->hs_hcon (AMP link present) -- confirm against the full
 * source. */
6063 static int l2cap_finish_move(struct l2cap_chan *chan)
6065 BT_DBG("chan %p", chan);
6067 chan->rx_state = L2CAP_RX_STATE_RECV;
/* High-speed (AMP) link: use the controller's block MTU */
6070 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
/* BR/EDR link: use the ACL MTU */
6072 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6074 return l2cap_resegment(chan);
/* RX state WAIT_P: after a channel move we wait for a poll (P=1) from
 * the peer.  On receipt, re-synchronize the transmit side to the
 * peer's reqseq, finish the move, answer with an F-bit frame, and
 * re-process the triggering event through the normal RECV handler.
 * NOTE(review): sampled listing -- the P-bit guard and 'err'
 * declaration are missing between the numbered lines. */
6077 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6078 struct l2cap_ctrl *control,
6079 struct sk_buff *skb, u8 event)
6083 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6089 l2cap_process_reqseq(chan, control->reqseq);
/* Reset the send pointer to the head of the (possibly rewound) tx_q */
6091 if (!skb_queue_empty(&chan->tx_q))
6092 chan->tx_send_head = skb_peek(&chan->tx_q);
6094 chan->tx_send_head = NULL;
6096 /* Rewind next_tx_seq to the point expected
6099 chan->next_tx_seq = control->reqseq;
6100 chan->unacked_frames = 0;
6102 err = l2cap_finish_move(chan);
6106 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6107 l2cap_send_i_or_rr_or_rnr(chan);
/* An I-frame that carried the poll has served its purpose; only
 * S-frame events are re-dispatched below (skb passed as NULL). */
6109 if (event == L2CAP_EV_RECV_IFRAME)
6112 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state WAIT_F: after sending a poll we wait for the peer's F-bit
 * response.  On receipt, clear remote-busy, return to RECV state,
 * rewind the transmit window to reqseq, refresh the link MTU,
 * re-segment, and process the frame through the RECV handler.
 * NOTE(review): sampled listing -- braces and the hs_hcon/BR-EDR
 * conditional around the mtu assignments are missing. */
6115 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6116 struct l2cap_ctrl *control,
6117 struct sk_buff *skb, u8 event)
/* Ignore everything until the F-bit arrives */
6121 if (!control->final)
6124 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6126 chan->rx_state = L2CAP_RX_STATE_RECV;
6127 l2cap_process_reqseq(chan, control->reqseq);
6129 if (!skb_queue_empty(&chan->tx_q))
6130 chan->tx_send_head = skb_peek(&chan->tx_q);
6132 chan->tx_send_head = NULL;
6134 /* Rewind next_tx_seq to the point expected
6137 chan->next_tx_seq = control->reqseq;
6138 chan->unacked_frames = 0;
/* AMP link vs BR/EDR link MTU (selecting condition not visible) */
6141 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6143 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6145 err = l2cap_resegment(chan);
6148 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true when 'reqseq' acknowledges a frame inside the current
 * unacked transmit window, i.e. between expected_ack_seq and
 * next_tx_seq (modulo the sequence space). */
6153 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6155 /* Make sure reqseq is for a packet that has been sent but not acked */
/* Distance from expected_ack_seq to next_tx_seq = frames in flight */
6158 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6159 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry: validate the frame's reqseq, then
 * dispatch to the handler for the channel's current rx_state.  An
 * out-of-window reqseq is a protocol violation and disconnects the
 * channel.
 * NOTE(review): sampled listing -- 'err' declaration, breaks and the
 * else branch structure are missing between the numbered lines. */
6162 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6163 struct sk_buff *skb, u8 event)
6167 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6168 control, skb, event, chan->rx_state);
6170 if (__valid_reqseq(chan, control->reqseq)) {
6171 switch (chan->rx_state) {
6172 case L2CAP_RX_STATE_RECV:
6173 err = l2cap_rx_state_recv(chan, control, skb, event);
6175 case L2CAP_RX_STATE_SREJ_SENT:
6176 err = l2cap_rx_state_srej_sent(chan, control, skb,
6179 case L2CAP_RX_STATE_WAIT_P:
6180 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6182 case L2CAP_RX_STATE_WAIT_F:
6183 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* Note: debug format string below is missing its closing ')' --
 * cosmetic only, present in the original message text. */
6190 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6191 control->reqseq, chan->next_tx_seq,
6192 chan->expected_ack_seq);
6193 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only in-order frames are reassembled; any
 * other classification drops the frame and any partial SDU (streaming
 * mode has no retransmission).  Sequence state is always advanced to
 * just past the received txseq.
 * NOTE(review): sampled listing -- braces and the non-expected branch
 * structure are missing between the numbered lines. */
6199 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6200 struct sk_buff *skb)
6204 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6207 if (l2cap_classify_txseq(chan, control->txseq) ==
6208 L2CAP_TXSEQ_EXPECTED) {
6209 l2cap_pass_to_tx(chan, control);
6211 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6212 __next_seq(chan, chan->buffer_seq));
6214 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6216 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence: discard any partially reassembled SDU */
6219 kfree_skb(chan->sdu);
6222 chan->sdu_last_frag = NULL;
6226 BT_DBG("Freeing %p", skb);
/* Always track the peer's last txseq, even for dropped frames */
6231 chan->last_acked_seq = control->txseq;
6232 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Validate and dispatch an incoming ERTM/streaming data frame: unpack
 * the control field, check FCS and payload length, validate the F/P
 * bits against the transmit state, then route I-frames to l2cap_rx()
 * or l2cap_stream_rx() and S-frames to l2cap_rx() via an event table.
 * NOTE(review): sampled listing -- 'len'/'err'/'event' declarations,
 * braces and goto-drop paths are missing between the numbered lines. */
6237 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6239 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6243 __unpack_control(chan, skb);
6248 * We can just drop the corrupted I-frame here.
6249 * Receiver will miss it and start proper recovery
6250 * procedures and ask for retransmission.
6252 if (l2cap_check_fcs(chan, skb))
/* Compute the SDU payload length: subtract the SDU-length field on
 * SAR start fragments and the FCS trailer when CRC16 is in use. */
6255 if (!control->sframe && control->sar == L2CAP_SAR_START)
6256 len -= L2CAP_SDULEN_SIZE;
6258 if (chan->fcs == L2CAP_FCS_CRC16)
6259 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS is a protocol violation */
6261 if (len > chan->mps) {
6262 l2cap_send_disconn_req(chan, ECONNRESET);
6266 if (!control->sframe) {
6269 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6270 control->sar, control->reqseq, control->final,
6273 /* Validate F-bit - F=0 always valid, F=1 only
6274 * valid in TX WAIT_F
6276 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6279 if (chan->mode != L2CAP_MODE_STREAMING) {
6280 event = L2CAP_EV_RECV_IFRAME;
6281 err = l2cap_rx(chan, control, skb, event);
6283 err = l2cap_stream_rx(chan, control, skb);
6287 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame 'super' field to an rx event */
6289 const u8 rx_func_to_event[4] = {
6290 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6291 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6294 /* Only I-frames are expected in streaming mode */
6295 if (chan->mode == L2CAP_MODE_STREAMING)
6298 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6299 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are an error */
6303 BT_ERR("Trailing bytes: %d in sframe", len);
6304 l2cap_send_disconn_req(chan, ECONNRESET);
6308 /* Validate F and P bits */
6309 if (control->final && (control->poll ||
6310 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6313 event = rx_func_to_event[control->super];
6314 if (l2cap_rx(chan, control, skb, event))
6315 l2cap_send_disconn_req(chan, ECONNRESET);
/* Deliver an skb to the channel registered for 'cid': basic-mode data
 * goes straight to the channel's recv op (after an MTU check),
 * ERTM/streaming data goes through l2cap_data_rcv().  Unknown CIDs and
 * A2MP creation are handled up front.
 * NOTE(review): sampled listing -- the NULL-chan check, drop labels and
 * goto targets are missing between the numbered lines. */
6325 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6326 struct sk_buff *skb)
6328 struct l2cap_chan *chan;
6330 chan = l2cap_get_chan_by_scid(conn, cid);
/* No channel yet: an A2MP CID may create one on demand */
6332 if (cid == L2CAP_CID_A2MP) {
6333 chan = a2mp_channel_create(conn, skb);
6339 l2cap_chan_lock(chan);
6341 BT_DBG("unknown cid 0x%4.4x", cid);
6342 /* Drop packet and return */
6348 BT_DBG("chan %p, len %d", chan, skb->len);
6350 if (chan->state != BT_CONNECTED)
6353 switch (chan->mode) {
6354 case L2CAP_MODE_BASIC:
6355 /* If socket recv buffers overflows we drop data here
6356 * which is *bad* because L2CAP has to be reliable.
6357 * But we don't have any other choice. L2CAP doesn't
6358 * provide flow control mechanism. */
6360 if (chan->imtu < skb->len)
6363 if (!chan->ops->recv(chan, skb))
6367 case L2CAP_MODE_ERTM:
6368 case L2CAP_MODE_STREAMING:
6369 l2cap_data_rcv(chan, skb);
6373 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6381 l2cap_chan_unlock(chan);
/* Deliver a connectionless (UCD) frame to a global channel listening
 * on 'psm'.  Only valid on ACL links; the remote address and PSM are
 * stashed in the skb cb so recvmsg can report them in msg_name.
 * NOTE(review): sampled listing -- the NULL-chan check and drop/free
 * paths are missing between the numbered lines. */
6384 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6385 struct sk_buff *skb)
6387 struct hci_conn *hcon = conn->hcon;
6388 struct l2cap_chan *chan;
6390 if (hcon->type != ACL_LINK)
6393 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6398 BT_DBG("chan %p, len %d", chan, skb->len);
6400 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6403 if (chan->imtu < skb->len)
6406 /* Store remote BD_ADDR and PSM for msg_name */
6407 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6408 bt_cb(skb)->psm = psm;
6410 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT (fixed CID) frame on an LE link to the global ATT
 * channel, dropping it for blacklisted peers or oversized payloads.
 * NOTE(review): sampled listing -- the NULL-chan check and drop/free
 * paths are missing between the numbered lines. */
6417 static void l2cap_att_channel(struct l2cap_conn *conn,
6418 struct sk_buff *skb)
6420 struct hci_conn *hcon = conn->hcon;
6421 struct l2cap_chan *chan;
6423 if (hcon->type != LE_LINK)
6426 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6427 &hcon->src, &hcon->dst);
6431 BT_DBG("chan %p, len %d", chan, skb->len);
/* Silently drop traffic from blacklisted remote devices */
6433 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6436 if (chan->imtu < skb->len)
6439 if (!chan->ops->recv(chan, skb))
/* Parse the basic L2CAP header of a complete frame and route by CID:
 * signalling, connectionless, ATT, LE signalling, SMP, or a dynamic
 * data channel.  Takes ownership of 'skb'.
 * NOTE(review): sampled listing -- local declarations (cid/len/psm),
 * the len-mismatch drop path, 'break's and some case labels
 * (L2CAP_CID_ATT, L2CAP_CID_SMP, default) are missing between the
 * numbered lines. */
6446 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6448 struct l2cap_hdr *lh = (void *) skb->data;
6452 skb_pull(skb, L2CAP_HDR_SIZE);
6453 cid = __le16_to_cpu(lh->cid);
6454 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length */
6456 if (len != skb->len) {
6461 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6464 case L2CAP_CID_SIGNALING:
6465 l2cap_sig_channel(conn, skb);
6468 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM before the payload */
6469 psm = get_unaligned((__le16 *) skb->data);
6470 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6471 l2cap_conless_channel(conn, psm, skb);
6475 l2cap_att_channel(conn, skb);
6478 case L2CAP_CID_LE_SIGNALING:
6479 l2cap_le_sig_channel(conn, skb);
/* SMP failure tears down the whole connection */
6483 if (smp_sig_channel(conn, skb))
6484 l2cap_conn_del(conn->hcon, EACCES);
6488 l2cap_data_channel(conn, cid, skb);
6493 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI connect indication: scan listening channels and build the link
 * mode the controller should accept with.  lm1 accumulates flags for
 * exact local-address matches, lm2 for BDADDR_ANY listeners; the
 * exact match wins when present.
 * NOTE(review): sampled listing -- the line setting 'exact' on an
 * address match is missing; 'exact ? lm1 : lm2' below implies it. */
6495 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6497 int exact = 0, lm1 = 0, lm2 = 0;
6498 struct l2cap_chan *c;
6500 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6502 /* Find listening sockets and check their link_mode */
6503 read_lock(&chan_list_lock);
6504 list_for_each_entry(c, &chan_list, global_l) {
6505 if (c->state != BT_LISTEN)
6508 if (!bacmp(&c->src, &hdev->bdaddr)) {
6509 lm1 |= HCI_LM_ACCEPT;
6510 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6511 lm1 |= HCI_LM_MASTER;
6513 } else if (!bacmp(&c->src, BDADDR_ANY)) {
6514 lm2 |= HCI_LM_ACCEPT;
6515 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6516 lm2 |= HCI_LM_MASTER;
6519 read_unlock(&chan_list_lock);
6521 return exact ? lm1 : lm2;
/* HCI connect confirmation: on success create/ready the L2CAP
 * connection; on failure tear it down with the mapped errno.
 * NOTE(review): the status-test branch lines are missing from this
 * sampled listing. */
6524 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6526 struct l2cap_conn *conn;
6528 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6531 conn = l2cap_conn_add(hcon);
6533 l2cap_conn_ready(conn);
6535 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI disconnect indication: report the reason L2CAP recorded for the
 * pending disconnect, or a generic remote-termination code when no
 * L2CAP connection exists.
 * NOTE(review): the NULL-conn test guarding the first return is
 * missing from this sampled listing. */
6539 int l2cap_disconn_ind(struct hci_conn *hcon)
6541 struct l2cap_conn *conn = hcon->l2cap_data;
6543 BT_DBG("hcon %p", hcon);
6546 return HCI_ERROR_REMOTE_USER_TERM;
6547 return conn->disc_reason;
/* HCI disconnect confirmation: tear down the L2CAP connection,
 * translating the HCI reason code to an errno. */
6550 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6552 BT_DBG("hcon %p reason %d", hcon, reason);
6554 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * when encryption drops, arm a timer (medium security) or close the
 * channel outright (high security); when encryption comes up, clear
 * the pending timer for medium-security channels. */
6557 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6559 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6562 if (encrypt == 0x00) {
6563 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6564 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6565 } else if (chan->sec_level == BT_SECURITY_HIGH)
6566 l2cap_chan_close(chan, ECONNREFUSED);
6568 if (chan->sec_level == BT_SECURITY_MEDIUM)
6569 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) confirmation: walk every
 * channel on the connection and advance its state machine according
 * to 'status' and 'encrypt'.  LE links short-circuit to SMP key
 * distribution.
 * NOTE(review): sampled listing -- several declarations (rsp buffer,
 * res/stat, conf-req buf), 'continue's, braces and the NULL-conn guard
 * are missing between the numbered lines. */
6573 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6575 struct l2cap_conn *conn = hcon->l2cap_data;
6576 struct l2cap_chan *chan;
6581 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
/* LE: successful encryption triggers SMP key distribution */
6583 if (hcon->type == LE_LINK) {
6584 if (!status && encrypt)
6585 smp_distribute_keys(conn, 0);
6586 cancel_delayed_work(&conn->security_timer);
6589 mutex_lock(&conn->chan_lock);
6591 list_for_each_entry(chan, &conn->chan_l, list) {
6592 l2cap_chan_lock(chan);
6594 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6595 state_to_string(chan->state));
/* A2MP fixed channels are not affected by link security */
6597 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6598 l2cap_chan_unlock(chan);
/* ATT fixed channel becomes ready once the link is encrypted */
6602 if (chan->scid == L2CAP_CID_ATT) {
6603 if (!status && encrypt) {
6604 chan->sec_level = hcon->sec_level;
6605 l2cap_chan_ready(chan);
6608 l2cap_chan_unlock(chan);
6612 if (!__l2cap_no_conn_pending(chan)) {
6613 l2cap_chan_unlock(chan);
/* Already connected/configuring: resume traffic and re-check
 * encryption requirements */
6617 if (!status && (chan->state == BT_CONNECTED ||
6618 chan->state == BT_CONFIG)) {
6619 chan->ops->resume(chan);
6620 l2cap_check_encryption(chan, encrypt);
6621 l2cap_chan_unlock(chan);
/* Outgoing connect waiting on security: proceed or time out */
6625 if (chan->state == BT_CONNECT) {
6627 l2cap_start_connection(chan);
6629 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect waiting on security: send the conn rsp */
6630 } else if (chan->state == BT_CONNECT2) {
6631 struct l2cap_conn_rsp rsp;
6635 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6636 res = L2CAP_CR_PEND;
6637 stat = L2CAP_CS_AUTHOR_PEND;
6638 chan->ops->defer(chan);
6640 l2cap_state_change(chan, BT_CONFIG);
6641 res = L2CAP_CR_SUCCESS;
6642 stat = L2CAP_CS_NO_INFO;
/* Security failure: refuse the connection */
6645 l2cap_state_change(chan, BT_DISCONN);
6646 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6647 res = L2CAP_CR_SEC_BLOCK;
6648 stat = L2CAP_CS_NO_INFO;
6651 rsp.scid = cpu_to_le16(chan->dcid);
6652 rsp.dcid = cpu_to_le16(chan->scid);
6653 rsp.result = cpu_to_le16(res);
6654 rsp.status = cpu_to_le16(stat);
6655 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Follow a successful response with the first config request */
6658 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6659 res == L2CAP_CR_SUCCESS) {
6661 set_bit(CONF_REQ_SENT, &chan->conf_state);
6662 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6664 l2cap_build_conf_req(chan, buf),
6666 chan->num_conf_req++;
6670 l2cap_chan_unlock(chan);
6673 mutex_unlock(&conn->chan_lock);
/* HCI ACL data entry point: reassemble L2CAP frames from ACL
 * fragments.  Start fragments allocate conn->rx_skb sized from the
 * L2CAP header; continuation fragments are appended until rx_len hits
 * zero, then the complete frame is passed to l2cap_recv_frame().
 * NOTE(review): sampled listing -- the flags switch header, case
 * ACL_CONT, several braces, 'len' declaration and drop/free paths are
 * missing between the numbered lines. */
6678 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6680 struct l2cap_conn *conn = hcon->l2cap_data;
6681 struct l2cap_hdr *hdr;
6684 /* For AMP controller do not create l2cap conn */
6685 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6689 conn = l2cap_conn_add(hcon);
6694 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6698 case ACL_START_NO_FLUSH:
/* A start fragment while a reassembly is pending means we lost
 * the tail of the previous frame -- discard it. */
6701 BT_ERR("Unexpected start frame (len %d)", skb->len);
6702 kfree_skb(conn->rx_skb);
6703 conn->rx_skb = NULL;
6705 l2cap_conn_unreliable(conn, ECOMM);
6708 /* Start fragment always begin with Basic L2CAP header */
6709 if (skb->len < L2CAP_HDR_SIZE) {
6710 BT_ERR("Frame is too short (len %d)", skb->len);
6711 l2cap_conn_unreliable(conn, ECOMM);
6715 hdr = (struct l2cap_hdr *) skb->data;
6716 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6718 if (len == skb->len) {
6719 /* Complete frame received */
6720 l2cap_recv_frame(conn, skb);
6724 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6726 if (skb->len > len) {
6727 BT_ERR("Frame is too long (len %d, expected len %d)",
6729 l2cap_conn_unreliable(conn, ECOMM);
6733 /* Allocate skb for the complete frame (with header) */
6734 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6738 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len counts the bytes still expected from continuations */
6740 conn->rx_len = len - skb->len;
6744 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6746 if (!conn->rx_len) {
6747 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6748 l2cap_conn_unreliable(conn, ECOMM);
6752 if (skb->len > conn->rx_len) {
6753 BT_ERR("Fragment is too long (len %d, expected %d)",
6754 skb->len, conn->rx_len);
6755 kfree_skb(conn->rx_skb);
6756 conn->rx_skb = NULL;
6758 l2cap_conn_unreliable(conn, ECOMM);
6762 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6764 conn->rx_len -= skb->len;
6766 if (!conn->rx_len) {
6767 /* Complete frame received. l2cap_recv_frame
6768 * takes ownership of the skb so set the global
6769 * rx_skb pointer to NULL first.
6771 struct sk_buff *rx_skb = conn->rx_skb;
6772 conn->rx_skb = NULL;
6773 l2cap_recv_frame(conn, rx_skb);
/* debugfs seq_file show: dump one line per global L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode) under
 * the chan_list read lock. */
6783 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6785 struct l2cap_chan *c;
6787 read_lock(&chan_list_lock);
6789 list_for_each_entry(c, &chan_list, global_l) {
6790 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6792 c->state, __le16_to_cpu(c->psm),
6793 c->scid, c->dcid, c->imtu, c->omtu,
6794 c->sec_level, c->mode);
6797 read_unlock(&chan_list_lock);
/* debugfs open: bind the seq_file single-show helper */
6802 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6804 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry.
 * NOTE(review): the .read line (presumably seq_read) is missing from
 * this sampled listing. */
6807 static const struct file_operations l2cap_debugfs_fops = {
6808 .open = l2cap_debugfs_open,
6810 .llseek = seq_lseek,
6811 .release = single_release,
/* Dentry for the debugfs entry, removed again in l2cap_exit() */
6814 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and, when the
 * Bluetooth debugfs root exists, create the read-only "l2cap" entry. */
6816 int __init l2cap_init(void)
6820 err = l2cap_init_sockets();
6824 if (IS_ERR_OR_NULL(bt_debugfs))
6827 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6828 NULL, &l2cap_debugfs_fops);
/* Module exit: remove the debugfs entry and unregister sockets,
 * reversing l2cap_init(). */
6833 void l2cap_exit(void)
6835 debugfs_remove(l2cap_debugfs);
6836 l2cap_cleanup_sockets();
/* Runtime-writable module parameter to force basic mode by disabling
 * ERTM negotiation (the 'disable_ertm' bool is defined elsewhere in
 * this file). */
6839 module_param(disable_ertm, bool, 0644);
6840 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");