2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
/* Module-wide state: advertised feature mask, fixed-channel map, and the
 * global list of all L2CAP channels guarded by chan_list_lock. */
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in the file.
 * NOTE(review): this listing is line-sampled; some declaration lines
 * (e.g. the tail of l2cap_send_cmd's prototype) are missing here. */
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l for the channel with the given destination CID.
 * Unlocked variant; l2cap_get_chan_by_dcid() takes conn->chan_lock. */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
68 list_for_each_entry(c, &conn->chan_l, list) {
/* Walk conn->chan_l for the channel with the given source CID.
 * Unlocked variant; l2cap_get_chan_by_scid() takes conn->chan_lock. */
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
80 list_for_each_entry(c, &conn->chan_l, list) {
87 /* Find channel with given SCID.
88 * Returns locked channel. */
/* Serialises the unlocked SCID lookup with conn->chan_lock. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
98 mutex_unlock(&conn->chan_lock);
103 /* Find channel with given DCID.
104 * Returns locked channel.
/* Serialises the unlocked DCID lookup with conn->chan_lock. */
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
109 struct l2cap_chan *c;
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
115 mutex_unlock(&conn->chan_lock);
/* Find the channel whose pending signalling-command ident matches.
 * Unlocked variant; see the locked wrapper directly below. */
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
123 struct l2cap_chan *c;
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
/* Locked wrapper: ident lookup performed under conn->chan_lock. */
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
135 struct l2cap_chan *c;
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
141 mutex_unlock(&conn->chan_lock);
/* Scan the global channel list for a channel bound to @psm on @src.
 * Callers below hold chan_list_lock for write while calling this. */
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
148 struct l2cap_chan *c;
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src.  With psm == 0, auto-allocate an unused
 * dynamic PSM in 0x1001..0x10ff (odd values only, hence the += 2).
 * Collision checks and the assignment run under chan_list_lock. */
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
161 write_lock(&chan_list_lock);
163 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
176 for (p = 0x1001; p < 0x1100; p += 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 chan->psm = cpu_to_le16(p);
179 chan->sport = cpu_to_le16(p);
186 write_unlock(&chan_list_lock);
/* Record a fixed SCID for @chan; serialised by chan_list_lock
 * (assignment line not visible in this sampled listing). */
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
192 write_lock(&chan_list_lock);
196 write_unlock(&chan_list_lock);
/* Return the first dynamic CID on @conn not already used as an SCID
 * (linear scan from L2CAP_CID_DYN_START). */
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
203 u16 cid = L2CAP_CID_DYN_START;
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition @chan to @state via the ops->state_change callback;
 * unlocked variant, see l2cap_state_change() below. */
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
215 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 state_to_string(state));
219 chan->ops->state_change(chan, state);
/* Wrapper around __l2cap_state_change(); presumably takes the socket
 * lock — the lock/unlock lines are missing from this sampled listing. */
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
224 struct sock *sk = chan->sk;
227 __l2cap_state_change(chan, state);
/* Record @err on the channel's socket; unlocked variant (body lines
 * missing from this sampled listing). */
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
233 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(); presumably takes the
 * socket lock — lock lines are missing from this sampled listing. */
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
240 struct sock *sk = chan->sk;
243 __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only while the monitor timer
 * is idle and a retransmission timeout has been negotiated. */
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
/* Switch from the retransmission timer to the monitor timer: the
 * retrans timer is always cleared, the monitor timer armed only when
 * a monitor timeout has been negotiated. */
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of @head for the skb whose control-block txseq == @seq. */
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
/* Allocate the backing array and mark every slot plus both list ends
 * as L2CAP_SEQ_LIST_CLEAR (empty list). */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the array allocated by l2cap_seq_list_init(). */
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
/* O(1) membership test: a slot equal to L2CAP_SEQ_LIST_CLEAR means
 * the sequence number is not on the list. */
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list.  Removing the head is O(1); any other
 * element requires walking the singly-linked chain to find its
 * predecessor.  Returns L2CAP_SEQ_LIST_CLEAR when @seq is absent. */
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
/* Pop the head element (O(1) path of l2cap_seq_list_remove()). */
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: reset every slot and both ends; no-op when the
 * list is already empty. */
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail in O(1).  Duplicates are ignored (slot not
 * CLEAR means @seq is already a member). */
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work callback for chan_timer: under conn->chan_lock and the
 * channel lock, derive a close reason from the current channel state,
 * close the channel, then drop the reference the timer held. */
396 static void l2cap_chan_timeout(struct work_struct *work)
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
400 struct l2cap_conn *conn = chan->conn;
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
416 l2cap_chan_close(chan, reason);
418 l2cap_chan_unlock(chan);
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
423 l2cap_chan_put(chan);
/* Allocate and initialise a new channel: zeroed, added to the global
 * chan_list, timer hooked up, state BT_OPEN, refcount 1.  Uses
 * GFP_ATOMIC so it is callable from non-sleeping contexts. */
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list under
 * chan_list_lock (the final kfree line is not visible here). */
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
/* Take a reference on the channel (kref_get line missing from this
 * sampled listing; paired with l2cap_chan_put() below). */
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a channel reference; l2cap_chan_destroy() runs on the last put. */
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
/* Apply the stock channel parameters: CRC16 FCS, default ERTM window
 * and retransmission limits, low security, force-active flag set. */
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: pick CIDs and MTUs per channel type (LE data,
 * dynamically-allocated CID for BR/EDR connection-oriented, fixed CIDs
 * for connectionless / A2MP / raw signalling), seed the best-effort EFS
 * defaults, take a reference, and link onto conn->chan_l.  Caller holds
 * conn->chan_lock (see l2cap_chan_add()). */
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 chan->scid = L2CAP_CID_LE_DATA;
508 chan->dcid = L2CAP_CID_LE_DATA;
510 /* Alloc CID for connection-oriented socket */
511 chan->scid = l2cap_alloc_cid(conn);
512 chan->omtu = L2CAP_DEFAULT_MTU;
516 case L2CAP_CHAN_CONN_LESS:
517 /* Connectionless socket */
518 chan->scid = L2CAP_CID_CONN_LESS;
519 chan->dcid = L2CAP_CID_CONN_LESS;
520 chan->omtu = L2CAP_DEFAULT_MTU;
523 case L2CAP_CHAN_CONN_FIX_A2MP:
524 chan->scid = L2CAP_CID_A2MP;
525 chan->dcid = L2CAP_CID_A2MP;
526 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
527 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
531 /* Raw socket can send/recv signalling messages only */
532 chan->scid = L2CAP_CID_SIGNALING;
533 chan->dcid = L2CAP_CID_SIGNALING;
534 chan->omtu = L2CAP_DEFAULT_MTU;
537 chan->local_id = L2CAP_BESTEFFORT_ID;
538 chan->local_stype = L2CAP_SERV_BESTEFFORT;
539 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
540 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
541 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
542 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
544 l2cap_chan_hold(chan);
546 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: attach @chan to @conn under conn->chan_lock. */
549 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
551 mutex_lock(&conn->chan_lock);
552 __l2cap_chan_add(conn, chan);
553 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: stop the channel timer, unlink from
 * conn->chan_l and drop the list's reference, release the hci_conn ref
 * (A2MP channels excepted), clear any amp_mgr back-pointer, run the
 * teardown callback, then free mode-specific resources (ERTM timers,
 * SREJ queue and seq lists; streaming tx queue). */
556 void l2cap_chan_del(struct l2cap_chan *chan, int err)
558 struct l2cap_conn *conn = chan->conn;
560 __clear_chan_timer(chan);
562 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
565 struct amp_mgr *mgr = conn->hcon->amp_mgr;
566 /* Delete from channel list */
567 list_del(&chan->list);
569 l2cap_chan_put(chan);
573 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
574 hci_conn_put(conn->hcon);
576 if (mgr && mgr->bredr_chan == chan)
577 mgr->bredr_chan = NULL;
580 chan->ops->teardown(chan, err);
582 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
586 case L2CAP_MODE_BASIC:
589 case L2CAP_MODE_ERTM:
590 __clear_retrans_timer(chan);
591 __clear_monitor_timer(chan);
592 __clear_ack_timer(chan);
594 skb_queue_purge(&chan->srej_q);
596 l2cap_seq_list_free(&chan->srej_list);
597 l2cap_seq_list_free(&chan->retrans_list);
601 case L2CAP_MODE_STREAMING:
602 skb_queue_purge(&chan->tx_q);
/* Close @chan according to its current state: connected/configuring
 * BR/EDR channels send a disconnect request first; a BT_CONNECT2
 * channel answers the pending connect request with SEC_BLOCK (deferred
 * setup) or BAD_PSM before being deleted; other states delete or just
 * tear down directly.  NOTE(review): the switch-case labels between
 * these branches are missing from this sampled listing. */
609 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
611 struct l2cap_conn *conn = chan->conn;
612 struct sock *sk = chan->sk;
614 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
617 switch (chan->state) {
619 chan->ops->teardown(chan, 0);
624 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
625 conn->hcon->type == ACL_LINK) {
626 __set_chan_timer(chan, sk->sk_sndtimeo);
627 l2cap_send_disconn_req(conn, chan, reason);
629 l2cap_chan_del(chan, reason);
633 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
634 conn->hcon->type == ACL_LINK) {
635 struct l2cap_conn_rsp rsp;
638 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
639 result = L2CAP_CR_SEC_BLOCK;
641 result = L2CAP_CR_BAD_PSM;
642 l2cap_state_change(chan, BT_DISCONN);
644 rsp.scid = cpu_to_le16(chan->dcid);
645 rsp.dcid = cpu_to_le16(chan->scid);
646 rsp.result = cpu_to_le16(result);
647 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
648 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
652 l2cap_chan_del(chan, reason);
657 l2cap_chan_del(chan, reason);
661 chan->ops->teardown(chan, 0);
/* Map the channel type/PSM/security level to an HCI authentication
 * requirement: raw channels request dedicated bonding, SDP is demoted
 * to no-bonding (and BT_SECURITY_LOW raised to BT_SECURITY_SDP), and
 * everything else maps to general bonding. */
666 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
668 if (chan->chan_type == L2CAP_CHAN_RAW) {
669 switch (chan->sec_level) {
670 case BT_SECURITY_HIGH:
671 return HCI_AT_DEDICATED_BONDING_MITM;
672 case BT_SECURITY_MEDIUM:
673 return HCI_AT_DEDICATED_BONDING;
675 return HCI_AT_NO_BONDING;
677 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
678 if (chan->sec_level == BT_SECURITY_LOW)
679 chan->sec_level = BT_SECURITY_SDP;
681 if (chan->sec_level == BT_SECURITY_HIGH)
682 return HCI_AT_NO_BONDING_MITM;
684 return HCI_AT_NO_BONDING;
686 switch (chan->sec_level) {
687 case BT_SECURITY_HIGH:
688 return HCI_AT_GENERAL_BONDING_MITM;
689 case BT_SECURITY_MEDIUM:
690 return HCI_AT_GENERAL_BONDING;
692 return HCI_AT_NO_BONDING;
697 /* Service level security */
/* Delegate to hci_conn_security() with the auth type derived from the
 * channel's type and security level. */
698 int l2cap_chan_check_security(struct l2cap_chan *chan)
700 struct l2cap_conn *conn = chan->conn;
703 auth_type = l2cap_get_auth_type(chan);
705 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command ident under conn->lock,
 * wrapping within the kernel-reserved 1..128 range. */
708 static u8 l2cap_get_ident(struct l2cap_conn *conn)
712 /* Get next available identificator.
713 * 1 - 128 are used by kernel.
714 * 129 - 199 are reserved.
715 * 200 - 254 are used by utilities like l2ping, etc.
718 spin_lock(&conn->lock);
720 if (++conn->tx_ident > 128)
725 spin_unlock(&conn->lock);
/* Build a signalling command PDU and push it out the HCI channel at
 * maximum priority, using a non-flushable ACL start when the
 * controller supports it. */
730 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
733 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
736 BT_DBG("code 0x%2.2x", code);
741 if (lmp_no_flush_capable(conn->hcon->hdev))
742 flags = ACL_START_NO_FLUSH;
746 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
747 skb->priority = HCI_PRIO_MAX;
749 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move_state other
 * than STABLE or WAIT_PREPARE). */
752 static bool __chan_is_moving(struct l2cap_chan *chan)
754 return chan->move_state != L2CAP_MOVE_STABLE &&
755 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit one data skb on the channel's HCI channel; marks the packet
 * non-flushable unless the channel is flagged flushable, and carries
 * the channel's force-active power setting. */
758 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
760 struct hci_conn *hcon = chan->conn->hcon;
763 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
766 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
767 lmp_no_flush_capable(hcon->hdev))
768 flags = ACL_START_NO_FLUSH;
772 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
773 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit ERTM enhanced control field into @control:
 * common reqseq/final bits, then either S-frame (poll/super) or
 * I-frame (sar/txseq) fields depending on the frame-type bit. */
776 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
778 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
779 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
781 if (enh & L2CAP_CTRL_FRAME_TYPE) {
784 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
785 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
792 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
793 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field; same layout logic as the
 * enhanced variant but with the wider EXT_CTRL masks/shifts. */
800 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
802 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
803 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
805 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
808 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
809 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
816 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
817 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field at the front of @skb, choosing
 * the 32-bit extended or 16-bit enhanced format per FLAG_EXT_CTRL. */
824 static inline void __unpack_control(struct l2cap_chan *chan,
827 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
828 __unpack_extended_control(get_unaligned_le32(skb->data),
829 &bt_cb(skb)->control);
830 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
832 __unpack_enhanced_control(get_unaligned_le16(skb->data),
833 &bt_cb(skb)->control);
834 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode @control into a 32-bit extended control field (inverse of
 * __unpack_extended_control()). */
838 static u32 __pack_extended_control(struct l2cap_ctrl *control)
842 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
843 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
845 if (control->sframe) {
846 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
847 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
848 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
850 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
851 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode @control into a 16-bit enhanced control field (inverse of
 * __unpack_enhanced_control()). */
857 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
861 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
862 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
864 if (control->sframe) {
865 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
866 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
867 packed |= L2CAP_CTRL_FRAME_TYPE;
869 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
870 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into @skb just after the basic L2CAP
 * header, in the format selected by FLAG_EXT_CTRL. */
876 static inline void __pack_control(struct l2cap_chan *chan,
877 struct l2cap_ctrl *control,
880 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
881 put_unaligned_le32(__pack_extended_control(control),
882 skb->data + L2CAP_HDR_SIZE);
884 put_unaligned_le16(__pack_enhanced_control(control),
885 skb->data + L2CAP_HDR_SIZE);
/* ERTM header size: extended (32-bit control) or enhanced (16-bit). */
889 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
891 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
892 return L2CAP_EXT_HDR_SIZE;
894 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-frame) PDU: basic header, the pre-packed
 * @control word in the channel's control format, and an optional
 * CRC16 FCS over the whole frame.  Returns ERR_PTR(-ENOMEM) on
 * allocation failure. */
897 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
901 struct l2cap_hdr *lh;
902 int hlen = __ertm_hdr_size(chan);
904 if (chan->fcs == L2CAP_FCS_CRC16)
905 hlen += L2CAP_FCS_SIZE;
907 skb = bt_skb_alloc(hlen, GFP_KERNEL);
910 return ERR_PTR(-ENOMEM);
912 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
913 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
914 lh->cid = cpu_to_le16(chan->dcid);
916 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
917 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
919 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
921 if (chan->fcs == L2CAP_FCS_CRC16) {
922 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
923 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
926 skb->priority = HCI_PRIO_MAX;
/* Send one S-frame: fold in a pending F-bit, track RR/RNR state in
 * conn_state, and (for non-SREJ frames) record the acked sequence and
 * stop the ack timer before packing and transmitting the PDU. */
930 static void l2cap_send_sframe(struct l2cap_chan *chan,
931 struct l2cap_ctrl *control)
936 BT_DBG("chan %p, control %p", chan, control);
938 if (!control->sframe)
941 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
945 if (control->super == L2CAP_SUPER_RR)
946 clear_bit(CONN_RNR_SENT, &chan->conn_state);
947 else if (control->super == L2CAP_SUPER_RNR)
948 set_bit(CONN_RNR_SENT, &chan->conn_state);
950 if (control->super != L2CAP_SUPER_SREJ) {
951 chan->last_acked_seq = control->reqseq;
952 __clear_ack_timer(chan);
955 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
956 control->final, control->poll, control->super);
958 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
959 control_field = __pack_extended_control(control);
961 control_field = __pack_enhanced_control(control);
963 skb = l2cap_create_sframe_pdu(chan, control_field);
965 l2cap_do_send(chan, skb);
/* Send an RNR when the local side is busy, otherwise an RR, always
 * acknowledging up to chan->buffer_seq. */
968 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
970 struct l2cap_ctrl control;
972 BT_DBG("chan %p, poll %d", chan, poll);
974 memset(&control, 0, sizeof(control));
978 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
979 control.super = L2CAP_SUPER_RNR;
981 control.super = L2CAP_SUPER_RR;
983 control.reqseq = chan->buffer_seq;
984 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
987 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
989 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* True when the channel prefers an AMP controller and the remote
 * advertises the A2MP fixed channel. */
992 static bool __amp_capable(struct l2cap_chan *chan)
994 struct l2cap_conn *conn = chan->conn;
997 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
998 conn->fixed_chan_mask & L2CAP_FC_A2MP)
/* Issue an L2CAP Connection Request for @chan's PSM/SCID, remembering
 * the ident and marking the connect as pending. */
1004 void l2cap_send_conn_req(struct l2cap_chan *chan)
1006 struct l2cap_conn *conn = chan->conn;
1007 struct l2cap_conn_req req;
1009 req.scid = cpu_to_le16(chan->scid);
1010 req.psm = chan->psm;
1012 chan->ident = l2cap_get_ident(conn);
1014 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1016 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Prepare an ERTM channel for an AMP move: stop all ERTM timers, reset
 * retry accounting on queued frames, drop REJ/SREJ state and queues,
 * and park the tx/rx state machines in their move states.  No-op for
 * non-ERTM channels. */
1019 static void l2cap_move_setup(struct l2cap_chan *chan)
1021 struct sk_buff *skb;
1023 BT_DBG("chan %p", chan);
1025 if (chan->mode != L2CAP_MODE_ERTM)
1028 __clear_retrans_timer(chan);
1029 __clear_monitor_timer(chan);
1030 __clear_ack_timer(chan);
1032 chan->retry_count = 0;
1033 skb_queue_walk(&chan->tx_q, skb) {
1034 if (bt_cb(skb)->control.retries)
1035 bt_cb(skb)->control.retries = 1;
1040 chan->expected_tx_seq = chan->buffer_seq;
1042 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1043 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1044 l2cap_seq_list_clear(&chan->retrans_list);
1045 l2cap_seq_list_clear(&chan->srej_list);
1046 skb_queue_purge(&chan->srej_q);
1048 chan->tx_state = L2CAP_TX_STATE_XMIT;
1049 chan->rx_state = L2CAP_RX_STATE_MOVE;
1051 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return move state/role to stable, then for ERTM
 * channels resynchronise — the initiator sends an explicit poll and
 * waits for F, the responder waits for P. */
1054 static void l2cap_move_done(struct l2cap_chan *chan)
1056 u8 move_role = chan->move_role;
1057 BT_DBG("chan %p", chan);
1059 chan->move_state = L2CAP_MOVE_STABLE;
1060 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1062 if (chan->mode != L2CAP_MODE_ERTM)
1065 switch (move_role) {
1066 case L2CAP_MOVE_ROLE_INITIATOR:
1067 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1068 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1070 case L2CAP_MOVE_ROLE_RESPONDER:
1071 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark @chan fully connected: wipe all configuration flags, stop the
 * channel timer, and notify the owner via ops->ready(). */
1076 static void l2cap_chan_ready(struct l2cap_chan *chan)
1078 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1079 chan->conf_state = 0;
1080 __clear_chan_timer(chan);
1082 chan->state = BT_CONNECTED;
1084 chan->ops->ready(chan);
/* Begin channel establishment: AMP-capable channels first discover
 * AMP controllers, everything else sends a Connection Request. */
1087 static void l2cap_start_connection(struct l2cap_chan *chan)
1089 if (__amp_capable(chan)) {
1090 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1091 a2mp_discover_amp(chan);
1093 l2cap_send_conn_req(chan);
/* Kick off channel setup.  LE links are ready immediately.  On BR/EDR,
 * connect only once the feature-mask exchange has completed and
 * security allows it; otherwise first send an Information Request for
 * the feature mask and arm the info timer. */
1097 static void l2cap_do_start(struct l2cap_chan *chan)
1099 struct l2cap_conn *conn = chan->conn;
1101 if (conn->hcon->type == LE_LINK) {
1102 l2cap_chan_ready(chan);
1106 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1107 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1110 if (l2cap_chan_check_security(chan) &&
1111 __l2cap_no_conn_pending(chan)) {
1112 l2cap_start_connection(chan);
1115 struct l2cap_info_req req;
1116 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1118 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1119 conn->info_ident = l2cap_get_ident(conn);
1121 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1123 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check ERTM/streaming mode support against both the local feature
 * mask and the remote's @feat_mask (the conditional enabling of the
 * local ERTM/streaming bits is partly missing from this listing). */
1128 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1130 u32 local_feat_mask = l2cap_feat_mask;
1132 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1135 case L2CAP_MODE_ERTM:
1136 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1137 case L2CAP_MODE_STREAMING:
1138 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnection Request for @chan (stopping ERTM timers
 * on a connected ERTM channel first), then move the channel to
 * BT_DISCONN and record @err on its socket.  A2MP fixed channels skip
 * the wire request and only change state. */
1144 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
1145 struct l2cap_chan *chan, int err)
1147 struct sock *sk = chan->sk;
1148 struct l2cap_disconn_req req;
1153 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1154 __clear_retrans_timer(chan);
1155 __clear_monitor_timer(chan);
1156 __clear_ack_timer(chan);
1159 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1160 l2cap_state_change(chan, BT_DISCONN);
1164 req.dcid = cpu_to_le16(chan->dcid);
1165 req.scid = cpu_to_le16(chan->scid);
1166 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1170 __l2cap_state_change(chan, BT_DISCONN);
1171 __l2cap_chan_set_err(chan, err);
1175 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward: channels
 * in BT_CONNECT start their connection (or are closed when their mode
 * is unsupported and STATE2_DEVICE is set); channels in BT_CONNECT2
 * answer the pending Connection Request — success, authorization-
 * pending (deferred setup), or authentication-pending — and on
 * success immediately send the first Configure Request.  All of this
 * runs under conn->chan_lock with each channel individually locked. */
1176 static void l2cap_conn_start(struct l2cap_conn *conn)
1178 struct l2cap_chan *chan, *tmp;
1180 BT_DBG("conn %p", conn);
1182 mutex_lock(&conn->chan_lock);
1184 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1185 struct sock *sk = chan->sk;
1187 l2cap_chan_lock(chan);
1189 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1190 l2cap_chan_unlock(chan);
1194 if (chan->state == BT_CONNECT) {
1195 if (!l2cap_chan_check_security(chan) ||
1196 !__l2cap_no_conn_pending(chan)) {
1197 l2cap_chan_unlock(chan);
1201 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1202 && test_bit(CONF_STATE2_DEVICE,
1203 &chan->conf_state)) {
1204 l2cap_chan_close(chan, ECONNRESET);
1205 l2cap_chan_unlock(chan);
1209 l2cap_start_connection(chan);
1211 } else if (chan->state == BT_CONNECT2) {
1212 struct l2cap_conn_rsp rsp;
1214 rsp.scid = cpu_to_le16(chan->dcid);
1215 rsp.dcid = cpu_to_le16(chan->scid);
1217 if (l2cap_chan_check_security(chan)) {
1219 if (test_bit(BT_SK_DEFER_SETUP,
1220 &bt_sk(sk)->flags)) {
1221 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1222 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1223 chan->ops->defer(chan);
1226 __l2cap_state_change(chan, BT_CONFIG);
1227 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1228 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1232 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1233 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1236 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1239 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1240 rsp.result != L2CAP_CR_SUCCESS) {
1241 l2cap_chan_unlock(chan);
1245 set_bit(CONF_REQ_SENT, &chan->conf_state);
1246 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1247 l2cap_build_conf_req(chan, buf), buf);
1248 chan->num_conf_req++;
1251 l2cap_chan_unlock(chan);
1254 mutex_unlock(&conn->chan_lock);
1257 /* Find socket with cid and source/destination bdaddr.
1258 * Returns closest match, locked.
/* Exact src+dst match returns immediately; otherwise the best
 * wildcard (BDADDR_ANY) match found is remembered as @c1. */
1260 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1264 struct l2cap_chan *c, *c1 = NULL;
1266 read_lock(&chan_list_lock);
1268 list_for_each_entry(c, &chan_list, global_l) {
1269 struct sock *sk = c->sk;
1271 if (state && c->state != state)
1274 if (c->scid == cid) {
1275 int src_match, dst_match;
1276 int src_any, dst_any;
1279 src_match = !bacmp(&bt_sk(sk)->src, src);
1280 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1281 if (src_match && dst_match) {
1282 read_unlock(&chan_list_lock);
1287 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1288 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1289 if ((src_match && dst_any) || (src_any && dst_match) ||
1290 (src_any && dst_any))
1295 read_unlock(&chan_list_lock);
/* Incoming LE connection: find a listener on the LE data CID, spawn a
 * child channel from it, copy the addresses over, attach it to @conn
 * and mark it ready.  (Error/cleanup paths between these lines are
 * missing from this sampled listing.) */
1300 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1302 struct sock *parent, *sk;
1303 struct l2cap_chan *chan, *pchan;
1307 /* Check if we have socket listening on cid */
1308 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1309 conn->src, conn->dst);
1317 chan = pchan->ops->new_connection(pchan);
1323 hci_conn_hold(conn->hcon);
1324 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1326 bacpy(&bt_sk(sk)->src, conn->src);
1327 bacpy(&bt_sk(sk)->dst, conn->dst);
1329 l2cap_chan_add(conn, chan);
1331 l2cap_chan_ready(chan);
1334 release_sock(parent);
/* The underlying link came up.  Incoming LE links get the LE-ready
 * path, outgoing LE links elevate SMP security, then every channel is
 * advanced: LE channels become ready once SMP allows, non-connection-
 * oriented channels go straight to BT_CONNECTED, and BT_CONNECT
 * channels proceed with l2cap_do_start(). */
1337 static void l2cap_conn_ready(struct l2cap_conn *conn)
1339 struct l2cap_chan *chan;
1340 struct hci_conn *hcon = conn->hcon;
1342 BT_DBG("conn %p", conn);
1344 if (!hcon->out && hcon->type == LE_LINK)
1345 l2cap_le_conn_ready(conn);
1347 if (hcon->out && hcon->type == LE_LINK)
1348 smp_conn_security(hcon, hcon->pending_sec_level);
1350 mutex_lock(&conn->chan_lock);
1352 list_for_each_entry(chan, &conn->chan_l, list) {
1354 l2cap_chan_lock(chan);
1356 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1357 l2cap_chan_unlock(chan);
1361 if (hcon->type == LE_LINK) {
1362 if (smp_conn_security(hcon, chan->sec_level))
1363 l2cap_chan_ready(chan);
1365 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1366 struct sock *sk = chan->sk;
1367 __clear_chan_timer(chan);
1369 __l2cap_state_change(chan, BT_CONNECTED);
1370 sk->sk_state_change(sk);
1373 } else if (chan->state == BT_CONNECT)
1374 l2cap_do_start(chan);
1376 l2cap_chan_unlock(chan);
1379 mutex_unlock(&conn->chan_lock);
1382 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel flagged FORCE_RELIABLE, under
 * conn->chan_lock. */
1383 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1385 struct l2cap_chan *chan;
1387 BT_DBG("conn %p", conn);
1389 mutex_lock(&conn->chan_lock);
1391 list_for_each_entry(chan, &conn->chan_l, list) {
1392 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1393 l2cap_chan_set_err(chan, err);
1396 mutex_unlock(&conn->chan_lock);
/* Info-request timer expired: treat the feature-mask exchange as done
 * and resume pending channel setups. */
1399 static void l2cap_info_timeout(struct work_struct *work)
1401 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1404 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1405 conn->info_ident = 0;
1407 l2cap_conn_start(conn);
/* Tear down the whole L2CAP connection: free any partial rx frame,
 * delete and close every channel (each held/locked around its
 * removal), drop the HCI channel, cancel the info/security timers,
 * destroy pending SMP state, and detach from the hci_conn. */
1410 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1412 struct l2cap_conn *conn = hcon->l2cap_data;
1413 struct l2cap_chan *chan, *l;
1418 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1420 kfree_skb(conn->rx_skb);
1422 mutex_lock(&conn->chan_lock);
1425 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1426 l2cap_chan_hold(chan);
1427 l2cap_chan_lock(chan);
1429 l2cap_chan_del(chan, err);
1431 l2cap_chan_unlock(chan);
1433 chan->ops->close(chan);
1434 l2cap_chan_put(chan);
1437 mutex_unlock(&conn->chan_lock);
1439 hci_chan_del(conn->hchan);
1441 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1442 cancel_delayed_work_sync(&conn->info_timer);
1444 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1445 cancel_delayed_work_sync(&conn->security_timer);
1446 smp_chan_destroy(conn);
1449 hcon->l2cap_data = NULL;
/* SMP security procedure timed out on an LE link: destroy the SMP context
 * (only if pairing is still pending) and drop the connection with
 * ETIMEDOUT.  The test_and_clear avoids a double smp_chan_destroy() race
 * with l2cap_conn_del().
 */
1453 static void security_timeout(struct work_struct *work)
1455 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1456 security_timer.work);
1458 BT_DBG("conn %p", conn);
1460 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1461 smp_chan_destroy(conn);
1462 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialise the l2cap_conn for @hcon.  Chooses the link MTU
 * by link type and arms the appropriate delayed work: the SMP security
 * timer for LE links, the info-request timer otherwise.
 */
1466 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1468 struct l2cap_conn *conn = hcon->l2cap_data;
1469 struct hci_chan *hchan;
1474 hchan = hci_chan_create(hcon);
1478 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: release the HCI channel created above */
1480 hci_chan_del(hchan);
1484 hcon->l2cap_data = conn;
1486 conn->hchan = hchan;
1488 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1490 switch (hcon->type) {
/* AMP links use the controller's block MTU */
1492 conn->mtu = hcon->hdev->block_mtu;
/* LE links prefer the dedicated LE MTU when the controller reports one */
1496 if (hcon->hdev->le_mtu) {
1497 conn->mtu = hcon->hdev->le_mtu;
/* Default (ACL) case */
1503 conn->mtu = hcon->hdev->acl_mtu;
1507 conn->src = &hcon->hdev->bdaddr;
1508 conn->dst = &hcon->dst;
1510 conn->feat_mask = 0;
1512 spin_lock_init(&conn->lock);
1513 mutex_init(&conn->chan_lock);
1515 INIT_LIST_HEAD(&conn->chan_l);
1517 if (hcon->type == LE_LINK)
1518 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1520 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
/* Default disconnect reason until signalling tells us otherwise */
1522 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1527 /* ---- Socket interface ---- */
1529 /* Find socket with psm and source / destination bdaddr.
1530 * Returns closest match.
/* Find a channel in the global list bound to @psm (optionally filtered by
 * @state) matching @src/@dst.  An exact match on both addresses returns
 * immediately; otherwise the closest wildcard (BDADDR_ANY) match seen
 * during the walk is kept as a fallback candidate.
 */
1532 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1536 struct l2cap_chan *c, *c1 = NULL;
1538 read_lock(&chan_list_lock);
1540 list_for_each_entry(c, &chan_list, global_l) {
1541 struct sock *sk = c->sk;
1543 if (state && c->state != state)
1546 if (c->psm == psm) {
1547 int src_match, dst_match;
1548 int src_any, dst_any;
/* Exact match on both addresses is the best possible result */
1551 src_match = !bacmp(&bt_sk(sk)->src, src);
1552 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1553 if (src_match && dst_match) {
1554 read_unlock(&chan_list_lock);
/* Otherwise remember any wildcard match as the closest candidate */
1559 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1560 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1561 if ((src_match && dst_any) || (src_any && dst_match) ||
1562 (src_any && dst_any))
1567 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection on @chan to @dst: validate PSM/CID and
 * channel mode, create or reuse the HCI link (LE or ACL depending on the
 * destination CID), attach the channel to the connection and kick off the
 * L2CAP connect/configure sequence if the link is already up.
 */
1572 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1573 bdaddr_t *dst, u8 dst_type)
1575 struct sock *sk = chan->sk;
1576 bdaddr_t *src = &bt_sk(sk)->src;
1577 struct l2cap_conn *conn;
1578 struct hci_conn *hcon;
1579 struct hci_dev *hdev;
1583 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1584 dst_type, __le16_to_cpu(psm));
1586 hdev = hci_get_route(dst, src);
/* No local adapter can reach the destination */
1588 return -EHOSTUNREACH;
1592 l2cap_chan_lock(chan);
1594 /* PSM must be odd and lsb of upper byte must be 0 */
1595 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1596 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a fixed CID */
1601 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1606 switch (chan->mode) {
1607 case L2CAP_MODE_BASIC:
1609 case L2CAP_MODE_ERTM:
1610 case L2CAP_MODE_STREAMING:
1619 switch (chan->state) {
1623 /* Already connecting */
1628 /* Already connected */
1642 /* Set destination address and psm */
1644 bacpy(&bt_sk(sk)->dst, dst);
1650 auth_type = l2cap_get_auth_type(chan);
/* LE data channel implies an LE link; everything else goes over ACL */
1652 if (chan->dcid == L2CAP_CID_LE_DATA)
1653 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1654 chan->sec_level, auth_type);
1656 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1657 chan->sec_level, auth_type);
1660 err = PTR_ERR(hcon);
1664 conn = l2cap_conn_add(hcon, 0);
/* LE links support only one data channel at a time here */
1671 if (hcon->type == LE_LINK) {
1674 if (!list_empty(&conn->chan_l)) {
1683 /* Update source addr of the socket */
1684 bacpy(src, conn->src);
/* Lock-order dance: conn->chan_lock (taken inside l2cap_chan_add) must be
 * acquired before the channel lock, so drop and retake the channel lock.
 */
1686 l2cap_chan_unlock(chan);
1687 l2cap_chan_add(conn, chan);
1688 l2cap_chan_lock(chan);
1690 l2cap_state_change(chan, BT_CONNECT);
1691 __set_chan_timer(chan, sk->sk_sndtimeo);
1693 if (hcon->state == BT_CONNECTED) {
1694 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1695 __clear_chan_timer(chan);
1696 if (l2cap_chan_check_security(chan))
1697 l2cap_state_change(chan, BT_CONNECTED);
1699 l2cap_do_start(chan);
1705 l2cap_chan_unlock(chan);
1706 hci_dev_unlock(hdev);
/* Sleep (interruptibly, bounded by the socket send timeout) until all
 * outstanding ERTM I-frames are acked or the channel loses its connection.
 * Returns 0 on success or a -errno from a signal/socket error.
 */
1711 int __l2cap_wait_ack(struct sock *sk)
1713 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1714 DECLARE_WAITQUEUE(wait, current);
1718 add_wait_queue(sk_sleep(sk), &wait);
1719 set_current_state(TASK_INTERRUPTIBLE);
1720 while (chan->unacked_frames > 0 && chan->conn) {
1724 if (signal_pending(current)) {
1725 err = sock_intr_errno(timeo);
1730 timeo = schedule_timeout(timeo);
/* Re-arm the sleep state before re-testing the loop condition */
1732 set_current_state(TASK_INTERRUPTIBLE);
1734 err = sock_error(sk);
1738 set_current_state(TASK_RUNNING);
1739 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine.  The early unlock/put path handles a channel that went away
 * before the work ran; the put balances the hold taken when arming.
 */
1743 static void l2cap_monitor_timeout(struct work_struct *work)
1745 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1746 monitor_timer.work);
1748 BT_DBG("chan %p", chan);
1750 l2cap_chan_lock(chan);
1753 l2cap_chan_unlock(chan);
1754 l2cap_chan_put(chan);
1758 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1760 l2cap_chan_unlock(chan);
1761 l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the TX
 * state machine.  Mirrors l2cap_monitor_timeout(): the first unlock/put is
 * the bail-out path, the last put balances the hold taken when arming.
 */
1764 static void l2cap_retrans_timeout(struct work_struct *work)
1766 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1767 retrans_timer.work);
1769 BT_DBG("chan %p", chan);
1771 l2cap_chan_lock(chan);
1774 l2cap_chan_unlock(chan);
1775 l2cap_chan_put(chan);
1779 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1780 l2cap_chan_unlock(chan);
1781 l2cap_chan_put(chan);
/* Transmit queued PDUs in streaming mode: stamp each frame with the next
 * TX sequence number, append the FCS when enabled, and hand it straight
 * to HCI.  Streaming mode keeps no retransmission state.
 */
1784 static void l2cap_streaming_send(struct l2cap_chan *chan,
1785 struct sk_buff_head *skbs)
1787 struct sk_buff *skb;
1788 struct l2cap_ctrl *control;
1790 BT_DBG("chan %p, skbs %p", chan, skbs);
1792 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1794 while (!skb_queue_empty(&chan->tx_q)) {
1796 skb = skb_dequeue(&chan->tx_q);
1798 bt_cb(skb)->control.retries = 1;
1799 control = &bt_cb(skb)->control;
/* Streaming frames never acknowledge anything */
1801 control->reqseq = 0;
1802 control->txseq = chan->next_tx_seq;
1804 __pack_control(chan, control, skb);
1806 if (chan->fcs == L2CAP_FCS_CRC16) {
1807 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1808 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1811 l2cap_do_send(chan, skb);
1813 BT_DBG("Sent txseq %u", control->txseq);
1815 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1816 chan->frames_sent++;
/* Send as many queued I-frames as the remote TX window allows while in the
 * XMIT state.  Each frame gets sequence numbers and FCS, a clone is sent
 * (the original stays queued for retransmission) and the retransmission
 * timer is armed.  Returns the number of frames sent.
 */
1820 static int l2cap_ertm_send(struct l2cap_chan *chan)
1822 struct sk_buff *skb, *tx_skb;
1823 struct l2cap_ctrl *control;
1826 BT_DBG("chan %p", chan);
1828 if (chan->state != BT_CONNECTED)
/* Remote signalled RNR: hold off transmission */
1831 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1834 while (chan->tx_send_head &&
1835 chan->unacked_frames < chan->remote_tx_win &&
1836 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1838 skb = chan->tx_send_head;
1840 bt_cb(skb)->control.retries = 1;
1841 control = &bt_cb(skb)->control;
1843 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggy-back an ack for everything received so far */
1846 control->reqseq = chan->buffer_seq;
1847 chan->last_acked_seq = chan->buffer_seq;
1848 control->txseq = chan->next_tx_seq;
1850 __pack_control(chan, control, skb);
1852 if (chan->fcs == L2CAP_FCS_CRC16) {
1853 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1854 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1857 /* Clone after data has been modified. Data is assumed to be
1858 read-only (for locking purposes) on cloned sk_buffs.
1860 tx_skb = skb_clone(skb, GFP_KERNEL);
1865 __set_retrans_timer(chan);
1867 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1868 chan->unacked_frames++;
1869 chan->frames_sent++;
/* Advance the send pointer; NULL means the queue tail was reached */
1872 if (skb_queue_is_last(&chan->tx_q, skb))
1873 chan->tx_send_head = NULL;
1875 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1877 l2cap_do_send(chan, tx_skb);
1878 BT_DBG("Sent txseq %u", control->txseq);
1881 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1882 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued in chan->retrans_list.  Frames
 * that exceed max_tx retries abort the link.  Retransmitted frames get a
 * fresh reqseq/F-bit and recomputed FCS in a private copy of the skb.
 */
1887 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1889 struct l2cap_ctrl control;
1890 struct sk_buff *skb;
1891 struct sk_buff *tx_skb;
1894 BT_DBG("chan %p", chan);
1896 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1899 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1900 seq = l2cap_seq_list_pop(&chan->retrans_list);
1902 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1904 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1909 bt_cb(skb)->control.retries++;
1910 control = bt_cb(skb)->control;
/* max_tx == 0 means unlimited retries */
1912 if (chan->max_tx != 0 &&
1913 bt_cb(skb)->control.retries > chan->max_tx) {
1914 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1915 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1916 l2cap_seq_list_clear(&chan->retrans_list);
1920 control.reqseq = chan->buffer_seq;
1921 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1926 if (skb_cloned(skb)) {
1927 /* Cloned sk_buffs are read-only, so we need a
1930 tx_skb = skb_copy(skb, GFP_KERNEL);
1932 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: give up on this batch of retransmissions */
1936 l2cap_seq_list_clear(&chan->retrans_list);
1940 /* Update skb contents */
1941 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1942 put_unaligned_le32(__pack_extended_control(&control),
1943 tx_skb->data + L2CAP_HDR_SIZE);
1945 put_unaligned_le16(__pack_enhanced_control(&control),
1946 tx_skb->data + L2CAP_HDR_SIZE);
/* Control field changed, so the FCS must be recomputed */
1949 if (chan->fcs == L2CAP_FCS_CRC16) {
1950 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1951 put_unaligned_le16(fcs, skb_put(tx_skb,
1955 l2cap_do_send(chan, tx_skb);
1957 BT_DBG("Resent txseq %d", control.txseq);
1959 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq (SREJ path). */
1963 static void l2cap_retransmit(struct l2cap_chan *chan,
1964 struct l2cap_ctrl *control)
1966 BT_DBG("chan %p, control %p", chan, control);
1968 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1969 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting at control->reqseq (REJ path).
 * Walks tx_q to find the first frame to resend, then queues that frame
 * and all that follow (up to tx_send_head) for retransmission.
 */
1972 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1973 struct l2cap_ctrl *control)
1975 struct sk_buff *skb;
1977 BT_DBG("chan %p, control %p", chan, control);
1980 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1982 l2cap_seq_list_clear(&chan->retrans_list);
1984 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1987 if (chan->unacked_frames) {
/* Skip frames already acknowledged by reqseq */
1988 skb_queue_walk(&chan->tx_q, skb) {
1989 if (bt_cb(skb)->control.txseq == control->reqseq ||
1990 skb == chan->tx_send_head)
/* Queue everything from there up to (not including) tx_send_head */
1994 skb_queue_walk_from(&chan->tx_q, skb) {
1995 if (skb == chan->tx_send_head)
1998 l2cap_seq_list_append(&chan->retrans_list,
1999 bt_cb(skb)->control.txseq);
2002 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR while locally busy, otherwise
 * try to piggy-back the ack on pending I-frames; if none were sent, send
 * an explicit RR once ~3/4 of the ack window is outstanding, else just
 * (re)arm the ack timer.
 */
2006 static void l2cap_send_ack(struct l2cap_chan *chan)
2008 struct l2cap_ctrl control;
2009 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2010 chan->last_acked_seq);
2013 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2014 chan, chan->last_acked_seq, chan->buffer_seq);
2016 memset(&control, 0, sizeof(control));
2019 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2020 chan->rx_state == L2CAP_RX_STATE_RECV) {
2021 __clear_ack_timer(chan);
2022 control.super = L2CAP_SUPER_RNR;
2023 control.reqseq = chan->buffer_seq;
2024 l2cap_send_sframe(chan, &control);
2026 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2027 l2cap_ertm_send(chan);
2028 /* If any i-frames were sent, they included an ack */
2029 if (chan->buffer_seq == chan->last_acked_seq)
2033 /* Ack now if the window is 3/4ths full.
2034 * Calculate without mul or div
2036 threshold = chan->ack_win;
2037 threshold += threshold << 1;
2040 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2043 if (frames_to_ack >= threshold) {
2044 __clear_ack_timer(chan);
2045 control.super = L2CAP_SUPER_RR;
2046 control.reqseq = chan->buffer_seq;
2047 l2cap_send_sframe(chan, &control);
2052 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb, spilling the overflow
 * into MTU-sized fragment skbs chained on skb's frag_list.  Returns 0 on
 * success or a negative errno.
 */
2056 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2057 struct msghdr *msg, int len,
2058 int count, struct sk_buff *skb)
2060 struct l2cap_conn *conn = chan->conn;
2061 struct sk_buff **frag;
/* First @count bytes go into the head skb */
2064 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2070 /* Continuation fragments (no L2CAP header) */
2071 frag = &skb_shinfo(skb)->frag_list;
2073 struct sk_buff *tmp;
2075 count = min_t(unsigned int, conn->mtu, len);
2077 tmp = chan->ops->alloc_skb(chan, count,
2078 msg->msg_flags & MSG_DONTWAIT);
2080 return PTR_ERR(tmp);
2084 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2087 (*frag)->priority = skb->priority;
/* Account fragment bytes on the head skb */
2092 skb->len += (*frag)->len;
2093 skb->data_len += (*frag)->len;
2095 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the PSM,
 * followed by the user payload.  Returns the skb or an ERR_PTR.
 */
2101 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2102 struct msghdr *msg, size_t len,
2105 struct l2cap_conn *conn = chan->conn;
2106 struct sk_buff *skb;
2107 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2108 struct l2cap_hdr *lh;
2110 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2112 count = min_t(unsigned int, (conn->mtu - hlen), len);
2114 skb = chan->ops->alloc_skb(chan, count + hlen,
2115 msg->msg_flags & MSG_DONTWAIT);
2119 skb->priority = priority;
2121 /* Create L2CAP header */
2122 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2123 lh->cid = cpu_to_le16(chan->dcid);
/* Length field covers payload plus the 2-byte PSM */
2124 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2125 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2127 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2128 if (unlikely(err < 0)) {
2130 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload.  Returns the skb or an ERR_PTR.
 */
2135 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2136 struct msghdr *msg, size_t len,
2139 struct l2cap_conn *conn = chan->conn;
2140 struct sk_buff *skb;
2142 struct l2cap_hdr *lh;
2144 BT_DBG("chan %p len %zu", chan, len);
2146 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2148 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2149 msg->msg_flags & MSG_DONTWAIT);
2153 skb->priority = priority;
2155 /* Create L2CAP header */
2156 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2157 lh->cid = cpu_to_le16(chan->dcid);
2158 lh->len = cpu_to_le16(len);
2160 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2161 if (unlikely(err < 0)) {
2163 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder control
 * field (filled in at send time), optional SDU-length field for the first
 * segment, payload, and room reserved for the FCS.  Returns the skb or an
 * ERR_PTR.
 */
2168 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2169 struct msghdr *msg, size_t len,
2172 struct l2cap_conn *conn = chan->conn;
2173 struct sk_buff *skb;
2174 int err, count, hlen;
2175 struct l2cap_hdr *lh;
2177 BT_DBG("chan %p len %zu", chan, len);
2180 return ERR_PTR(-ENOTCONN);
2182 hlen = __ertm_hdr_size(chan);
/* Start-of-SDU segments carry the total SDU length */
2185 hlen += L2CAP_SDULEN_SIZE;
2187 if (chan->fcs == L2CAP_FCS_CRC16)
2188 hlen += L2CAP_FCS_SIZE;
2190 count = min_t(unsigned int, (conn->mtu - hlen), len);
2192 skb = chan->ops->alloc_skb(chan, count + hlen,
2193 msg->msg_flags & MSG_DONTWAIT);
2197 /* Create L2CAP header */
2198 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2199 lh->cid = cpu_to_le16(chan->dcid);
2200 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2202 /* Control header is populated later */
2203 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2204 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2206 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2209 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2211 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2212 if (unlikely(err < 0)) {
2214 return ERR_PTR(err);
2217 bt_cb(skb)->control.fcs = chan->fcs;
2218 bt_cb(skb)->control.retries = 0;
/* Split an SDU of @len bytes from @msg into I-frame PDUs queued on
 * @seg_queue, tagging each with the proper SAR value (UNSEGMENTED, or
 * START/CONTINUE/END).  PDU size is bounded by the HCI MTU and the
 * remote's MPS.  Returns 0 or a negative errno.
 */
2222 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2223 struct sk_buff_head *seg_queue,
2224 struct msghdr *msg, size_t len)
2226 struct sk_buff *skb;
2231 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2233 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2234 * so fragmented skbs are not used. The HCI layer's handling
2235 * of fragmented skbs is not compatible with ERTM's queueing.
2238 /* PDU size is derived from the HCI MTU */
2239 pdu_len = chan->conn->mtu;
2241 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2243 /* Adjust for largest possible L2CAP overhead. */
2245 pdu_len -= L2CAP_FCS_SIZE;
2247 pdu_len -= __ertm_hdr_size(chan);
2249 /* Remote device may have requested smaller PDUs */
2250 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2252 if (len <= pdu_len) {
2253 sar = L2CAP_SAR_UNSEGMENTED;
2257 sar = L2CAP_SAR_START;
/* The first segment also carries the SDU length field */
2259 pdu_len -= L2CAP_SDULEN_SIZE;
2263 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2266 __skb_queue_purge(seg_queue);
2267 return PTR_ERR(skb);
2270 bt_cb(skb)->control.sar = sar;
2271 __skb_queue_tail(seg_queue, skb);
/* Subsequent segments have no SDU length field, so regain that room */
2276 pdu_len += L2CAP_SDULEN_SIZE;
2279 if (len <= pdu_len) {
2280 sar = L2CAP_SAR_END;
2283 sar = L2CAP_SAR_CONTINUE;
/* Entry point for transmitting user data on @chan.  Dispatches by channel
 * type/mode: connectionless PDU, basic-mode PDU, or ERTM/streaming SDU
 * segmentation followed by the respective send path.  Returns bytes sent
 * or a negative errno.
 */
2290 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2293 struct sk_buff *skb;
2295 struct sk_buff_head seg_queue;
2297 /* Connectionless channel */
2298 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2299 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2301 return PTR_ERR(skb);
2303 l2cap_do_send(chan, skb);
2307 switch (chan->mode) {
2308 case L2CAP_MODE_BASIC:
2309 /* Check outgoing MTU */
2310 if (len > chan->omtu)
2313 /* Create a basic PDU */
2314 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2316 return PTR_ERR(skb);
2318 l2cap_do_send(chan, skb);
2322 case L2CAP_MODE_ERTM:
2323 case L2CAP_MODE_STREAMING:
2324 /* Check outgoing MTU */
2325 if (len > chan->omtu) {
2330 __skb_queue_head_init(&seg_queue);
2332 /* Do segmentation before calling in to the state machine,
2333 * since it's possible to block while waiting for memory
2336 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2338 /* The channel could have been closed while segmenting,
2339 * check that it is still connected.
2341 if (chan->state != BT_CONNECTED) {
2342 __skb_queue_purge(&seg_queue);
2349 if (chan->mode == L2CAP_MODE_ERTM)
2350 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2352 l2cap_streaming_send(chan, &seg_queue);
2356 /* If the skbs were not queued for sending, they'll still be in
2357 * seg_queue and need to be purged.
2359 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode — likely
 * meant "bad mode"; verify before relying on this debug output.
 */
2363 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every sequence number between expected_tx_seq and
 * @txseq that has not already been captured in the SREJ queue, recording
 * each in srej_list.
 */
2370 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2372 struct l2cap_ctrl control;
2375 BT_DBG("chan %p, txseq %u", chan, txseq);
2377 memset(&control, 0, sizeof(control));
2379 control.super = L2CAP_SUPER_SREJ;
2381 for (seq = chan->expected_tx_seq; seq != txseq;
2382 seq = __next_seq(chan, seq)) {
2383 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2384 control.reqseq = seq;
2385 l2cap_send_sframe(chan, &control);
2386 l2cap_seq_list_append(&chan->srej_list, seq);
2390 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) missing frame,
 * if any SREJ is outstanding.
 */
2393 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2395 struct l2cap_ctrl control;
2397 BT_DBG("chan %p", chan);
2399 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2402 memset(&control, 0, sizeof(control));
2404 control.super = L2CAP_SUPER_SREJ;
2405 control.reqseq = chan->srej_list.tail;
2406 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for all still-missing frames except @txseq, cycling the
 * entries through srej_list (pop, send, re-append) exactly once.
 */
2409 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2411 struct l2cap_ctrl control;
2415 BT_DBG("chan %p, txseq %u", chan, txseq);
2417 memset(&control, 0, sizeof(control));
2419 control.super = L2CAP_SUPER_SREJ;
2421 /* Capture initial list head to allow only one pass through the list. */
2422 initial_head = chan->srej_list.head;
2425 seq = l2cap_seq_list_pop(&chan->srej_list);
/* @txseq just arrived, and CLEAR marks list exhaustion */
2426 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2429 control.reqseq = seq;
2430 l2cap_send_sframe(chan, &control);
2431 l2cap_seq_list_append(&chan->srej_list, seq);
2432 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every transmitted frame from
 * expected_ack_seq up to (not including) @reqseq and stop the retransmit
 * timer once nothing remains unacked.
 */
2435 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2437 struct sk_buff *acked_skb;
2440 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing new acknowledged */
2442 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2445 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2446 chan->expected_ack_seq, chan->unacked_frames);
2448 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2449 ackseq = __next_seq(chan, ackseq)) {
2451 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2453 skb_unlink(acked_skb, &chan->tx_q);
2454 kfree_skb(acked_skb);
2455 chan->unacked_frames--;
2459 chan->expected_ack_seq = reqseq;
2461 if (chan->unacked_frames == 0)
2462 __clear_retrans_timer(chan);
2464 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: drop buffered out-of-order frames,
 * forget outstanding SREJs and fall back to plain RECV.
 */
2467 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2469 BT_DBG("chan %p", chan);
2471 chan->expected_tx_seq = chan->buffer_seq;
2472 l2cap_seq_list_clear(&chan->srej_list);
2473 skb_queue_purge(&chan->srej_q);
2474 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: handle data requests, local-busy
 * transitions, incoming acks, explicit polls and retransmission timeouts.
 * Poll events send RR/RNR with P=1 and move to WAIT_F.
 */
2477 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2478 struct l2cap_ctrl *control,
2479 struct sk_buff_head *skbs, u8 event)
2481 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2485 case L2CAP_EV_DATA_REQUEST:
2486 if (chan->tx_send_head == NULL)
2487 chan->tx_send_head = skb_peek(skbs);
2489 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2490 l2cap_ertm_send(chan);
2492 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2493 BT_DBG("Enter LOCAL_BUSY");
2494 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2496 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2497 /* The SREJ_SENT state must be aborted if we are to
2498 * enter the LOCAL_BUSY state.
2500 l2cap_abort_rx_srej_sent(chan);
/* send_ack() emits the RNR while locally busy */
2503 l2cap_send_ack(chan);
2506 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2507 BT_DBG("Exit LOCAL_BUSY");
2508 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2510 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2511 struct l2cap_ctrl local_control;
/* RNR was sent earlier: poll the remote with RR P=1 to resync */
2513 memset(&local_control, 0, sizeof(local_control));
2514 local_control.sframe = 1;
2515 local_control.super = L2CAP_SUPER_RR;
2516 local_control.poll = 1;
2517 local_control.reqseq = chan->buffer_seq;
2518 l2cap_send_sframe(chan, &local_control);
2520 chan->retry_count = 1;
2521 __set_monitor_timer(chan);
2522 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2525 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2526 l2cap_process_reqseq(chan, control->reqseq);
2528 case L2CAP_EV_EXPLICIT_POLL:
2529 l2cap_send_rr_or_rnr(chan, 1);
2530 chan->retry_count = 1;
2531 __set_monitor_timer(chan);
2532 __clear_ack_timer(chan);
2533 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2535 case L2CAP_EV_RETRANS_TO:
2536 l2cap_send_rr_or_rnr(chan, 1);
2537 chan->retry_count = 1;
2538 __set_monitor_timer(chan);
2539 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2541 case L2CAP_EV_RECV_FBIT:
2542 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state: waiting for an S/I-frame with the
 * F-bit set after a poll.  Data is queued but not sent; receiving the
 * F-bit returns to XMIT; monitor timeouts re-poll until max_tx retries,
 * then the link is dropped.
 */
2549 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2550 struct l2cap_ctrl *control,
2551 struct sk_buff_head *skbs, u8 event)
2553 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2557 case L2CAP_EV_DATA_REQUEST:
2558 if (chan->tx_send_head == NULL)
2559 chan->tx_send_head = skb_peek(skbs);
2560 /* Queue data, but don't send. */
2561 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2563 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2564 BT_DBG("Enter LOCAL_BUSY");
2565 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2567 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2568 /* The SREJ_SENT state must be aborted if we are to
2569 * enter the LOCAL_BUSY state.
2571 l2cap_abort_rx_srej_sent(chan);
2574 l2cap_send_ack(chan);
2577 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2578 BT_DBG("Exit LOCAL_BUSY");
2579 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2581 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2582 struct l2cap_ctrl local_control;
2583 memset(&local_control, 0, sizeof(local_control));
2584 local_control.sframe = 1;
2585 local_control.super = L2CAP_SUPER_RR;
2586 local_control.poll = 1;
2587 local_control.reqseq = chan->buffer_seq;
2588 l2cap_send_sframe(chan, &local_control);
2590 chan->retry_count = 1;
2591 __set_monitor_timer(chan);
2592 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2595 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2596 l2cap_process_reqseq(chan, control->reqseq);
2600 case L2CAP_EV_RECV_FBIT:
2601 if (control && control->final) {
/* F-bit answers our poll: back to XMIT, resume retrans timer if needed */
2602 __clear_monitor_timer(chan);
2603 if (chan->unacked_frames > 0)
2604 __set_retrans_timer(chan);
2605 chan->retry_count = 0;
2606 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format string "0x2.2%x" looks malformed — almost
 * certainly meant "0x%2.2x"; as written it prints literal "0x2.2"
 * followed by the hex value.
 */
2607 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2610 case L2CAP_EV_EXPLICIT_POLL:
/* Already polling; ignore */
2613 case L2CAP_EV_MONITOR_TO:
2614 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2615 l2cap_send_rr_or_rnr(chan, 1);
2616 __set_monitor_timer(chan);
2617 chan->retry_count++;
2619 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch a TX state-machine event to the handler for the current state. */
2627 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2628 struct sk_buff_head *skbs, u8 event)
2630 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2631 chan, control, skbs, event, chan->tx_state);
2633 switch (chan->tx_state) {
2634 case L2CAP_TX_STATE_XMIT:
2635 l2cap_tx_state_xmit(chan, control, skbs, event);
2637 case L2CAP_TX_STATE_WAIT_F:
2638 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received reqseq/F-bit acknowledgement to the TX state machine. */
2646 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2647 struct l2cap_ctrl *control)
2649 BT_DBG("chan %p, control %p", chan, control);
2650 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward a received F-bit (without reqseq processing) to the TX state
 * machine.
 */
2653 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2654 struct l2cap_ctrl *control)
2656 BT_DBG("chan %p, control %p", chan, control);
2657 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2660 /* Copy frame to all raw sockets on that connection */
2661 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2663 struct sk_buff *nskb;
2664 struct l2cap_chan *chan;
2666 BT_DBG("conn %p", conn);
2668 mutex_lock(&conn->chan_lock);
2670 list_for_each_entry(chan, &conn->chan_l, list) {
2671 struct sock *sk = chan->sk;
2672 if (chan->chan_type != L2CAP_CHAN_RAW)
2675 /* Don't send frame to the socket it came from */
/* Each raw channel gets its own clone of the frame */
2678 nskb = skb_clone(skb, GFP_KERNEL);
/* recv() taking ownership failed: free our clone */
2682 if (chan->ops->recv(chan, nskb))
2686 mutex_unlock(&conn->chan_lock);
2689 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header on the proper signalling
 * CID (LE or BR/EDR), command header (@code/@ident/@dlen) and @data,
 * fragmented across frag_list skbs if it exceeds the connection MTU.
 */
2690 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2691 u8 ident, u16 dlen, void *data)
2693 struct sk_buff *skb, **frag;
2694 struct l2cap_cmd_hdr *cmd;
2695 struct l2cap_hdr *lh;
2698 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2699 conn, code, ident, dlen);
2701 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2702 count = min_t(unsigned int, conn->mtu, len);
2704 skb = bt_skb_alloc(count, GFP_KERNEL);
2708 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2709 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2711 if (conn->hcon->type == LE_LINK)
2712 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2714 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2716 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2719 cmd->len = cpu_to_le16(dlen);
/* As much payload as fits in the first skb */
2722 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2723 memcpy(skb_put(skb, count), data, count);
2729 /* Continuation fragments (no L2CAP header) */
2730 frag = &skb_shinfo(skb)->frag_list;
2732 count = min_t(unsigned int, conn->mtu, len);
2734 *frag = bt_skb_alloc(count, GFP_KERNEL);
2738 memcpy(skb_put(*frag, count), data, count);
2743 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its type in *type, its
 * value in *val (1/2/4-byte values by copy, longer values as a pointer),
 * advance *ptr past it and return the consumed length.
 */
2753 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2756 struct l2cap_conf_opt *opt = *ptr;
2759 len = L2CAP_CONF_OPT_SIZE + opt->len;
2767 *val = *((u8 *) opt->val);
2771 *val = get_unaligned_le16(opt->val);
2775 *val = get_unaligned_le32(opt->val);
/* Larger options are returned by reference */
2779 *val = (unsigned long) opt->val;
2783 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  @val is either the value itself (len 1/2/4) or a pointer
 * to the value for longer options.
 */
2787 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2789 struct l2cap_conf_opt *opt = *ptr;
2791 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2798 *((u8 *) opt->val) = val;
2802 put_unaligned_le16(val, opt->val);
2806 put_unaligned_le32(val, opt->val);
/* For other lengths, val is a pointer to the option payload */
2810 memcpy(opt->val, (void *) val, len);
2814 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option built from the channel's
 * local QoS parameters (ERTM uses the channel's service type, streaming
 * forces best-effort).
 */
2817 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2819 struct l2cap_conf_efs efs;
2821 switch (chan->mode) {
2822 case L2CAP_MODE_ERTM:
2823 efs.id = chan->local_id;
2824 efs.stype = chan->local_stype;
2825 efs.msdu = cpu_to_le16(chan->local_msdu);
2826 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2827 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2828 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2831 case L2CAP_MODE_STREAMING:
2833 efs.stype = L2CAP_SERV_BESTEFFORT;
2834 efs.msdu = cpu_to_le16(chan->local_msdu);
2835 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2844 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2845 (unsigned long) &efs);
/* Ack timer expired: if any received frames are still unacknowledged,
 * send an RR/RNR now.  The put balances the hold taken when the timer was
 * armed.
 */
2848 static void l2cap_ack_timeout(struct work_struct *work)
2850 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2854 BT_DBG("chan %p", chan);
2856 l2cap_chan_lock(chan);
2858 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2859 chan->last_acked_seq);
2862 l2cap_send_rr_or_rnr(chan, 0);
2864 l2cap_chan_unlock(chan);
2865 l2cap_chan_put(chan);
/* Reset ERTM/streaming per-channel state: sequence counters, queues and
 * move-state.  For ERTM proper, also initialise the RX/TX state machines,
 * the three delayed-work timers and the SREJ/retransmit sequence lists.
 * Returns 0 or a negative errno from sequence-list allocation.
 */
2868 int l2cap_ertm_init(struct l2cap_chan *chan)
2872 chan->next_tx_seq = 0;
2873 chan->expected_tx_seq = 0;
2874 chan->expected_ack_seq = 0;
2875 chan->unacked_frames = 0;
2876 chan->buffer_seq = 0;
2877 chan->frames_sent = 0;
2878 chan->last_acked_seq = 0;
2880 chan->sdu_last_frag = NULL;
2883 skb_queue_head_init(&chan->tx_q);
2885 chan->local_amp_id = 0;
2887 chan->move_state = L2CAP_MOVE_STABLE;
2888 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM machinery below */
2890 if (chan->mode != L2CAP_MODE_ERTM)
2893 chan->rx_state = L2CAP_RX_STATE_RECV;
2894 chan->tx_state = L2CAP_TX_STATE_XMIT;
2896 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2897 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2898 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2900 skb_queue_head_init(&chan->srej_q);
2902 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2906 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Second list failed: unwind the first */
2908 l2cap_seq_list_free(&chan->srej_list);
/* Pick a usable channel mode: keep ERTM/streaming only if the remote's
 * feature mask supports it, otherwise fall back to basic mode.
 */
2913 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2916 case L2CAP_MODE_STREAMING:
2917 case L2CAP_MODE_ERTM:
2918 if (l2cap_mode_supported(mode, remote_feat_mask))
2922 return L2CAP_MODE_BASIC;
/* Extended Window Size available: high-speed enabled and advertised by the
 * remote's feature mask.
 */
2926 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2928 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification available: high-speed enabled and advertised
 * by the remote's feature mask.
 */
2931 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2933 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Clamp the TX window: windows beyond the default require the extended
 * control field (when supported); otherwise cap at the default window.
 * ack_win tracks the resulting tx_win.
 */
2936 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2938 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2939 __l2cap_ews_supported(chan)) {
2940 /* use extended control field */
2941 set_bit(FLAG_EXT_CTRL, &chan->flags);
2942 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2944 chan->tx_win = min_t(u16, chan->tx_win,
2945 L2CAP_DEFAULT_TX_WINDOW);
2946 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2948 chan->ack_win = chan->tx_win;
/* Build an outgoing Configure Request for @chan into @data: selects a mode
 * compatible with the remote's feature mask on the first request, then
 * emits MTU, RFC, and (mode-dependent) EFS/FCS/EWS options.  Returns the
 * total request length.
 */
2951 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2953 struct l2cap_conf_req *req = data;
2954 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2955 void *ptr = req->data;
2958 BT_DBG("chan %p", chan);
/* Mode is only (re)negotiated on the very first config exchange */
2960 if (chan->num_conf_req || chan->num_conf_rsp)
2963 switch (chan->mode) {
2964 case L2CAP_MODE_STREAMING:
2965 case L2CAP_MODE_ERTM:
2966 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2969 if (__l2cap_efs_supported(chan))
2970 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2974 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* MTU option only sent when differing from the default */
2979 if (chan->imtu != L2CAP_DEFAULT_MTU)
2980 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2982 switch (chan->mode) {
2983 case L2CAP_MODE_BASIC:
/* Remote knows neither ERTM nor streaming: no RFC option needed */
2984 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2985 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2988 rfc.mode = L2CAP_MODE_BASIC;
2990 rfc.max_transmit = 0;
2991 rfc.retrans_timeout = 0;
2992 rfc.monitor_timeout = 0;
2993 rfc.max_pdu_size = 0;
2995 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2996 (unsigned long) &rfc);
2999 case L2CAP_MODE_ERTM:
3000 rfc.mode = L2CAP_MODE_ERTM;
3001 rfc.max_transmit = chan->max_tx;
3002 rfc.retrans_timeout = 0;
3003 rfc.monitor_timeout = 0;
3005 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3006 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3008 rfc.max_pdu_size = cpu_to_le16(size);
3010 l2cap_txwin_setup(chan);
3012 rfc.txwin_size = min_t(u16, chan->tx_win,
3013 L2CAP_DEFAULT_TX_WINDOW);
3015 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3016 (unsigned long) &rfc);
3018 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3019 l2cap_add_opt_efs(&ptr, chan);
3021 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to disable the FCS when allowed */
3024 if (chan->fcs == L2CAP_FCS_NONE ||
3025 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
3026 chan->fcs = L2CAP_FCS_NONE;
3027 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
3030 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3031 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3035 case L2CAP_MODE_STREAMING:
3036 l2cap_txwin_setup(chan);
3037 rfc.mode = L2CAP_MODE_STREAMING;
3039 rfc.max_transmit = 0;
3040 rfc.retrans_timeout = 0;
3041 rfc.monitor_timeout = 0;
3043 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3044 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3046 rfc.max_pdu_size = cpu_to_le16(size);
3048 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3049 (unsigned long) &rfc);
3051 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3052 l2cap_add_opt_efs(&ptr, chan);
3054 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
3057 if (chan->fcs == L2CAP_FCS_NONE ||
3058 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
3059 chan->fcs = L2CAP_FCS_NONE;
3060 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
3065 req->dcid = cpu_to_le16(chan->dcid);
3066 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated Configuration Request stored in chan->conf_req and
 * build the Configuration Response into @data.  Walks the request options
 * (MTU, flush timeout, QoS, RFC, FCS, EFS, EWS), negotiates the channel
 * mode, then fills in output options and the response result.  Unknown
 * non-hint options produce L2CAP_CONF_UNKNOWN; incompatible modes or an
 * exhausted negotiation return -ECONNREFUSED.
 * NOTE(review): numerous structural lines (switch headers, breaks, braces)
 * are elided in this extract — treat the visible control flow as partial.
 */
3071 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3073 	struct l2cap_conf_rsp *rsp = data;
3074 	void *ptr = rsp->data;
3075 	void *req = chan->conf_req;
3076 	int len = chan->conf_len;
3077 	int type, hint, olen;
3079 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3080 	struct l2cap_conf_efs efs;
3082 	u16 mtu = L2CAP_DEFAULT_MTU;
3083 	u16 result = L2CAP_CONF_SUCCESS;
3086 	BT_DBG("chan %p", chan);
	/* First pass: consume each option from the buffered request. */
3088 	while (len >= L2CAP_CONF_OPT_SIZE) {
3089 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3091 		hint  = type & L2CAP_CONF_HINT;
3092 		type &= L2CAP_CONF_MASK;
3095 		case L2CAP_CONF_MTU:
3099 		case L2CAP_CONF_FLUSH_TO:
3100 			chan->flush_to = val;
3103 		case L2CAP_CONF_QOS:
3106 		case L2CAP_CONF_RFC:
			/* Only copy an RFC option of the exact expected size. */
3107 			if (olen == sizeof(rfc))
3108 				memcpy(&rfc, (void *) val, olen);
3111 		case L2CAP_CONF_FCS:
3112 			if (val == L2CAP_FCS_NONE)
3113 				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
3116 		case L2CAP_CONF_EFS:
3118 			if (olen == sizeof(efs))
3119 				memcpy(&efs, (void *) val, olen);
3122 		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support
			 * (condition line elided in this extract).
			 */
3124 				return -ECONNREFUSED;
3126 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3127 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3128 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3129 			chan->remote_tx_win = val;
	/* Unknown option: echo its type back with CONF_UNKNOWN result. */
3136 			result = L2CAP_CONF_UNKNOWN;
3137 			*((u8 *) ptr++) = type;
3142 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
	/* Mode negotiation: adapt to the remote's requested mode unless this
	 * device's mode is locked by CONF_STATE2_DEVICE.
	 */
3145 	switch (chan->mode) {
3146 	case L2CAP_MODE_STREAMING:
3147 	case L2CAP_MODE_ERTM:
3148 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3149 			chan->mode = l2cap_select_mode(rfc.mode,
3150 						       chan->conn->feat_mask);
3155 		if (__l2cap_efs_supported(chan))
3156 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3158 			return -ECONNREFUSED;
3161 		if (chan->mode != rfc.mode)
3162 			return -ECONNREFUSED;
3168 	if (chan->mode != rfc.mode) {
3169 		result = L2CAP_CONF_UNACCEPT;
3170 		rfc.mode = chan->mode;
		/* After one unaccepted exchange, give up on this channel. */
3172 		if (chan->num_conf_rsp == 1)
3173 			return -ECONNREFUSED;
3175 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3176 				   (unsigned long) &rfc);
3179 	if (result == L2CAP_CONF_SUCCESS) {
3180 		/* Configure output options and let the other side know
3181 		 * which ones we don't like. */
3183 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3184 			result = L2CAP_CONF_UNACCEPT;
3187 		set_bit(CONF_MTU_DONE, &chan->conf_state);
3189 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
		/* EFS service type must match ours unless either side is
		 * "no traffic".
		 */
3192 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3193 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3194 			    efs.stype != chan->local_stype) {
3196 				result = L2CAP_CONF_UNACCEPT;
3198 				if (chan->num_conf_req >= 1)
3199 					return -ECONNREFUSED;
3201 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3203 						   (unsigned long) &efs);
3205 				/* Send PENDING Conf Rsp */
3206 				result = L2CAP_CONF_PENDING;
3207 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3212 		case L2CAP_MODE_BASIC:
3213 			chan->fcs = L2CAP_FCS_NONE;
3214 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3217 		case L2CAP_MODE_ERTM:
3218 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3219 				chan->remote_tx_win = rfc.txwin_size;
3221 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3223 			chan->remote_max_tx = rfc.max_transmit;
3225 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3226 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3227 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3228 			rfc.max_pdu_size = cpu_to_le16(size);
3229 			chan->remote_mps = size;
3231 			rfc.retrans_timeout =
3232 				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3233 			rfc.monitor_timeout =
3234 				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3236 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3238 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3239 					   sizeof(rfc), (unsigned long) &rfc);
			/* Record the remote's EFS parameters for this channel. */
3241 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3242 				chan->remote_id = efs.id;
3243 				chan->remote_stype = efs.stype;
3244 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3245 				chan->remote_flush_to =
3246 					le32_to_cpu(efs.flush_to);
3247 				chan->remote_acc_lat =
3248 					le32_to_cpu(efs.acc_lat);
3249 				chan->remote_sdu_itime =
3250 					le32_to_cpu(efs.sdu_itime);
3251 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3253 						   (unsigned long) &efs);
3257 		case L2CAP_MODE_STREAMING:
3258 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3259 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3260 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3261 			rfc.max_pdu_size = cpu_to_le16(size);
3262 			chan->remote_mps = size;
3264 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3266 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3267 					   (unsigned long) &rfc);
3272 			result = L2CAP_CONF_UNACCEPT;
3274 			memset(&rfc, 0, sizeof(rfc));
3275 			rfc.mode = chan->mode;
3278 	if (result == L2CAP_CONF_SUCCESS)
3279 		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3281 	rsp->scid   = cpu_to_le16(chan->dcid);
3282 	rsp->result = cpu_to_le16(result);
3283 	rsp->flags  = __constant_cpu_to_le16(0);
/* Parse a Configuration Response from the remote (@rsp/@len) and build a
 * follow-up Configuration Request into @data.  Mirrors back acceptable
 * options, clamps MTU, and on SUCCESS/PENDING commits the negotiated ERTM
 * or streaming parameters into the channel.  Returns -ECONNREFUSED on an
 * incompatible mode or EFS service type; *result may be updated.
 * NOTE(review): switch headers, some breaks and closing braces are elided
 * in this extract.
 */
3288 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3289 				void *data, u16 *result)
3291 	struct l2cap_conf_req *req = data;
3292 	void *ptr = req->data;
3295 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3296 	struct l2cap_conf_efs efs;
3298 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3300 	while (len >= L2CAP_CONF_OPT_SIZE) {
3301 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3304 		case L2CAP_CONF_MTU:
			/* Remote proposed an MTU below the spec minimum:
			 * reject and fall back to the minimum.
			 */
3305 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3306 				*result = L2CAP_CONF_UNACCEPT;
3307 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3310 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3313 		case L2CAP_CONF_FLUSH_TO:
3314 			chan->flush_to = val;
3315 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3319 		case L2CAP_CONF_RFC:
3320 			if (olen == sizeof(rfc))
3321 				memcpy(&rfc, (void *)val, olen);
3323 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3324 			    rfc.mode != chan->mode)
3325 				return -ECONNREFUSED;
3329 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3330 					   sizeof(rfc), (unsigned long) &rfc);
3333 		case L2CAP_CONF_EWS:
3334 			chan->ack_win = min_t(u16, val, chan->ack_win);
3335 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3339 		case L2CAP_CONF_EFS:
3340 			if (olen == sizeof(efs))
3341 				memcpy(&efs, (void *)val, olen);
3343 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3344 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3345 			    efs.stype != chan->local_stype)
3346 				return -ECONNREFUSED;
3348 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3349 					   (unsigned long) &efs);
	/* A basic-mode channel cannot be switched by the remote's RFC. */
3354 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3355 		return -ECONNREFUSED;
3357 	chan->mode = rfc.mode;
3359 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3361 		case L2CAP_MODE_ERTM:
3362 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3363 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3364 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3365 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3366 				chan->ack_win = min_t(u16, chan->ack_win,
3369 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3370 				chan->local_msdu = le16_to_cpu(efs.msdu);
3371 				chan->local_sdu_itime =
3372 					le32_to_cpu(efs.sdu_itime);
3373 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3374 				chan->local_flush_to =
3375 					le32_to_cpu(efs.flush_to);
3379 		case L2CAP_MODE_STREAMING:
3380 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3384 	req->dcid   = cpu_to_le16(chan->dcid);
3385 	req->flags  = __constant_cpu_to_le16(0);
/* Fill in a bare Configuration Response header (scid/result/flags) for
 * @chan into @data.  Returns the response length (return line elided in
 * this extract).
 */
3390 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3391 				u16 result, u16 flags)
3393 	struct l2cap_conf_rsp *rsp = data;
3394 	void *ptr = rsp->data;
3396 	BT_DBG("chan %p", chan);
3398 	rsp->scid   = cpu_to_le16(chan->dcid);
3399 	rsp->result = cpu_to_le16(result);
3400 	rsp->flags  = cpu_to_le16(flags);
/* Send the deferred Connection Response (SUCCESS / NO_INFO) for a channel
 * whose acceptance was postponed, then — if a Configuration Request has not
 * already been sent — send the first one and bump num_conf_req.
 */
3405 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3407 	struct l2cap_conn_rsp rsp;
3408 	struct l2cap_conn *conn = chan->conn;
3411 	rsp.scid   = cpu_to_le16(chan->dcid);
3412 	rsp.dcid   = cpu_to_le16(chan->scid);
3413 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3414 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3415 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
	/* test_and_set: only the first caller sends the config request. */
3417 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3420 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3421 		       l2cap_build_conf_req(chan, buf), buf);
3422 	chan->num_conf_req++;
/* Extract RFC (and EWS) parameters from a successful Configuration
 * Response and commit them to the channel.  Defaults are pre-loaded so a
 * remote that omits the RFC option still yields sane timeouts/window.
 * No-op for channels not in ERTM or streaming mode.
 */
3425 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3429 	/* Use sane default values in case a misbehaving remote device
3430 	 * did not send an RFC or extended window size option.
3432 	u16 txwin_ext = chan->ack_win;
3433 	struct l2cap_conf_rfc rfc = {
3435 		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3436 		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3437 		.max_pdu_size = cpu_to_le16(chan->imtu),
3438 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3441 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3443 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3446 	while (len >= L2CAP_CONF_OPT_SIZE) {
3447 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3450 		case L2CAP_CONF_RFC:
3451 			if (olen == sizeof(rfc))
3452 				memcpy(&rfc, (void *)val, olen);
3454 		case L2CAP_CONF_EWS:
	/* Commit the negotiated values (switch header elided here). */
3461 	case L2CAP_MODE_ERTM:
3462 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3463 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3464 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3465 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3466 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3468 			chan->ack_win = min_t(u16, chan->ack_win,
3471 	case L2CAP_MODE_STREAMING:
3472 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), cancel the info timer, mark the
 * feature-mask exchange done and kick off pending channel connects.
 */
3476 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3477 				    struct l2cap_cmd_hdr *cmd, u8 *data)
3479 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3481 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3484 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3485 	    cmd->ident == conn->info_ident) {
3486 		cancel_delayed_work(&conn->info_timer);
3488 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3489 		conn->info_ident = 0;
3491 		l2cap_conn_start(conn);
/* Handle an incoming Connection Request (BR/EDR or create-channel path).
 * Looks up a listening channel for the PSM, checks link security for
 * non-SDP PSMs, rejects duplicate dcid, creates and registers the new
 * channel, decides the result/status (success, pending for security or
 * deferred setup, or AMP-pending), sends the response with @rsp_code, and
 * may kick off the feature-mask Information Request and/or the first
 * Configuration Request.  Returns the new channel (or NULL on rejection —
 * error paths elided in this extract).
 * NOTE(review): lock/unlock pairing and several error-path labels are
 * elided here; do not reason about locking from this extract alone.
 */
3497 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3498 					struct l2cap_cmd_hdr *cmd,
3499 					u8 *data, u8 rsp_code, u8 amp_id)
3501 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3502 	struct l2cap_conn_rsp rsp;
3503 	struct l2cap_chan *chan = NULL, *pchan;
3504 	struct sock *parent, *sk = NULL;
3505 	int result, status = L2CAP_CS_NO_INFO;
3507 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3508 	__le16 psm = req->psm;
3510 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3512 	/* Check if we have socket listening on psm */
3513 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3515 		result = L2CAP_CR_BAD_PSM;
3521 	mutex_lock(&conn->chan_lock);
3524 	/* Check if the ACL is secure enough (if not SDP) */
3525 	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3526 	    !hci_conn_check_link_mode(conn->hcon)) {
3527 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3528 		result = L2CAP_CR_SEC_BLOCK;
3532 	result = L2CAP_CR_NO_MEM;
3534 	/* Check if we already have channel with that dcid */
3535 	if (__l2cap_get_chan_by_dcid(conn, scid))
3538 	chan = pchan->ops->new_connection(pchan);
3544 	hci_conn_hold(conn->hcon);
3546 	bacpy(&bt_sk(sk)->src, conn->src);
3547 	bacpy(&bt_sk(sk)->dst, conn->dst);
3550 	chan->local_amp_id = amp_id;
3552 	__l2cap_chan_add(conn, chan);
3556 	__set_chan_timer(chan, sk->sk_sndtimeo);
3558 	chan->ident = cmd->ident;
3560 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3561 		if (l2cap_chan_check_security(chan)) {
3562 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3563 				__l2cap_state_change(chan, BT_CONNECT2);
3564 				result = L2CAP_CR_PEND;
3565 				status = L2CAP_CS_AUTHOR_PEND;
3566 				chan->ops->defer(chan);
3568 				/* Force pending result for AMP controllers.
3569 				 * The connection will succeed after the
3570 				 * physical link is up.
3573 					__l2cap_state_change(chan, BT_CONNECT2);
3574 					result = L2CAP_CR_PEND;
3576 					__l2cap_state_change(chan, BT_CONFIG);
3577 					result = L2CAP_CR_SUCCESS;
3579 				status = L2CAP_CS_NO_INFO;
3582 			__l2cap_state_change(chan, BT_CONNECT2);
3583 			result = L2CAP_CR_PEND;
3584 			status = L2CAP_CS_AUTHEN_PEND;
3587 		__l2cap_state_change(chan, BT_CONNECT2);
3588 		result = L2CAP_CR_PEND;
3589 		status = L2CAP_CS_NO_INFO;
3593 	release_sock(parent);
3594 	mutex_unlock(&conn->chan_lock);
3597 	rsp.scid   = cpu_to_le16(scid);
3598 	rsp.dcid   = cpu_to_le16(dcid);
3599 	rsp.result = cpu_to_le16(result);
3600 	rsp.status = cpu_to_le16(status);
3601 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
	/* First connect on this link: query the remote's feature mask. */
3603 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3604 		struct l2cap_info_req info;
3605 		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3607 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3608 		conn->info_ident = l2cap_get_ident(conn);
3610 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3612 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3613 			       sizeof(info), &info);
3616 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3617 	    result == L2CAP_CR_SUCCESS) {
3619 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3620 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3621 			       l2cap_build_conf_req(chan, buf), buf);
3622 		chan->num_conf_req++;
/* Thin wrapper: handle a plain (non-AMP) Connection Request by calling
 * l2cap_connect() with the standard response code and AMP id 0 (BR/EDR).
 */
3628 static int l2cap_connect_req(struct l2cap_conn *conn,
3629 			     struct l2cap_cmd_hdr *cmd, u8 *data)
3631 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or Create Channel Response).  Finds the
 * channel by scid (or by ident while still pending), then on SUCCESS moves
 * it to BT_CONFIG and sends the first Configuration Request; on PEND marks
 * CONF_CONNECT_PEND; otherwise deletes the channel with ECONNREFUSED.
 * NOTE(review): the case labels for PEND/default and some braces are
 * elided in this extract.
 */
3635 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3636 				    struct l2cap_cmd_hdr *cmd, u8 *data)
3638 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3639 	u16 scid, dcid, result, status;
3640 	struct l2cap_chan *chan;
3644 	scid   = __le16_to_cpu(rsp->scid);
3645 	dcid   = __le16_to_cpu(rsp->dcid);
3646 	result = __le16_to_cpu(rsp->result);
3647 	status = __le16_to_cpu(rsp->status);
3649 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3650 	       dcid, scid, result, status);
3652 	mutex_lock(&conn->chan_lock);
3655 		chan = __l2cap_get_chan_by_scid(conn, scid);
3661 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3670 	l2cap_chan_lock(chan);
3673 	case L2CAP_CR_SUCCESS:
3674 		l2cap_state_change(chan, BT_CONFIG);
3677 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3679 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3682 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3683 			       l2cap_build_conf_req(chan, req), req);
3684 		chan->num_conf_req++;
3688 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3692 		l2cap_chan_del(chan, ECONNREFUSED);
3696 	l2cap_chan_unlock(chan);
3699 	mutex_unlock(&conn->chan_lock);
/* Apply the default FCS policy after configuration: FCS only makes sense
 * in ERTM/streaming mode; enable CRC16 there unless the remote asked for
 * no FCS (CONF_NO_FCS_RECV).
 */
3704 static inline void set_default_fcs(struct l2cap_chan *chan)
3706 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3709 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3710 		chan->fcs = L2CAP_FCS_NONE;
3711 	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3712 		chan->fcs = L2CAP_FCS_CRC16;
/* Send the final SUCCESS Configuration Response after an EFS-pending
 * negotiation completes: clear the local-pending flag, mark output config
 * done, and transmit the response built into @data.
 */
3715 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3716 				    u8 ident, u16 flags)
3718 	struct l2cap_conn *conn = chan->conn;
3720 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3723 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3724 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3726 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3727 		       l2cap_build_conf_rsp(chan, data,
3728 					    L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configuration Request.  Rejects requests for channels
 * not in BT_CONFIG/BT_CONNECT2 with an invalid-CID Command Reject, bounds-
 * checks and accumulates (possibly fragmented) option data into
 * chan->conf_req, and on the final fragment parses it, sends the response,
 * and — once both directions are configured — initialises ERTM/streaming
 * state and readies the channel.  May also send our own first
 * Configuration Request and the deferred EFS response.
 * NOTE(review): some lines (returns, labels, braces) are elided in this
 * extract.
 */
3731 static inline int l2cap_config_req(struct l2cap_conn *conn,
3732 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3735 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3738 	struct l2cap_chan *chan;
3741 	dcid  = __le16_to_cpu(req->dcid);
3742 	flags = __le16_to_cpu(req->flags);
3744 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3746 	chan = l2cap_get_chan_by_scid(conn, dcid);
3750 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3751 		struct l2cap_cmd_rej_cid rej;
3753 		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3754 		rej.scid = cpu_to_le16(chan->scid);
3755 		rej.dcid = cpu_to_le16(chan->dcid);
3757 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3762 	/* Reject if config buffer is too small. */
3763 	len = cmd_len - sizeof(*req);
3764 	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3765 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3766 			       l2cap_build_conf_rsp(chan, rsp,
3767 			       L2CAP_CONF_REJECT, flags), rsp);
	/* Accumulate this fragment of the configuration options. */
3772 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
3773 	chan->conf_len += len;
3775 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3776 		/* Incomplete config. Send empty response. */
3777 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3778 			       l2cap_build_conf_rsp(chan, rsp,
3779 			       L2CAP_CONF_SUCCESS, flags), rsp);
3783 	/* Complete config. */
3784 	len = l2cap_parse_conf_req(chan, rsp);
3786 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
3790 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3791 	chan->num_conf_rsp++;
3793 	/* Reset config buffer. */
3796 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3799 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3800 		set_default_fcs(chan);
3802 		if (chan->mode == L2CAP_MODE_ERTM ||
3803 		    chan->mode == L2CAP_MODE_STREAMING)
3804 			err = l2cap_ertm_init(chan);
3807 			l2cap_send_disconn_req(chan->conn, chan, -err);
3809 			l2cap_chan_ready(chan);
3814 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3816 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3817 			       l2cap_build_conf_req(chan, buf), buf);
3818 		chan->num_conf_req++;
3821 	/* Got Conf Rsp PENDING from remote side and asume we sent
3822 	   Conf Rsp PENDING in the code above */
3823 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3824 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3826 		/* check compatibility */
3828 		/* Send rsp for BR/EDR channel */
3830 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3832 			chan->ident = cmd->ident;
3836 	l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response.  On SUCCESS commit the RFC
 * parameters; on PENDING possibly parse and answer the pending exchange;
 * on UNACCEPT re-negotiate by building a new Configuration Request (with a
 * bounded retry count and an explicit length check guarding the local
 * request buffer); anything else tears the channel down.  Once input
 * config completes, finish FCS/ERTM setup and ready the channel.
 * NOTE(review): some lines (lock, labels, braces, variable declarations)
 * are elided in this extract.
 */
3840 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3841 				   struct l2cap_cmd_hdr *cmd, u8 *data)
3843 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3844 	u16 scid, flags, result;
3845 	struct l2cap_chan *chan;
3846 	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3849 	scid   = __le16_to_cpu(rsp->scid);
3850 	flags  = __le16_to_cpu(rsp->flags);
3851 	result = __le16_to_cpu(rsp->result);
3853 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3856 	chan = l2cap_get_chan_by_scid(conn, scid);
3861 	case L2CAP_CONF_SUCCESS:
3862 		l2cap_conf_rfc_get(chan, rsp->data, len);
3863 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3866 	case L2CAP_CONF_PENDING:
3867 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3869 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3872 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3875 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
3879 			/* check compatibility */
3882 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
3885 				chan->ident = cmd->ident;
3889 	case L2CAP_CONF_UNACCEPT:
3890 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			/* Bounds check: the remote's option payload must fit
			 * in the local request buffer before re-parsing.
			 */
3893 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3894 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
3898 			/* throw out any old stored conf requests */
3899 			result = L2CAP_CONF_SUCCESS;
3900 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3903 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
3907 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
3908 				       L2CAP_CONF_REQ, len, req);
3909 			chan->num_conf_req++;
3910 			if (result != L2CAP_CONF_SUCCESS)
	/* Unhandled result: flag the error and disconnect. */
3916 		l2cap_chan_set_err(chan, ECONNRESET);
3918 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3919 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
3923 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3926 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
3928 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3929 		set_default_fcs(chan);
3931 		if (chan->mode == L2CAP_MODE_ERTM ||
3932 		    chan->mode == L2CAP_MODE_STREAMING)
3933 			err = l2cap_ertm_init(chan);
3936 			l2cap_send_disconn_req(chan->conn, chan, -err);
3938 			l2cap_chan_ready(chan);
3942 	l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: find the channel by our scid
 * (the remote's dcid), acknowledge with a Disconnection Response, shut the
 * socket down, and delete the channel.  The hold/put pair keeps the
 * channel alive across ops->close() after the lock is dropped.
 */
3946 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
3947 				       struct l2cap_cmd_hdr *cmd, u8 *data)
3949 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3950 	struct l2cap_disconn_rsp rsp;
3952 	struct l2cap_chan *chan;
3955 	scid = __le16_to_cpu(req->scid);
3956 	dcid = __le16_to_cpu(req->dcid);
3958 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3960 	mutex_lock(&conn->chan_lock);
3962 	chan = __l2cap_get_chan_by_scid(conn, dcid);
3964 		mutex_unlock(&conn->chan_lock);
3968 	l2cap_chan_lock(chan);
3972 	rsp.dcid = cpu_to_le16(chan->scid);
3973 	rsp.scid = cpu_to_le16(chan->dcid);
3974 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3977 	sk->sk_shutdown = SHUTDOWN_MASK;
3980 	l2cap_chan_hold(chan);
3981 	l2cap_chan_del(chan, ECONNRESET);
3983 	l2cap_chan_unlock(chan);
3985 	chan->ops->close(chan);
3986 	l2cap_chan_put(chan);
3988 	mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response to our earlier request: find the channel
 * by scid and delete it (err 0 — clean close), keeping a reference across
 * ops->close() via the hold/put pair.
 */
3993 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
3994 				       struct l2cap_cmd_hdr *cmd, u8 *data)
3996 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3998 	struct l2cap_chan *chan;
4000 	scid = __le16_to_cpu(rsp->scid);
4001 	dcid = __le16_to_cpu(rsp->dcid);
4003 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4005 	mutex_lock(&conn->chan_lock);
4007 	chan = __l2cap_get_chan_by_scid(conn, scid);
4009 		mutex_unlock(&conn->chan_lock);
4013 	l2cap_chan_lock(chan);
4015 	l2cap_chan_hold(chan);
4016 	l2cap_chan_del(chan, 0);
4018 	l2cap_chan_unlock(chan);
4020 	chan->ops->close(chan);
4021 	l2cap_chan_put(chan);
4023 	mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request: answer FEAT_MASK with our
 * feature mask (extended flow/window bits added conditionally — the
 * guarding condition lines are elided in this extract), FIXED_CHAN with
 * the fixed-channel bitmap (A2MP bit toggled by high-speed support), and
 * anything else with NOTSUPP.
 */
4028 static inline int l2cap_information_req(struct l2cap_conn *conn,
4029 					struct l2cap_cmd_hdr *cmd, u8 *data)
4031 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4034 	type = __le16_to_cpu(req->type);
4036 	BT_DBG("type 0x%4.4x", type);
4038 	if (type == L2CAP_IT_FEAT_MASK) {
4040 		u32 feat_mask = l2cap_feat_mask;
4041 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4042 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4043 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4045 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4048 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4049 				| L2CAP_FEAT_EXT_WINDOW;
4051 		put_unaligned_le32(feat_mask, rsp->data);
4052 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4054 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4056 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		/* A2MP fixed channel is advertised only with HS enabled
		 * (condition line elided in this extract).
		 */
4059 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4061 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4063 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4064 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4065 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4066 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4069 		struct l2cap_info_rsp rsp;
4070 		rsp.type   = cpu_to_le16(type);
4071 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4072 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response matching our outstanding
 * request.  On a FEAT_MASK answer, store the remote feature mask and — if
 * fixed channels are supported — chain a FIXED_CHAN request; otherwise
 * finish the info exchange and start pending channel connects.  Stale or
 * duplicate responses (wrong ident, exchange already done) are ignored.
 */
4079 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4080 					struct l2cap_cmd_hdr *cmd, u8 *data)
4082 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4085 	type   = __le16_to_cpu(rsp->type);
4086 	result = __le16_to_cpu(rsp->result);
4088 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4090 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4091 	if (cmd->ident != conn->info_ident ||
4092 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4095 	cancel_delayed_work(&conn->info_timer);
4097 	if (result != L2CAP_IR_SUCCESS) {
4098 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4099 		conn->info_ident = 0;
4101 		l2cap_conn_start(conn);
4107 	case L2CAP_IT_FEAT_MASK:
4108 		conn->feat_mask = get_unaligned_le32(rsp->data);
4110 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4111 			struct l2cap_info_req req;
4112 			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4114 			conn->info_ident = l2cap_get_ident(conn);
4116 			l2cap_send_cmd(conn, conn->info_ident,
4117 				       L2CAP_INFO_REQ, sizeof(req), &req);
4119 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4120 			conn->info_ident = 0;
4122 			l2cap_conn_start(conn);
4126 	case L2CAP_IT_FIXED_CHAN:
4127 		conn->fixed_chan_mask = rsp->data[0];
4128 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4129 		conn->info_ident = 0;
4131 		l2cap_conn_start(conn);
/* Handle a Create Channel Request (AMP).  Validates the command length and
 * the requested AMP controller id (must exist, be an AMP device, and be
 * up), rejecting with L2CAP_CR_BAD_AMP otherwise, then delegates channel
 * setup to l2cap_connect() with the create-channel response code.
 * NOTE(review): intermediate condition/cleanup lines are elided in this
 * extract.
 */
4138 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4139 				    struct l2cap_cmd_hdr *cmd,
4140 				    u16 cmd_len, void *data)
4142 	struct l2cap_create_chan_req *req = data;
4143 	struct l2cap_chan *chan;
4146 	if (cmd_len != sizeof(*req))
4152 	psm = le16_to_cpu(req->psm);
4153 	scid = le16_to_cpu(req->scid);
4155 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4158 		struct hci_dev *hdev;
4160 		/* Validate AMP controller id */
4161 		hdev = hci_dev_get(req->amp_id);
4162 		if (!hdev || hdev->dev_type != HCI_AMP ||
4163 		    !test_bit(HCI_UP, &hdev->flags)) {
4164 			struct l2cap_create_chan_rsp rsp;
4167 			rsp.scid = cpu_to_le16(scid);
4168 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4169 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4171 			l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4183 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Response for @icid with the given @result. */
4189 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4190 				     u16 icid, u16 result)
4192 	struct l2cap_move_chan_rsp rsp;
4194 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4196 	rsp.icid = cpu_to_le16(icid);
4197 	rsp.result = cpu_to_le16(result);
4199 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm for @chan, allocating a fresh ident and
 * arming the move timer while we wait for the confirm response.
 */
4202 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4204 	struct l2cap_move_chan_cfm cfm;
4206 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4208 	chan->ident = l2cap_get_ident(chan->conn);
4210 	cfm.icid = cpu_to_le16(chan->scid);
4211 	cfm.result = cpu_to_le16(result);
4213 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4216 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare @icid — used when
 * no channel object could be located for the move.
 */
4219 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4221 	struct l2cap_move_chan_cfm cfm;
4223 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4225 	cfm.icid = cpu_to_le16(icid);
4226 	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4228 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Acknowledge a Move Channel Confirm by sending the Confirm Response for
 * @icid using the confirm's @ident.
 */
4232 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4235 	struct l2cap_move_chan_cfm_rsp rsp;
4237 	BT_DBG("icid 0x%4.4x", icid);
4239 	rsp.icid = cpu_to_le16(icid);
4240 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link.  The
 * actual link release is not implemented yet (placeholder below).
 */
4243 static void __release_logical_link(struct l2cap_chan *chan)
4245 	chan->hs_hchan = NULL;
4246 	chan->hs_hcon = NULL;
4248 	/* Placeholder - release the logical link */
4251 static void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
/* Handle a Move Channel Request (AMP channel move, remote-initiated).
 * Validates the target channel (dynamic CID, not BR/EDR-only policy, ERTM
 * or streaming mode), rejects a move to the controller we are already on
 * (SAME_ID), validates the destination AMP controller, and resolves move
 * collisions in favour of the larger bd_addr.  On acceptance, records the
 * responder role and either completes/pends a move to BR/EDR or waits for
 * AMP preparation.  Always answers with a Move Channel Response.
 * NOTE(review): some lines (lock acquisition, cleanup, braces) are elided
 * in this extract.
 */
4258 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4259 					 struct l2cap_cmd_hdr *cmd,
4260 					 u16 cmd_len, void *data)
4262 	struct l2cap_move_chan_req *req = data;
4263 	struct l2cap_chan *chan;
4265 	u16 result = L2CAP_MR_NOT_ALLOWED;
4267 	if (cmd_len != sizeof(*req))
4270 	icid = le16_to_cpu(req->icid);
4272 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4277 	chan = l2cap_get_chan_by_dcid(conn, icid);
4279 		l2cap_send_move_chan_rsp(conn, cmd->ident, icid,
4280 					 L2CAP_MR_NOT_ALLOWED);
4284 	if (chan->scid < L2CAP_CID_DYN_START ||
4285 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4286 	    (chan->mode != L2CAP_MODE_ERTM &&
4287 	     chan->mode != L2CAP_MODE_STREAMING)) {
4288 		result = L2CAP_MR_NOT_ALLOWED;
4289 		goto send_move_response;
4292 	if (chan->local_amp_id == req->dest_amp_id) {
4293 		result = L2CAP_MR_SAME_ID;
4294 		goto send_move_response;
4297 	if (req->dest_amp_id) {
4298 		struct hci_dev *hdev;
4299 		hdev = hci_dev_get(req->dest_amp_id);
4300 		if (!hdev || hdev->dev_type != HCI_AMP ||
4301 		    !test_bit(HCI_UP, &hdev->flags)) {
4305 			result = L2CAP_MR_BAD_ID;
4306 			goto send_move_response;
4311 	/* Detect a move collision. Only send a collision response
4312 	 * if this side has "lost", otherwise proceed with the move.
4313 	 * The winner has the larger bd_addr.
4315 	if ((__chan_is_moving(chan) ||
4316 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4317 	    bacmp(conn->src, conn->dst) > 0) {
4318 		result = L2CAP_MR_COLLISION;
4319 		goto send_move_response;
4322 	chan->ident = cmd->ident;
4323 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4324 	l2cap_move_setup(chan);
4325 	chan->move_id = req->dest_amp_id;
4328 	if (!req->dest_amp_id) {
4329 		/* Moving to BR/EDR */
4330 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4331 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4332 			result = L2CAP_MR_PEND;
4334 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4335 			result = L2CAP_MR_SUCCESS;
4338 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4339 		/* Placeholder - uncomment when amp functions are available */
4340 		/*amp_accept_physical(chan, req->dest_amp_id);*/
4341 		result = L2CAP_MR_PEND;
4345 	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4347 	l2cap_chan_unlock(chan);
/* Continue an in-progress channel move after a SUCCESS/PEND Move Channel
 * Response.  Advances chan->move_state: waits for the logical link where
 * needed, sends Move Channel Confirm when both the response and logical
 * link are ready, and treats any unexpected state as a failed move
 * (revert move_id, finish the move, send UNCONFIRMED).
 * NOTE(review): several lines (case labels, breaks, the hchan condition
 * around the placeholder) are elided in this extract.
 */
4352 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4354 	struct l2cap_chan *chan;
4355 	struct hci_chan *hchan = NULL;
4357 	chan = l2cap_get_chan_by_scid(conn, icid);
4359 		l2cap_send_move_chan_cfm_icid(conn, icid);
4363 	__clear_chan_timer(chan);
4364 	if (result == L2CAP_MR_PEND)
4365 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4367 	switch (chan->move_state) {
4368 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4369 		/* Move confirm will be sent when logical link
4372 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4374 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4375 		if (result == L2CAP_MR_PEND) {
4377 		} else if (test_bit(CONN_LOCAL_BUSY,
4378 				    &chan->conn_state)) {
4379 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4381 			/* Logical link is up or moving to BR/EDR,
4384 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4385 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4388 	case L2CAP_MOVE_WAIT_RSP:
4390 		if (result == L2CAP_MR_SUCCESS) {
4391 			/* Remote is ready, send confirm immediately
4392 			 * after logical link is ready
4394 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4396 			/* Both logical link and move success
4397 			 * are required to confirm
4399 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4402 		/* Placeholder - get hci_chan for logical link */
4404 			/* Logical link not available */
4405 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4409 		/* If the logical link is not yet connected, do not
4410 		 * send confirmation.
4412 		if (hchan->state != BT_CONNECTED)
4415 		/* Logical link is already ready to go */
4417 		chan->hs_hcon = hchan->conn;
4418 		chan->hs_hcon->l2cap_data = chan->conn;
4420 		if (result == L2CAP_MR_SUCCESS) {
4421 			/* Can confirm now */
4422 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4424 			/* Now only need move success
4427 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4430 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4433 		/* Any other amp move state means the move failed. */
4434 		chan->move_id = chan->local_amp_id;
4435 		l2cap_move_done(chan);
4436 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4439 	l2cap_chan_unlock(chan);
/* Handle a Move Channel Response with a failure result.  A collision makes
 * the initiator yield and become responder; any other failure cancels the
 * move.  An unconfirmed Move Confirm is sent in the visible path.
 */
4442 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4445 struct l2cap_chan *chan;
4447 chan = l2cap_get_chan_by_ident(conn, ident);
4449 /* Could not locate channel, icid is best guess */
4450 l2cap_send_move_chan_cfm_icid(conn, icid);
4454 __clear_chan_timer(chan);
4456 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4457 if (result == L2CAP_MR_COLLISION) {
4458 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4460 /* Cleanup - cancel move */
4461 chan->move_id = chan->local_amp_id;
4462 l2cap_move_done(chan);
4466 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4468 l2cap_chan_unlock(chan);
/* Dispatch an incoming Move Channel Response: success/pending results
 * continue the move; any other result aborts it.
 */
4471 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4472 struct l2cap_cmd_hdr *cmd,
4473 u16 cmd_len, void *data)
4475 struct l2cap_move_chan_rsp *rsp = data;
/* Reject malformed (wrong-sized) commands */
4478 if (cmd_len != sizeof(*rsp))
4481 icid = le16_to_cpu(rsp->icid);
4482 result = le16_to_cpu(rsp->result);
4484 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4486 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4487 l2cap_move_continue(conn, icid, result);
4489 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirm from the remote.  On a confirmed result the
 * channel adopts the new controller id; otherwise the move is rolled back.
 * A Confirm Response is sent whether or not the channel was found.
 */
4494 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4495 struct l2cap_cmd_hdr *cmd,
4496 u16 cmd_len, void *data)
4498 struct l2cap_move_chan_cfm *cfm = data;
4499 struct l2cap_chan *chan;
4502 if (cmd_len != sizeof(*cfm))
4505 icid = le16_to_cpu(cfm->icid);
4506 result = le16_to_cpu(cfm->result);
4508 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4510 chan = l2cap_get_chan_by_dcid(conn, icid);
4512 /* Spec requires a response even if the icid was not found */
4513 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4517 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
4518 if (result == L2CAP_MC_CONFIRMED) {
4519 chan->local_amp_id = chan->move_id;
/* Moving back to BR/EDR (id 0) releases the AMP logical link */
4520 if (!chan->local_amp_id)
4521 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller */
4523 chan->move_id = chan->local_amp_id;
4526 l2cap_move_done(chan);
4529 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4531 l2cap_chan_unlock(chan);
/* Handle a Move Channel Confirm Response.  The visible portion only
 * validates the command length and logs the icid.
 */
4536 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4537 struct l2cap_cmd_hdr *cmd,
4538 u16 cmd_len, void *data)
4540 struct l2cap_move_chan_cfm_rsp *rsp = data;
4543 if (cmd_len != sizeof(*rsp))
4546 icid = le16_to_cpu(rsp->icid);
4548 BT_DBG("icid 0x%4.4x", icid);
/* Validate a set of LE connection parameters from a Connection Parameter
 * Update Request.
 *
 * @min/@max: connection interval bounds in 1.25 ms units (6..3200)
 * @latency: slave latency in connection events (0..499, and it must not
 *           exceed the latency allowed by the supervision timeout)
 * @to_multiplier: supervision timeout in 10 ms units (10..3200)
 *
 * Returns 0 when all parameters are acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
					 u16 to_multiplier)
{
	u16 max_latency;

	/* Interval bounds must be ordered and within 7.5 ms .. 4 s */
	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	/* Supervision timeout must be within 100 ms .. 32 s */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* Supervision timeout must exceed the maximum connection interval
	 * (both sides converted to the same 1.25 ms units: to * 10 / 1.25).
	 */
	if (max >= to_multiplier * 8)
		return -EINVAL;

	/* Latency may not allow the link to outlive the supervision timeout */
	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
/* Handle an LE Connection Parameter Update Request (sent by the slave).
 * Validates the requested parameters, answers with an accepted/rejected
 * response, and applies accepted parameters via hci_le_conn_update().
 */
4574 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4575 struct l2cap_cmd_hdr *cmd,
4578 struct hci_conn *hcon = conn->hcon;
4579 struct l2cap_conn_param_update_req *req;
4580 struct l2cap_conn_param_update_rsp rsp;
4581 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the connection master may process this request */
4584 if (!(hcon->link_mode & HCI_LM_MASTER))
4587 cmd_len = __le16_to_cpu(cmd->len);
4588 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4591 req = (struct l2cap_conn_param_update_req *) data;
4592 min = __le16_to_cpu(req->min);
4593 max = __le16_to_cpu(req->max);
4594 latency = __le16_to_cpu(req->latency);
4595 to_multiplier = __le16_to_cpu(req->to_multiplier);
4597 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4598 min, max, latency, to_multiplier);
4600 memset(&rsp, 0, sizeof(rsp));
4602 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4604 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4606 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4608 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters accepted: ask the controller to apply them */
4612 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler.
 * Unknown opcodes are logged; err carries the handler's result.
 */
4617 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4618 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4623 switch (cmd->code) {
4624 case L2CAP_COMMAND_REJ:
4625 l2cap_command_rej(conn, cmd, data);
4628 case L2CAP_CONN_REQ:
4629 err = l2cap_connect_req(conn, cmd, data);
4632 case L2CAP_CONN_RSP:
4633 case L2CAP_CREATE_CHAN_RSP:
/* Connect and Create Channel responses share one handler */
4634 err = l2cap_connect_create_rsp(conn, cmd, data);
4637 case L2CAP_CONF_REQ:
4638 err = l2cap_config_req(conn, cmd, cmd_len, data);
4641 case L2CAP_CONF_RSP:
4642 err = l2cap_config_rsp(conn, cmd, data);
4645 case L2CAP_DISCONN_REQ:
4646 err = l2cap_disconnect_req(conn, cmd, data);
4649 case L2CAP_DISCONN_RSP:
4650 err = l2cap_disconnect_rsp(conn, cmd, data);
4653 case L2CAP_ECHO_REQ:
/* Echo request is answered inline by mirroring the payload */
4654 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4657 case L2CAP_ECHO_RSP:
4660 case L2CAP_INFO_REQ:
4661 err = l2cap_information_req(conn, cmd, data);
4664 case L2CAP_INFO_RSP:
4665 err = l2cap_information_rsp(conn, cmd, data);
4668 case L2CAP_CREATE_CHAN_REQ:
4669 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4672 case L2CAP_MOVE_CHAN_REQ:
4673 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4676 case L2CAP_MOVE_CHAN_RSP:
4677 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4680 case L2CAP_MOVE_CHAN_CFM:
4681 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4684 case L2CAP_MOVE_CHAN_CFM_RSP:
4685 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4689 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command.  Only the connection parameter
 * update request needs real handling in the visible code.
 */
4697 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4698 struct l2cap_cmd_hdr *cmd, u8 *data)
4700 switch (cmd->code) {
4701 case L2CAP_COMMAND_REJ:
4704 case L2CAP_CONN_PARAM_UPDATE_REQ:
4705 return l2cap_conn_param_update_req(conn, cmd, data);
4707 case L2CAP_CONN_PARAM_UPDATE_RSP:
4711 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload: walk the packed command headers,
 * dispatch each command to the LE or BR/EDR handler, and send a Command
 * Reject when a command cannot be handled.
 */
4716 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4717 struct sk_buff *skb)
4719 u8 *data = skb->data;
4721 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic */
4724 l2cap_raw_recv(conn, skb);
4726 while (len >= L2CAP_CMD_HDR_SIZE) {
4728 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4729 data += L2CAP_CMD_HDR_SIZE;
4730 len -= L2CAP_CMD_HDR_SIZE;
4732 cmd_len = le16_to_cpu(cmd.len);
4734 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* A command longer than the remaining buffer, or ident 0, is invalid */
4737 if (cmd_len > len || !cmd.ident) {
4738 BT_DBG("corrupted command");
4742 if (conn->hcon->type == LE_LINK)
4743 err = l2cap_le_sig_cmd(conn, &cmd, data);
4745 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4748 struct l2cap_cmd_rej_unk rej;
4750 BT_ERR("Wrong link type (%d)", err);
4752 /* FIXME: Map err to a valid reason */
4753 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4754 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the CRC16 FCS trailer on a received ERTM/streaming
 * frame.  On a mismatch the visible path reports an error so the caller
 * can drop the frame.
 */
4765 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4767 u16 our_fcs, rcv_fcs;
4770 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4771 hdr_size = L2CAP_EXT_HDR_SIZE;
4773 hdr_size = L2CAP_ENH_HDR_SIZE;
4775 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After the trim, skb->data + skb->len points at the removed FCS bytes */
4776 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4777 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* CRC covers the L2CAP header (bytes before skb->data) plus the payload */
4778 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4780 if (our_fcs != rcv_fcs)
/* Send the receiver-state frame appropriate to the current state: RNR when
 * locally busy, otherwise flush pending I-frames and fall back to an RR if
 * the F-bit still has to be delivered.
 */
4786 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4788 struct l2cap_ctrl control;
4790 BT_DBG("chan %p", chan);
4792 memset(&control, 0, sizeof(control));
4795 control.reqseq = chan->buffer_seq;
4796 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4798 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4799 control.super = L2CAP_SUPER_RNR;
4800 l2cap_send_sframe(chan, &control);
/* Remote just left busy state: restart retransmission timing */
4803 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4804 chan->unacked_frames > 0)
4805 __set_retrans_timer(chan);
4807 /* Send pending iframes */
4808 l2cap_ertm_send(chan);
4810 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4811 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4812 /* F-bit wasn't sent in an s-frame or i-frame yet, so send it now */
4815 control.super = L2CAP_SUPER_RR;
4816 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list and update the aggregate length
 * accounting.  *last_frag caches the list tail so appends stay O(1).
 */
4820 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
4821 struct sk_buff **last_frag)
4823 /* skb->len reflects data in skb as well as all fragments
4824 * skb->data_len reflects only data in fragments */
4826 if (!skb_has_frag_list(skb))
4827 skb_shinfo(skb)->frag_list = new_frag;
4829 new_frag->next = NULL;
4831 (*last_frag)->next = new_frag;
4832 *last_frag = new_frag;
4834 skb->len += new_frag->len;
4835 skb->data_len += new_frag->len;
4836 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits: unsegmented
 * frames are delivered immediately; start/continue/end fragments accumulate
 * in chan->sdu until the announced SDU length is reached.
 */
4839 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4840 struct l2cap_ctrl *control)
4844 switch (control->sar) {
4845 case L2CAP_SAR_UNSEGMENTED:
4849 err = chan->ops->recv(chan, skb);
4852 case L2CAP_SAR_START:
/* The first fragment carries the total SDU length prefix */
4856 chan->sdu_len = get_unaligned_le16(skb->data);
4857 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU: error path (handling not visible here) */
4859 if (chan->sdu_len > chan->imtu) {
4864 if (skb->len >= chan->sdu_len)
4868 chan->sdu_last_frag = skb;
4874 case L2CAP_SAR_CONTINUE:
4878 append_skb_frag(chan->sdu, skb,
4879 &chan->sdu_last_frag);
4882 if (chan->sdu->len >= chan->sdu_len)
4892 append_skb_frag(chan->sdu, skb,
4893 &chan->sdu_last_frag);
/* The end fragment must complete the SDU length exactly */
4896 if (chan->sdu->len != chan->sdu_len)
4899 err = chan->ops->recv(chan, chan->sdu);
4902 /* Reassembly complete */
4904 chan->sdu_last_frag = NULL;
/* Error path: discard any partially assembled SDU */
4912 kfree_skb(chan->sdu);
4914 chan->sdu_last_frag = NULL;
/* Re-segment queued outbound data after a channel move changes the usable
 * MPS.  Placeholder implementation: no resegmentation is performed yet, so
 * this always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
/* Feed a local-busy transition (detected or cleared) into the ERTM TX
 * state machine.  Non-ERTM channels ignore busy signaling.
 */
4927 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4931 if (chan->mode != L2CAP_MODE_ERTM)
4934 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4935 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver frames to reassembly in buffer_seq order
 * until a gap is found or local busy is asserted.  When the queue empties
 * the channel returns to the normal RECV state and sends an ack.
 */
4938 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4941 /* Pass sequential frames to l2cap_reassemble_sdu()
4942 * until a gap is encountered. */
4945 BT_DBG("chan %p", chan);
4947 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4948 struct sk_buff *skb;
4949 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4950 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4952 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4957 skb_unlink(skb, &chan->srej_q);
4958 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4959 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4964 if (skb_queue_empty(&chan->srej_q)) {
4965 chan->rx_state = L2CAP_RX_STATE_RECV;
4966 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, enforce the retry
 * limit, then retransmit the requested I-frame.  P/F bit handling follows
 * the ERTM rules for the WAIT_F substate.
 */
4972 static void l2cap_handle_srej(struct l2cap_chan *chan,
4973 struct l2cap_ctrl *control)
4975 struct sk_buff *skb;
4977 BT_DBG("chan %p, control %p", chan, control);
/* An SREJ for the next (still unsent) sequence number is a protocol error */
4979 if (control->reqseq == chan->next_tx_seq) {
4980 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4981 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4985 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4988 BT_DBG("Seq %d not available for retransmission",
4993 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4994 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4995 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4999 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5001 if (control->poll) {
5002 l2cap_pass_to_tx(chan, control);
/* Poll set: retransmit with the F-bit and flush pending frames */
5004 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5005 l2cap_retransmit(chan, control);
5006 l2cap_ertm_send(chan);
5008 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5009 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5010 chan->srej_save_reqseq = control->reqseq;
5013 l2cap_pass_to_tx_fbit(chan, control);
5015 if (control->final) {
/* Retransmit unless this final answers the SREJ we already acted on */
5016 if (chan->srej_save_reqseq != control->reqseq ||
5017 !test_and_clear_bit(CONN_SREJ_ACT,
5019 l2cap_retransmit(chan, control);
5021 l2cap_retransmit(chan, control);
5022 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5023 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5024 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq and the retry limit,
 * then retransmit all unacked frames starting at reqseq.
 */
5030 static void l2cap_handle_rej(struct l2cap_chan *chan,
5031 struct l2cap_ctrl *control)
5033 struct sk_buff *skb;
5035 BT_DBG("chan %p, control %p", chan, control);
/* A REJ of the next (still unsent) sequence number is a protocol error */
5037 if (control->reqseq == chan->next_tx_seq) {
5038 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5039 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5043 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5045 if (chan->max_tx && skb &&
5046 bt_cb(skb)->control.retries >= chan->max_tx) {
5047 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5048 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5052 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5054 l2cap_pass_to_tx(chan, control);
5056 if (control->final) {
/* Only retransmit if this final bit wasn't already handled */
5057 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5058 l2cap_retransmit_all(chan, control);
5060 l2cap_retransmit_all(chan, control);
5061 l2cap_ertm_send(chan);
5062 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5063 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window and
 * the SREJ bookkeeping: expected, expected-SREJ, duplicate, unexpected
 * (gap) or invalid.
 */
5067 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5069 BT_DBG("chan %p, txseq %d", chan, txseq);
5071 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5072 chan->expected_tx_seq);
5074 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5075 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5077 /* See notes below regarding "double poll" and invalid frames */
5080 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5081 BT_DBG("Invalid/Ignore - after SREJ");
5082 return L2CAP_TXSEQ_INVALID_IGNORE;
5084 BT_DBG("Invalid - in window after SREJ sent");
5085 return L2CAP_TXSEQ_INVALID;
5089 if (chan->srej_list.head == txseq) {
5090 BT_DBG("Expected SREJ");
5091 return L2CAP_TXSEQ_EXPECTED_SREJ;
5094 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5095 BT_DBG("Duplicate SREJ - txseq already stored");
5096 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5099 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5100 BT_DBG("Unexpected SREJ - not requested");
5101 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5105 if (chan->expected_tx_seq == txseq) {
5106 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5108 BT_DBG("Invalid - txseq outside tx window");
5109 return L2CAP_TXSEQ_INVALID;
5112 return L2CAP_TXSEQ_EXPECTED;
5116 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5117 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5118 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5119 return L2CAP_TXSEQ_DUPLICATE;
5122 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5123 /* A source of invalid packets is a "double poll" condition,
5124 * where delays cause us to send multiple poll packets. If
5125 * the remote stack receives and processes both polls,
5126 * sequence numbers can wrap around in such a way that a
5127 * resent frame has a sequence number that looks like new data
5128 * with a sequence gap. This would trigger an erroneous SREJ
5131 * Fortunately, this is impossible with a tx window that's
5132 * less than half of the maximum sequence number, which allows
5133 * invalid frames to be safely ignored.
5135 * With tx window sizes greater than half of the tx window
5136 * maximum, the frame is invalid and cannot be ignored. This
5137 * causes a disconnect. */
5140 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5141 BT_DBG("Invalid/Ignore - txseq outside tx window");
5142 return L2CAP_TXSEQ_INVALID_IGNORE;
5144 BT_DBG("Invalid - txseq outside tx window");
5145 return L2CAP_TXSEQ_INVALID;
5148 BT_DBG("Unexpected - txseq indicates missing frames");
5149 return L2CAP_TXSEQ_UNEXPECTED;
/* RX handler for the normal RECV state: classify incoming I-frames and
 * either deliver, queue (gap => enter SREJ_SENT), or drop them, and handle
 * RR/RNR/REJ/SREJ supervisory events.  skb_in_use records whether skb
 * ownership was transferred; otherwise it is freed at the end.
 */
5153 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5154 struct l2cap_ctrl *control,
5155 struct sk_buff *skb, u8 event)
5158 bool skb_in_use = 0;
5160 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5164 case L2CAP_EV_RECV_IFRAME:
5165 switch (l2cap_classify_txseq(chan, control->txseq)) {
5166 case L2CAP_TXSEQ_EXPECTED:
5167 l2cap_pass_to_tx(chan, control);
5169 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5170 BT_DBG("Busy, discarding expected seq %d",
5175 chan->expected_tx_seq = __next_seq(chan,
5178 chan->buffer_seq = chan->expected_tx_seq;
5181 err = l2cap_reassemble_sdu(chan, skb, control);
5185 if (control->final) {
5186 if (!test_and_clear_bit(CONN_REJ_ACT,
5187 &chan->conn_state)) {
5189 l2cap_retransmit_all(chan, control);
5190 l2cap_ertm_send(chan);
5194 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5195 l2cap_send_ack(chan);
5197 case L2CAP_TXSEQ_UNEXPECTED:
5198 l2cap_pass_to_tx(chan, control);
5200 /* Can't issue SREJ frames in the local busy state.
5201 * Drop this frame, it will be seen as missing
5202 * when local busy is exited. */
5204 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5205 BT_DBG("Busy, discarding unexpected seq %d",
5210 /* There was a gap in the sequence, so an SREJ
5211 * must be sent for each missing frame. The
5212 * current frame is stored for later use. */
5214 skb_queue_tail(&chan->srej_q, skb);
5216 BT_DBG("Queued %p (queue len %d)", skb,
5217 skb_queue_len(&chan->srej_q));
5219 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5220 l2cap_seq_list_clear(&chan->srej_list);
5221 l2cap_send_srej(chan, control->txseq);
5223 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5225 case L2CAP_TXSEQ_DUPLICATE:
5226 l2cap_pass_to_tx(chan, control);
5228 case L2CAP_TXSEQ_INVALID_IGNORE:
5230 case L2CAP_TXSEQ_INVALID:
5232 l2cap_send_disconn_req(chan->conn, chan,
5237 case L2CAP_EV_RECV_RR:
5238 l2cap_pass_to_tx(chan, control);
5239 if (control->final) {
5240 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5242 if (!test_and_clear_bit(CONN_REJ_ACT,
5243 &chan->conn_state)) {
5245 l2cap_retransmit_all(chan, control);
5248 l2cap_ertm_send(chan);
5249 } else if (control->poll) {
5250 l2cap_send_i_or_rr_or_rnr(chan);
5252 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5253 &chan->conn_state) &&
5254 chan->unacked_frames)
5255 __set_retrans_timer(chan);
5257 l2cap_ertm_send(chan);
5260 case L2CAP_EV_RECV_RNR:
5261 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5262 l2cap_pass_to_tx(chan, control);
5263 if (control && control->poll) {
5264 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5265 l2cap_send_rr_or_rnr(chan, 0);
5267 __clear_retrans_timer(chan);
5268 l2cap_seq_list_clear(&chan->retrans_list);
5270 case L2CAP_EV_RECV_REJ:
5271 l2cap_handle_rej(chan, control);
5273 case L2CAP_EV_RECV_SREJ:
5274 l2cap_handle_srej(chan, control);
/* Frames whose ownership was not transferred above are dropped here */
5280 if (skb && !skb_in_use) {
5281 BT_DBG("Freeing %p", skb);
/* RX handler for the SREJ_SENT state: queue retransmitted and new frames
 * in srej_q, drain in-sequence frames via l2cap_rx_queued_iframes(), and
 * issue further SREJs for any remaining gaps.
 */
5288 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5289 struct l2cap_ctrl *control,
5290 struct sk_buff *skb, u8 event)
5293 u16 txseq = control->txseq;
5294 bool skb_in_use = 0;
5296 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5300 case L2CAP_EV_RECV_IFRAME:
5301 switch (l2cap_classify_txseq(chan, txseq)) {
5302 case L2CAP_TXSEQ_EXPECTED:
5303 /* Keep frame for reassembly later */
5304 l2cap_pass_to_tx(chan, control);
5305 skb_queue_tail(&chan->srej_q, skb);
5307 BT_DBG("Queued %p (queue len %d)", skb,
5308 skb_queue_len(&chan->srej_q));
5310 chan->expected_tx_seq = __next_seq(chan, txseq);
5312 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The head of the SREJ list arrived; it can now be consumed */
5313 l2cap_seq_list_pop(&chan->srej_list);
5315 l2cap_pass_to_tx(chan, control);
5316 skb_queue_tail(&chan->srej_q, skb);
5318 BT_DBG("Queued %p (queue len %d)", skb,
5319 skb_queue_len(&chan->srej_q));
5321 err = l2cap_rx_queued_iframes(chan);
5326 case L2CAP_TXSEQ_UNEXPECTED:
5327 /* Got a frame that can't be reassembled yet.
5328 * Save it for later, and send SREJs to cover
5329 * the missing frames. */
5331 skb_queue_tail(&chan->srej_q, skb);
5333 BT_DBG("Queued %p (queue len %d)", skb,
5334 skb_queue_len(&chan->srej_q));
5336 l2cap_pass_to_tx(chan, control);
5337 l2cap_send_srej(chan, control->txseq);
5339 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5340 /* This frame was requested with an SREJ, but
5341 * some expected retransmitted frames are
5342 * missing. Request retransmission of missing frames. */
5345 skb_queue_tail(&chan->srej_q, skb);
5347 BT_DBG("Queued %p (queue len %d)", skb,
5348 skb_queue_len(&chan->srej_q));
5350 l2cap_pass_to_tx(chan, control);
5351 l2cap_send_srej_list(chan, control->txseq);
5353 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5354 /* We've already queued this frame. Drop this copy. */
5355 l2cap_pass_to_tx(chan, control);
5357 case L2CAP_TXSEQ_DUPLICATE:
5358 /* Expecting a later sequence number, so this frame
5359 * was already received. Ignore it completely. */
5362 case L2CAP_TXSEQ_INVALID_IGNORE:
5364 case L2CAP_TXSEQ_INVALID:
5366 l2cap_send_disconn_req(chan->conn, chan,
5371 case L2CAP_EV_RECV_RR:
5372 l2cap_pass_to_tx(chan, control);
5373 if (control->final) {
5374 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5376 if (!test_and_clear_bit(CONN_REJ_ACT,
5377 &chan->conn_state)) {
5379 l2cap_retransmit_all(chan, control);
5382 l2cap_ertm_send(chan);
5383 } else if (control->poll) {
5384 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5385 &chan->conn_state) &&
5386 chan->unacked_frames) {
5387 __set_retrans_timer(chan);
5390 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5391 l2cap_send_srej_tail(chan);
5393 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5394 &chan->conn_state) &&
5395 chan->unacked_frames)
5396 __set_retrans_timer(chan);
5398 l2cap_send_ack(chan);
5401 case L2CAP_EV_RECV_RNR:
5402 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5403 l2cap_pass_to_tx(chan, control);
5404 if (control->poll) {
5405 l2cap_send_srej_tail(chan);
/* No poll: answer with a plain RR carrying our buffer_seq */
5407 struct l2cap_ctrl rr_control;
5408 memset(&rr_control, 0, sizeof(rr_control));
5409 rr_control.sframe = 1;
5410 rr_control.super = L2CAP_SUPER_RR;
5411 rr_control.reqseq = chan->buffer_seq;
5412 l2cap_send_sframe(chan, &rr_control);
5416 case L2CAP_EV_RECV_REJ:
5417 l2cap_handle_rej(chan, control);
5419 case L2CAP_EV_RECV_SREJ:
5420 l2cap_handle_srej(chan, control);
/* Frames whose ownership was not transferred above are dropped here */
5424 if (skb && !skb_in_use) {
5425 BT_DBG("Freeing %p", skb);
/* Finalize a completed channel move: return the RX state machine to RECV,
 * adopt the MTU of the controller now carrying the channel, and resegment
 * pending data.
 */
5432 static int l2cap_finish_move(struct l2cap_chan *chan)
5434 BT_DBG("chan %p", chan);
5436 chan->rx_state = L2CAP_RX_STATE_RECV;
/* On an AMP link use the block MTU, on BR/EDR the ACL MTU */
5439 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5441 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5443 return l2cap_resegment(chan);
/* RX state handler while waiting for a poll (P=1) frame after a move.
 * On the poll, rewind TX bookkeeping to the peer's reqseq, finish the move
 * and answer with an F-bit frame, then process the triggering event via
 * the normal RECV handler.
 */
5446 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
5447 struct l2cap_ctrl *control,
5448 struct sk_buff *skb, u8 event)
5452 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5458 l2cap_process_reqseq(chan, control->reqseq);
5460 if (!skb_queue_empty(&chan->tx_q))
5461 chan->tx_send_head = skb_peek(&chan->tx_q);
5463 chan->tx_send_head = NULL;
5465 /* Rewind next_tx_seq to the point expected by the receiver */
5468 chan->next_tx_seq = control->reqseq;
5469 chan->unacked_frames = 0;
5471 err = l2cap_finish_move(chan);
5475 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5476 l2cap_send_i_or_rr_or_rnr(chan);
5478 if (event == L2CAP_EV_RECV_IFRAME)
/* S-frame events are replayed through the RECV handler without the skb */
5481 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state handler while waiting for a final (F=1) frame after a move.
 * On the final, rewind TX bookkeeping to the peer's reqseq, adopt the new
 * controller's MTU, resegment, then fall through to the RECV handler.
 */
5484 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
5485 struct l2cap_ctrl *control,
5486 struct sk_buff *skb, u8 event)
/* Ignore everything until the frame carrying the F-bit arrives */
5490 if (!control->final)
5493 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5495 chan->rx_state = L2CAP_RX_STATE_RECV;
5496 l2cap_process_reqseq(chan, control->reqseq);
5498 if (!skb_queue_empty(&chan->tx_q))
5499 chan->tx_send_head = skb_peek(&chan->tx_q);
5501 chan->tx_send_head = NULL;
5503 /* Rewind next_tx_seq to the point expected by the receiver */
5506 chan->next_tx_seq = control->reqseq;
5507 chan->unacked_frames = 0;
/* On an AMP link use the block MTU, on BR/EDR the ACL MTU */
5510 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5512 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5514 err = l2cap_resegment(chan);
5517 err = l2cap_rx_state_recv(chan, control, skb, event);
5522 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5524 /* Make sure reqseq is for a packet that has been sent but not acked */
5527 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5528 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX entry: validate the frame's reqseq, then dispatch the
 * event to the handler for the current rx_state.  An invalid reqseq tears
 * the connection down.
 */
5531 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5532 struct sk_buff *skb, u8 event)
5536 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5537 control, skb, event, chan->rx_state);
5539 if (__valid_reqseq(chan, control->reqseq)) {
5540 switch (chan->rx_state) {
5541 case L2CAP_RX_STATE_RECV:
5542 err = l2cap_rx_state_recv(chan, control, skb, event);
5544 case L2CAP_RX_STATE_SREJ_SENT:
5545 err = l2cap_rx_state_srej_sent(chan, control, skb,
5548 case L2CAP_RX_STATE_WAIT_P:
5549 err = l2cap_rx_state_wait_p(chan, control, skb, event);
5551 case L2CAP_RX_STATE_WAIT_F:
5552 err = l2cap_rx_state_wait_f(chan, control, skb, event);
5559 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5560 control->reqseq, chan->next_tx_seq,
5561 chan->expected_ack_seq);
5562 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Streaming-mode RX: deliver only in-order frames; anything else is
 * dropped along with any partially assembled SDU, since streaming mode has
 * no retransmission.
 */
5568 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5569 struct sk_buff *skb)
5573 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5576 if (l2cap_classify_txseq(chan, control->txseq) ==
5577 L2CAP_TXSEQ_EXPECTED) {
5578 l2cap_pass_to_tx(chan, control);
5580 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5581 __next_seq(chan, chan->buffer_seq));
5583 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5585 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: drop the partial SDU and the frame itself */
5588 kfree_skb(chan->sdu);
5591 chan->sdu_last_frag = NULL;
5595 BT_DBG("Freeing %p", skb);
/* Track the latest txseq so subsequent gaps classify correctly */
5600 chan->last_acked_seq = control->txseq;
5601 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames: unpack and FCS-check the
 * control field, validate payload length and F/P bits, then route I-frames
 * and S-frames into the RX state machine.
 */
5606 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5608 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5612 __unpack_control(chan, skb);
/* We can just drop the corrupted I-frame here.
5618 * Receiver will miss it and start proper recovery
5619 * procedures and ask for retransmission. */
5621 if (l2cap_check_fcs(chan, skb))
5624 if (!control->sframe && control->sar == L2CAP_SAR_START)
5625 len -= L2CAP_SDULEN_SIZE;
5627 if (chan->fcs == L2CAP_FCS_CRC16)
5628 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS is a protocol violation */
5630 if (len > chan->mps) {
5631 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5635 if (!control->sframe) {
5638 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5639 control->sar, control->reqseq, control->final,
5642 /* Validate F-bit - F=0 always valid, F=1 only
5643 * valid in TX WAIT_F */
5645 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5648 if (chan->mode != L2CAP_MODE_STREAMING) {
5649 event = L2CAP_EV_RECV_IFRAME;
5650 err = l2cap_rx(chan, control, skb, event);
5652 err = l2cap_stream_rx(chan, control, skb);
5656 l2cap_send_disconn_req(chan->conn, chan,
/* Map the 2-bit supervisory field onto the RX event codes */
5659 const u8 rx_func_to_event[4] = {
5660 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5661 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5664 /* Only I-frames are expected in streaming mode */
5665 if (chan->mode == L2CAP_MODE_STREAMING)
5668 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5669 control->reqseq, control->final, control->poll,
5674 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5678 /* Validate F and P bits */
5679 if (control->final && (control->poll ||
5680 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5683 event = rx_func_to_event[control->super];
5684 if (l2cap_rx(chan, control, skb, event))
5685 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Route a data packet to the channel owning the given source CID; A2MP
 * packets may create their channel on demand.  Unknown CIDs are dropped.
 */
5695 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5696 struct sk_buff *skb)
5698 struct l2cap_chan *chan;
5700 chan = l2cap_get_chan_by_scid(conn, cid);
5702 if (cid == L2CAP_CID_A2MP) {
5703 chan = a2mp_channel_create(conn, skb);
5709 l2cap_chan_lock(chan);
5711 BT_DBG("unknown cid 0x%4.4x", cid);
5712 /* Drop packet and return */
5718 BT_DBG("chan %p, len %d", chan, skb->len);
5720 if (chan->state != BT_CONNECTED)
5723 switch (chan->mode) {
5724 case L2CAP_MODE_BASIC:
5725 /* If socket recv buffers overflows we drop data here
5726 * which is *bad* because L2CAP has to be reliable.
5727 * But we don't have any other choice. L2CAP doesn't
5728 * provide flow control mechanism. */
5730 if (chan->imtu < skb->len)
5733 if (!chan->ops->recv(chan, skb))
5737 case L2CAP_MODE_ERTM:
5738 case L2CAP_MODE_STREAMING:
5739 l2cap_data_rcv(chan, skb);
5743 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5751 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to the global channel bound
 * to its PSM, enforcing the channel MTU.
 */
5754 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5755 struct sk_buff *skb)
5757 struct l2cap_chan *chan;
5759 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5763 BT_DBG("chan %p, len %d", chan, skb->len);
5765 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5768 if (chan->imtu < skb->len)
5771 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT (LE fixed-channel) packet to the registered channel,
 * enforcing the channel MTU.
 */
5778 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5779 struct sk_buff *skb)
5781 struct l2cap_chan *chan;
5783 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5787 BT_DBG("chan %p, len %d", chan, skb->len);
5789 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5792 if (chan->imtu < skb->len)
5795 if (!chan->ops->recv(chan, skb))
/* Demultiplex one received L2CAP frame by channel ID after validating the
 * header length field against the actual payload size.
 */
5802 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5804 struct l2cap_hdr *lh = (void *) skb->data;
5808 skb_pull(skb, L2CAP_HDR_SIZE);
5809 cid = __le16_to_cpu(lh->cid);
5810 len = __le16_to_cpu(lh->len);
/* The header length must match the remaining payload exactly */
5812 if (len != skb->len) {
5817 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5820 case L2CAP_CID_LE_SIGNALING:
5821 case L2CAP_CID_SIGNALING:
5822 l2cap_sig_channel(conn, skb);
5825 case L2CAP_CID_CONN_LESS:
5826 psm = get_unaligned((__le16 *) skb->data);
5827 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5828 l2cap_conless_channel(conn, psm, skb);
5831 case L2CAP_CID_LE_DATA:
5832 l2cap_att_channel(conn, cid, skb);
/* SMP packets that fail processing take the whole connection down */
5836 if (smp_sig_channel(conn, skb))
5837 l2cap_conn_del(conn->hcon, EACCES);
5841 l2cap_data_channel(conn, cid, skb);
5846 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback on an incoming connection request: scan listening channels
 * for an exact local-address match (preferred) or a wildcard match, and
 * report the accept/role-switch link-mode bits.
 */
5848 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5850 int exact = 0, lm1 = 0, lm2 = 0;
5851 struct l2cap_chan *c;
5853 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
5855 /* Find listening sockets and check their link_mode */
5856 read_lock(&chan_list_lock);
5857 list_for_each_entry(c, &chan_list, global_l) {
5858 struct sock *sk = c->sk;
5860 if (c->state != BT_LISTEN)
/* Exact match on this adapter's own address */
5863 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5864 lm1 |= HCI_LM_ACCEPT;
5865 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5866 lm1 |= HCI_LM_MASTER;
/* Wildcard-bound listener */
5868 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5869 lm2 |= HCI_LM_ACCEPT;
5870 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5871 lm2 |= HCI_LM_MASTER;
5874 read_unlock(&chan_list_lock);
5876 return exact ? lm1 : lm2;
/* HCI callback when a connection attempt completes: on success set up the
 * L2CAP connection, otherwise tear it down with the status mapped to errno.
 */
5879 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5881 struct l2cap_conn *conn;
5883 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
5886 conn = l2cap_conn_add(hcon, status);
5888 l2cap_conn_ready(conn);
5890 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking which reason code to use when disconnecting; falls
 * back to "remote user terminated" when no L2CAP state exists.
 */
5894 int l2cap_disconn_ind(struct hci_conn *hcon)
5896 struct l2cap_conn *conn = hcon->l2cap_data;
5898 BT_DBG("hcon %p", hcon);
5901 return HCI_ERROR_REMOTE_USER_TERM;
5902 return conn->disc_reason;
5905 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5907 BT_DBG("hcon %p reason %d", hcon, reason);
5909 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on connection-oriented channels: losing
 * encryption arms a timer (medium security) or closes the channel (high);
 * regaining encryption clears the timer for medium security.
 */
5912 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5914 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5917 if (encrypt == 0x00) {
5918 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5919 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5920 } else if (chan->sec_level == BT_SECURITY_HIGH)
5921 l2cap_chan_close(chan, ECONNREFUSED);
5923 if (chan->sec_level == BT_SECURITY_MEDIUM)
5924 __clear_chan_timer(chan);
/* Security (authentication/encryption) state change for a link.
 * For LE links: distribute SMP keys on success and cancel the security
 * timer.  For BR/EDR: walk every channel on the connection under
 * chan_lock and advance each one according to its state:
 *  - A2MP fixed channels and channels with a pending connect are skipped;
 *  - LE data channels become ready once encrypted;
 *  - CONNECTED/CONFIG channels get the socket unsuspended and the
 *    encryption-drop policy applied (l2cap_check_encryption);
 *  - BT_CONNECT starts the connection on success, else arms the
 *    disconnect timer;
 *  - BT_CONNECT2 answers the pending connect request: defer for
 *    authorization, succeed into BT_CONFIG, or reject with SEC_BLOCK,
 *    then send the conn rsp and kick off configuration if needed.
 * NOTE(review): several guard/else lines sit in gaps of this view —
 * verify branch pairing against the full file.
 */
5928 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5930 struct l2cap_conn *conn = hcon->l2cap_data;
5931 struct l2cap_chan *chan;
5936 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
/* LE links are handled entirely by SMP; nothing per-channel to do. */
5938 if (hcon->type == LE_LINK) {
5939 if (!status && encrypt)
5940 smp_distribute_keys(conn, 0);
5941 cancel_delayed_work(&conn->security_timer);
5944 mutex_lock(&conn->chan_lock);
5946 list_for_each_entry(chan, &conn->chan_l, list) {
5947 l2cap_chan_lock(chan);
5949 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5950 state_to_string(chan->state));
/* A2MP fixed channel does its own security handling — skip. */
5952 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
5953 l2cap_chan_unlock(chan);
/* LE data channel riding on a BR/EDR-managed conn: ready once
 * the link is encrypted at the negotiated security level. */
5957 if (chan->scid == L2CAP_CID_LE_DATA) {
5958 if (!status && encrypt) {
5959 chan->sec_level = hcon->sec_level;
5960 l2cap_chan_ready(chan);
5963 l2cap_chan_unlock(chan);
/* Security elevation still in flight — revisit on next cfm. */
5967 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5968 l2cap_chan_unlock(chan);
5972 if (!status && (chan->state == BT_CONNECTED ||
5973 chan->state == BT_CONFIG)) {
5974 struct sock *sk = chan->sk;
/* Security upgrade done: let queued socket traffic flow again. */
5976 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5977 sk->sk_state_change(sk);
5979 l2cap_check_encryption(chan, encrypt);
5980 l2cap_chan_unlock(chan);
5984 if (chan->state == BT_CONNECT) {
5986 l2cap_start_connection(chan);
/* Security failed before connect — give up after DISC timeout. */
5988 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5990 } else if (chan->state == BT_CONNECT2) {
5991 struct sock *sk = chan->sk;
5992 struct l2cap_conn_rsp rsp;
/* Userspace wants to authorize incoming connects itself. */
5998 if (test_bit(BT_SK_DEFER_SETUP,
5999 &bt_sk(sk)->flags)) {
6000 res = L2CAP_CR_PEND;
6001 stat = L2CAP_CS_AUTHOR_PEND;
6002 chan->ops->defer(chan);
6004 __l2cap_state_change(chan, BT_CONFIG);
6005 res = L2CAP_CR_SUCCESS;
6006 stat = L2CAP_CS_NO_INFO;
/* Security check failed: refuse the connect for security reasons. */
6009 __l2cap_state_change(chan, BT_DISCONN);
6010 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6011 res = L2CAP_CR_SEC_BLOCK;
6012 stat = L2CAP_CS_NO_INFO;
/* Answer the peer's pending L2CAP connect request. */
6017 rsp.scid = cpu_to_le16(chan->dcid);
6018 rsp.dcid = cpu_to_le16(chan->scid);
6019 rsp.result = cpu_to_le16(res);
6020 rsp.status = cpu_to_le16(stat);
6021 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On success, immediately start configuration (once only). */
6024 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6025 res == L2CAP_CR_SUCCESS) {
6027 set_bit(CONF_REQ_SENT, &chan->conf_state);
6028 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6030 l2cap_build_conf_req(chan, buf),
6032 chan->num_conf_req++;
6036 l2cap_chan_unlock(chan);
6039 mutex_unlock(&conn->chan_lock);
/* Receive one ACL fragment from HCI and reassemble complete L2CAP
 * frames.  A START fragment carries the Basic L2CAP header, from which
 * the total frame length is read; a complete frame is dispatched
 * directly, otherwise an rx_skb of the full size is allocated and
 * CONTINUATION fragments are appended until rx_len reaches zero.
 * Length violations (short header, over-long frame or fragment,
 * unexpected start/continuation) drop the reassembly state and mark the
 * connection unreliable with ECOMM.
 * NOTE(review): the switch labels and several error-path `goto drop`
 * lines are hidden by gaps in this view — verify against the full file.
 */
6044 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6046 struct l2cap_conn *conn = hcon->l2cap_data;
6047 struct l2cap_hdr *hdr;
6050 /* For AMP controller do not create l2cap conn */
6051 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6055 conn = l2cap_conn_add(hcon, 0);
6060 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6064 case ACL_START_NO_FLUSH:
/* A new start while reassembly is in progress: discard the
 * half-built frame and flag the link. */
6067 BT_ERR("Unexpected start frame (len %d)", skb->len);
6068 kfree_skb(conn->rx_skb);
6069 conn->rx_skb = NULL;
6071 l2cap_conn_unreliable(conn, ECOMM);
6074 /* Start fragment always begin with Basic L2CAP header */
6075 if (skb->len < L2CAP_HDR_SIZE) {
6076 BT_ERR("Frame is too short (len %d)", skb->len);
6077 l2cap_conn_unreliable(conn, ECOMM);
6081 hdr = (struct l2cap_hdr *) skb->data;
/* Total frame size = payload length from header + header itself. */
6082 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6084 if (len == skb->len) {
6085 /* Complete frame received */
6086 l2cap_recv_frame(conn, skb);
6090 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6092 if (skb->len > len) {
6093 BT_ERR("Frame is too long (len %d, expected len %d)",
6095 l2cap_conn_unreliable(conn, ECOMM);
6099 /* Allocate skb for the complete frame (with header) */
6100 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6104 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remember how many bytes are still outstanding. */
6106 conn->rx_len = len - skb->len;
6110 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress. */
6112 if (!conn->rx_len) {
6113 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6114 l2cap_conn_unreliable(conn, ECOMM);
6118 if (skb->len > conn->rx_len) {
6119 BT_ERR("Fragment is too long (len %d, expected %d)",
6120 skb->len, conn->rx_len);
6121 kfree_skb(conn->rx_skb);
6122 conn->rx_skb = NULL;
6124 l2cap_conn_unreliable(conn, ECOMM);
6128 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6130 conn->rx_len -= skb->len;
6132 if (!conn->rx_len) {
6133 /* Complete frame received */
6134 l2cap_recv_frame(conn, conn->rx_skb);
6135 conn->rx_skb = NULL;
/* debugfs seq_file show handler: dump one line per L2CAP channel on the
 * global chan_list (addresses, state, PSM, CIDs, MTUs, security level,
 * mode), under the chan_list read lock.
 */
6145 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6147 struct l2cap_chan *c;
6149 read_lock(&chan_list_lock);
6151 list_for_each_entry(c, &chan_list, global_l) {
6152 struct sock *sk = c->sk;
6154 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6155 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6156 c->state, __le16_to_cpu(c->psm),
6157 c->scid, c->dcid, c->imtu, c->omtu,
6158 c->sec_level, c->mode);
6161 read_unlock(&chan_list_lock);
/* debugfs open: hook the show callback into the single_open seq_file
 * helper.
 */
6166 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6168 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry; standard single_open
 * seq_file plumbing.  NOTE(review): the .read line (presumably
 * seq_read) falls in a gap of this view — confirm in the full file.
 */
6171 static const struct file_operations l2cap_debugfs_fops = {
6172 .open = l2cap_debugfs_open,
6174 .llseek = seq_lseek,
6175 .release = single_release,
/* Dentry for the "l2cap" debugfs file, created in l2cap_init() and
 * removed in l2cap_exit(). */
6178 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, then create the
 * read-only "l2cap" debugfs file.  A debugfs failure is only logged —
 * it does not fail initialization.
 */
6180 int __init l2cap_init(void)
6184 err = l2cap_init_sockets();
6189 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6190 NULL, &l2cap_debugfs_fops);
6192 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs entry and unregister the L2CAP
 * socket layer (reverse of l2cap_init).
 */
6198 void l2cap_exit(void)
6200 debugfs_remove(l2cap_debugfs);
6201 l2cap_cleanup_sockets();
/* Module parameter: allow ERTM (enhanced retransmission mode) to be
 * disabled at load time or via sysfs (mode 0644). */
6204 module_param(disable_ertm, bool, 0644);
6205 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");