2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
/* Feature mask and fixed-channel bitmap this host advertises in the
 * L2CAP information response. */
43 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
44 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of every L2CAP channel on the system, guarded by
 * chan_list_lock (readers take the read lock, mutators the write lock). */
46 static LIST_HEAD(chan_list);
47 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling/tx helpers referenced before their
 * definitions later in this file. */
49 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
50 u8 code, u8 ident, u16 dlen, void *data);
51 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
54 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
55 struct l2cap_chan *chan, int err);
57 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
58 struct sk_buff_head *skbs, u8 event);
60 /* ---- L2CAP channels ---- */
/* Look up a channel on @conn by destination CID. Unlocked "__" variant:
 * caller is expected to hold conn->chan_lock (cf. l2cap_get_chan_by_scid). */
62 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
66 	list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID. Unlocked "__" variant:
 * caller is expected to hold conn->chan_lock (cf. l2cap_get_chan_by_scid). */
73 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
77 	list_for_each_entry(c, &conn->chan_l, list) {
84 /* Find channel with given SCID.
85 * Returns locked channel. */
86 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
/* Serialise against channel list mutation while searching; the channel
 * itself is presumably locked before chan_lock is dropped — the locking
 * of the returned channel is not visible in this excerpt. */
90 	mutex_lock(&conn->chan_lock);
91 	c = __l2cap_get_chan_by_scid(conn, cid);
94 	mutex_unlock(&conn->chan_lock);
/* Look up a channel on @conn by the identifier of an outstanding
 * signalling command. Unlocked variant; caller handles conn->chan_lock. */
99 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 	struct l2cap_chan *c;
103 	list_for_each_entry(c, &conn->chan_l, list) {
104 		if (c->ident == ident)
/* Find a channel in the global list bound to source PSM @psm and source
 * address @src. Caller must hold chan_list_lock (cf. l2cap_add_psm). */
110 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112 	struct l2cap_chan *c;
114 	list_for_each_entry(c, &chan_list, global_l) {
115 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to PSM @psm on source address @src.
 * If @psm is non-zero it must not already be taken for @src; if zero,
 * an unused odd PSM from the dynamic range 0x1001-0x10ff is allocated. */
121 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
125 	write_lock(&chan_list_lock);
127 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* PSMs are odd by spec, hence the step of 2 through the dynamic range. */
140 		for (p = 0x1001; p < 0x1100; p += 2)
141 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
142 				chan->psm   = cpu_to_le16(p);
143 				chan->sport = cpu_to_le16(p);
150 	write_unlock(&chan_list_lock);
/* Bind @chan to a fixed source CID @scid (assignment itself is in lines
 * elided from this excerpt), under the global channel-list write lock. */
154 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
156 	write_lock(&chan_list_lock);
160 	write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic source CID on @conn by linear scan
 * of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
165 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 	u16 cid = L2CAP_CID_DYN_START;
169 	for (; cid < L2CAP_CID_DYN_END; cid++) {
170 		if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition @chan to @state and notify the channel's owner through the
 * state_change callback. Unlocked variant; locking is the caller's job. */
177 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
179 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
180 						state_to_string(state));
183 	chan->ops->state_change(chan, state);
/* Locked wrapper around __l2cap_state_change(); presumably takes the
 * socket lock in the lines elided here — TODO confirm against full source. */
186 static void l2cap_state_change(struct l2cap_chan *chan, int state)
188 	struct sock *sk = chan->sk;
191 	__l2cap_state_change(chan, state);
/* Record error @err on the channel's socket (the assignment itself is in
 * lines elided from this excerpt). Unlocked variant. */
195 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
197 	struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(); presumably takes the
 * socket lock in the lines elided here — TODO confirm against full source. */
202 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
204 	struct sock *sk = chan->sk;
207 	__l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only when a retrans timeout is
 * configured and the monitor timer is not already pending (the monitor
 * timer supersedes retransmission once it is running). */
211 static void __set_retrans_timer(struct l2cap_chan *chan)
213 	if (!delayed_work_pending(&chan->monitor_timer) &&
214 	    chan->retrans_timeout) {
215 		l2cap_set_timer(chan, &chan->retrans_timer,
216 				msecs_to_jiffies(chan->retrans_timeout));
/* Switch from the retransmission timer to the ERTM monitor timer:
 * cancel retrans, then arm monitor if a monitor timeout is configured. */
220 static void __set_monitor_timer(struct l2cap_chan *chan)
222 	__clear_retrans_timer(chan);
223 	if (chan->monitor_timeout) {
224 		l2cap_set_timer(chan, &chan->monitor_timer,
225 				msecs_to_jiffies(chan->monitor_timeout));
/* Linear-scan @head for the skb whose ERTM control txseq equals @seq;
 * used to locate a queued frame for (re)transmission. */
229 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
234 	skb_queue_walk(head, skb) {
235 		if (bt_cb(skb)->control.txseq == seq)
242 /* ---- L2CAP sequence number lists ---- */
244 /* For ERTM, ordered lists of sequence numbers must be tracked for
245 * SREJ requests that are received and for frames that are to be
246 * retransmitted. These seq_list functions implement a singly-linked
247 * list in an array, where membership in the list can also be checked
248 * in constant time. Items can also be added to the tail of the list
249 * and removed from the head in constant time, without further memory
/* Initialise a seq_list able to hold @size sequence numbers.
 * Backing array is rounded up to a power of two so (seq & mask) maps a
 * 14-bit sequence number into the array in O(1). All slots start CLEAR.
 * Returns 0 on success (error path for a failed kmalloc is in lines
 * elided from this excerpt). */
253 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
255 	size_t alloc_size, i;
257 	/* Allocated size is a power of 2 to map sequence numbers
258 	 * (which may be up to 14 bits) in to a smaller array that is
259 	 * sized for the negotiated ERTM transmit windows.
261 	alloc_size = roundup_pow_of_two(size);
263 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
267 	seq_list->mask = alloc_size - 1;
268 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
269 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
270 	for (i = 0; i < alloc_size; i++)
271 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array of a seq_list (kfree(NULL) is a no-op). */
276 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
278 	kfree(seq_list->list);
/* O(1) membership test: a slot holds a next-pointer (or the TAIL marker)
 * while its sequence number is on the list, and CLEAR otherwise. */
281 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
284 	/* Constant-time check for list membership */
285 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list and return it. Removing the head is O(1);
 * removing an interior element walks the singly-linked chain to find the
 * predecessor. Returns L2CAP_SEQ_LIST_CLEAR if the list is empty or @seq
 * is not present. */
288 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
290 	u16 mask = seq_list->mask;
292 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
293 		/* In case someone tries to pop the head of an empty list */
294 		return L2CAP_SEQ_LIST_CLEAR;
295 	} else if (seq_list->head == seq) {
296 		/* Head can be removed in constant time */
297 		seq_list->head = seq_list->list[seq & mask];
298 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removing the only element: reset the list to the empty state. */
300 		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
301 			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
302 			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
305 		/* Walk the list to find the sequence number */
306 		u16 prev = seq_list->head;
307 		while (seq_list->list[prev & mask] != seq) {
308 			prev = seq_list->list[prev & mask];
309 			if (prev == L2CAP_SEQ_LIST_TAIL)
310 				return L2CAP_SEQ_LIST_CLEAR;
313 		/* Unlink the number from the list and clear it */
314 		seq_list->list[prev & mask] = seq_list->list[seq & mask];
315 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
316 		if (seq_list->tail == seq)
317 			seq_list->tail = prev;
/* Pop and return the head of the list in O(1) (delegates to remove,
 * which has a constant-time head path). */
322 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
324 	/* Remove the head in constant time */
325 	return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: clear every slot and reset head/tail. Early-exits when
 * already empty so the common case avoids touching the whole array. */
328 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
332 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
335 	for (i = 0; i <= seq_list->mask; i++)
336 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
339 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq to the tail in O(1). A sequence number already on the list
 * (slot not CLEAR) is ignored, preserving the no-duplicates invariant. */
342 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
344 	u16 mask = seq_list->mask;
346 	/* All appends happen in constant time */
348 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
/* Empty list: the new element becomes the head as well as the tail. */
351 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
352 		seq_list->head = seq;
354 		seq_list->list[seq_list->tail & mask] = seq;
356 	seq_list->tail = seq;
357 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer (chan_timer): the channel
 * did not reach/leave a state in time, so close it. The close reason
 * depends on the state the channel is stuck in; ops->close() is invoked
 * after the channel lock is dropped, and the timer's reference on the
 * channel is released at the end. */
360 static void l2cap_chan_timeout(struct work_struct *work)
362 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
364 	struct l2cap_conn *conn = chan->conn;
367 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
369 	mutex_lock(&conn->chan_lock);
370 	l2cap_chan_lock(chan);
372 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
373 		reason = ECONNREFUSED;
374 	else if (chan->state == BT_CONNECT &&
375 					chan->sec_level != BT_SECURITY_SDP)
376 		reason = ECONNREFUSED;
380 	l2cap_chan_close(chan, reason);
382 	l2cap_chan_unlock(chan);
384 	chan->ops->close(chan);
385 	mutex_unlock(&conn->chan_lock);
387 	l2cap_chan_put(chan);
/* Allocate and initialise a new channel: zeroed, registered on the
 * global channel list, timer work initialised, state BT_OPEN, refcount 1.
 * Returns the channel (the NULL-on-allocation-failure path is in lines
 * elided from this excerpt). */
390 struct l2cap_chan *l2cap_chan_create(void)
392 	struct l2cap_chan *chan;
394 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
398 	mutex_init(&chan->lock);
400 	write_lock(&chan_list_lock);
401 	list_add(&chan->global_l, &chan_list);
402 	write_unlock(&chan_list_lock);
404 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
406 	chan->state = BT_OPEN;
408 	atomic_set(&chan->refcnt, 1);
410 	/* This flag is cleared in l2cap_chan_ready() */
411 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
413 	BT_DBG("chan %p", chan);
/* Unregister @chan from the global channel list and drop the creation
 * reference taken in l2cap_chan_create(). */
418 void l2cap_chan_destroy(struct l2cap_chan *chan)
420 	write_lock(&chan_list_lock);
421 	list_del(&chan->global_l);
422 	write_unlock(&chan_list_lock);
424 	l2cap_chan_put(chan);
/* Reset the negotiable channel parameters (FCS, max transmit count,
 * transmit window, security level) to their protocol defaults and mark
 * the channel force-active. */
427 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
429 	chan->fcs  = L2CAP_FCS_CRC16;
430 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
431 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
432 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
433 	chan->sec_level = BT_SECURITY_LOW;
435 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: assign source/destination CIDs and output MTU
 * according to the channel type (LE data channel, dynamically-allocated
 * CID for ACL connection-oriented, fixed CIDs for connectionless and raw
 * channels), seed the default best-effort flow spec, take a reference and
 * link the channel into the connection's channel list.
 * Caller must hold conn->chan_lock (cf. l2cap_chan_add). */
438 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
440 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
441 			__le16_to_cpu(chan->psm), chan->dcid);
443 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
447 	switch (chan->chan_type) {
448 	case L2CAP_CHAN_CONN_ORIENTED:
449 		if (conn->hcon->type == LE_LINK) {
/* LE data channels use the fixed LE CID on both sides. */
451 			chan->omtu = L2CAP_LE_DEFAULT_MTU;
452 			chan->scid = L2CAP_CID_LE_DATA;
453 			chan->dcid = L2CAP_CID_LE_DATA;
455 			/* Alloc CID for connection-oriented socket */
456 			chan->scid = l2cap_alloc_cid(conn);
457 			chan->omtu = L2CAP_DEFAULT_MTU;
461 	case L2CAP_CHAN_CONN_LESS:
462 		/* Connectionless socket */
463 		chan->scid = L2CAP_CID_CONN_LESS;
464 		chan->dcid = L2CAP_CID_CONN_LESS;
465 		chan->omtu = L2CAP_DEFAULT_MTU;
469 		/* Raw socket can send/recv signalling messages only */
470 		chan->scid = L2CAP_CID_SIGNALING;
471 		chan->dcid = L2CAP_CID_SIGNALING;
472 		chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended flow specification: best-effort service. */
475 	chan->local_id		= L2CAP_BESTEFFORT_ID;
476 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
477 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
478 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
479 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
480 	chan->local_flush_to	= L2CAP_DEFAULT_FLUSH_TO;
482 	l2cap_chan_hold(chan);
484 	list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(): serialises channel-list
 * mutation via conn->chan_lock. */
487 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
489 	mutex_lock(&conn->chan_lock);
490 	__l2cap_chan_add(conn, chan);
491 	mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection with error @err: stop the channel
 * timer, unlink from the connection's list, drop the references taken at
 * add time, notify the owner via ops->teardown, and release per-mode
 * resources (ERTM timers/queues/seq-lists, streaming tx queue). */
494 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
496 	struct l2cap_conn *conn = chan->conn;
498 	__clear_chan_timer(chan);
500 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
503 		/* Delete from channel list */
504 		list_del(&chan->list);
506 		l2cap_chan_put(chan);
/* Drop the hci_conn reference the channel held on the link. */
509 		hci_conn_put(conn->hcon);
512 	if (chan->ops->teardown)
513 		chan->ops->teardown(chan, err);
515 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
519 	case L2CAP_MODE_BASIC:
522 	case L2CAP_MODE_ERTM:
523 		__clear_retrans_timer(chan);
524 		__clear_monitor_timer(chan);
525 		__clear_ack_timer(chan);
527 		skb_queue_purge(&chan->srej_q);
529 		l2cap_seq_list_free(&chan->srej_list);
530 		l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough into streaming cleanup (shared tx queue purge) */
534 	case L2CAP_MODE_STREAMING:
535 		skb_queue_purge(&chan->tx_q);
/* Close @chan with @reason, dispatching on its current state:
 * - connected/configuring ACL connection-oriented channels send a
 *   disconnect request and arm the channel timer; others are deleted
 *   directly;
 * - a BT_CONNECT2 ACL channel (incoming, not yet accepted) answers the
 *   pending connect request with a reject (security block if setup was
 *   deferred, otherwise bad PSM) before moving to BT_DISCONN;
 * - remaining states just tear down the owner's state. */
542 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
544 	struct l2cap_conn *conn = chan->conn;
545 	struct sock *sk = chan->sk;
547 	BT_DBG("chan %p state %s sk %p", chan,
548 					state_to_string(chan->state), sk);
550 	switch (chan->state) {
552 		if (chan->ops->teardown)
553 			chan->ops->teardown(chan, 0);
558 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
559 					conn->hcon->type == ACL_LINK) {
560 			__set_chan_timer(chan, sk->sk_sndtimeo);
561 			l2cap_send_disconn_req(conn, chan, reason);
563 			l2cap_chan_del(chan, reason);
567 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
568 					conn->hcon->type == ACL_LINK) {
569 			struct l2cap_conn_rsp rsp;
572 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
573 				result = L2CAP_CR_SEC_BLOCK;
575 				result = L2CAP_CR_BAD_PSM;
576 			l2cap_state_change(chan, BT_DISCONN);
/* Note: scid/dcid are swapped in the response, as seen from the peer. */
578 			rsp.scid   = cpu_to_le16(chan->dcid);
579 			rsp.dcid   = cpu_to_le16(chan->scid);
580 			rsp.result = cpu_to_le16(result);
581 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
582 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
586 		l2cap_chan_del(chan, reason);
591 		l2cap_chan_del(chan, reason);
595 		if (chan->ops->teardown)
596 			chan->ops->teardown(chan, 0);
/* Map the channel's type, PSM and security level onto an HCI
 * authentication requirement:
 * - raw channels use dedicated bonding (with MITM at high security);
 * - SDP connections never bond (and LOW security is downgraded to the
 *   SDP-specific level first);
 * - everything else uses general bonding (with MITM at high security). */
601 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
603 	if (chan->chan_type == L2CAP_CHAN_RAW) {
604 		switch (chan->sec_level) {
605 		case BT_SECURITY_HIGH:
606 			return HCI_AT_DEDICATED_BONDING_MITM;
607 		case BT_SECURITY_MEDIUM:
608 			return HCI_AT_DEDICATED_BONDING;
610 			return HCI_AT_NO_BONDING;
612 	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
613 		if (chan->sec_level == BT_SECURITY_LOW)
614 			chan->sec_level = BT_SECURITY_SDP;
616 		if (chan->sec_level == BT_SECURITY_HIGH)
617 			return HCI_AT_NO_BONDING_MITM;
619 			return HCI_AT_NO_BONDING;
621 		switch (chan->sec_level) {
622 		case BT_SECURITY_HIGH:
623 			return HCI_AT_GENERAL_BONDING_MITM;
624 		case BT_SECURITY_MEDIUM:
625 			return HCI_AT_GENERAL_BONDING;
627 			return HCI_AT_NO_BONDING;
632 /* Service level security */
/* Verify (and if necessary trigger) the link security required by this
 * channel's security level; returns the hci_conn_security() result. */
633 int l2cap_chan_check_security(struct l2cap_chan *chan)
635 	struct l2cap_conn *conn = chan->conn;
638 	auth_type = l2cap_get_auth_type(chan);
640 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn, wrapping
 * within the kernel-reserved 1-128 range, under conn->lock. */
643 static u8 l2cap_get_ident(struct l2cap_conn *conn)
647 	/* Get next available identificator.
648 	 *    1 - 128 are used by kernel.
649 	 *  129 - 199 are reserved.
650 	 *  200 - 254 are used by utilities like l2ping, etc.
653 	spin_lock(&conn->lock);
655 	if (++conn->tx_ident > 128)
660 	spin_unlock(&conn->lock);
/* Build an L2CAP signalling command (@code/@ident with @len bytes of
 * @data) and transmit it on the connection's HCI channel at maximum
 * priority, forcing the link active and flagging it non-flushable when
 * the controller supports that. */
665 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
667 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
670 	BT_DBG("code 0x%2.2x", code);
675 	if (lmp_no_flush_capable(conn->hcon->hdev))
676 		flags = ACL_START_NO_FLUSH;
680 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
681 	skb->priority = HCI_PRIO_MAX;
683 	hci_send_acl(conn->hchan, skb, flags);
/* Hand a fully built PDU to the HCI layer for this channel, choosing the
 * no-flush ACL start flag when the channel is not flushable and the
 * controller supports non-flushable packets, and propagating the
 * channel's force-active flag. */
686 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
688 	struct hci_conn *hcon = chan->conn->hcon;
691 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
694 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
695 					lmp_no_flush_capable(hcon->hdev))
696 		flags = ACL_START_NO_FLUSH;
700 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
701 	hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit ERTM enhanced control field into @control: reqseq and
 * final bits are common; S-frames additionally carry poll and supervise
 * bits, I-frames carry SAR and txseq. */
704 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
706 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
707 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
709 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
712 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
713 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
720 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
721 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field into @control; same layout idea
 * as the enhanced variant but with the wider extended-window fields. */
728 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
730 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
731 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
733 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
736 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
737 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
744 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
745 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Parse the control field at the front of @skb into bt_cb(skb)->control,
 * choosing extended (32-bit) or enhanced (16-bit) format from the
 * channel's FLAG_EXT_CTRL, then strip it from the skb. */
752 static inline void __unpack_control(struct l2cap_chan *chan,
755 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
756 		__unpack_extended_control(get_unaligned_le32(skb->data),
757 					  &bt_cb(skb)->control);
758 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
760 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
761 					  &bt_cb(skb)->control);
762 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode @control into a 32-bit extended control field (inverse of
 * __unpack_extended_control): S-frames set the frame-type bit plus
 * poll/supervise, I-frames carry SAR and txseq. */
766 static u32 __pack_extended_control(struct l2cap_ctrl *control)
770 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
771 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
773 	if (control->sframe) {
774 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
775 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
776 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
778 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
779 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode @control into a 16-bit enhanced control field (inverse of
 * __unpack_enhanced_control). */
785 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
789 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
790 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
792 	if (control->sframe) {
793 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
794 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
795 		packed |= L2CAP_CTRL_FRAME_TYPE;
797 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
798 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into @skb directly after the basic
 * L2CAP header, in the format (extended vs enhanced) selected by the
 * channel's FLAG_EXT_CTRL. */
804 static inline void __pack_control(struct l2cap_chan *chan,
805 				  struct l2cap_ctrl *control,
808 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
809 		put_unaligned_le32(__pack_extended_control(control),
810 				   skb->data + L2CAP_HDR_SIZE);
812 		put_unaligned_le16(__pack_enhanced_control(control),
813 				   skb->data + L2CAP_HDR_SIZE);
/* Build an ERTM S-frame PDU: basic L2CAP header, the already-packed
 * @control field (16- or 32-bit per FLAG_EXT_CTRL) and, when CRC16 FCS
 * is negotiated, a trailing FCS over the whole frame. Returns the skb or
 * ERR_PTR(-ENOMEM). */
817 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
821 	struct l2cap_hdr *lh;
824 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
825 		hlen = L2CAP_EXT_HDR_SIZE;
827 		hlen = L2CAP_ENH_HDR_SIZE;
829 	if (chan->fcs == L2CAP_FCS_CRC16)
830 		hlen += L2CAP_FCS_SIZE;
832 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
835 		return ERR_PTR(-ENOMEM);
837 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
/* Header length field excludes the basic header itself. */
838 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
839 	lh->cid = cpu_to_le16(chan->dcid);
841 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
842 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
844 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
846 	if (chan->fcs == L2CAP_FCS_CRC16) {
847 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
848 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
851 	skb->priority = HCI_PRIO_MAX;
/* Transmit the supervisory frame described by @control: fold in a
 * pending F-bit if one is owed, track RR/RNR busy signalling, record the
 * acked sequence (and cancel the ack timer) for non-SREJ frames, then
 * pack, build and send the PDU. */
855 static void l2cap_send_sframe(struct l2cap_chan *chan,
856 			      struct l2cap_ctrl *control)
861 	BT_DBG("chan %p, control %p", chan, control);
863 	if (!control->sframe)
/* A deferred F-bit (CONN_SEND_FBIT) is consumed by this frame. */
866 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
870 	if (control->super == L2CAP_SUPER_RR)
871 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
872 	else if (control->super == L2CAP_SUPER_RNR)
873 		set_bit(CONN_RNR_SENT, &chan->conn_state);
875 	if (control->super != L2CAP_SUPER_SREJ) {
876 		chan->last_acked_seq = control->reqseq;
877 		__clear_ack_timer(chan);
880 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
881 	       control->final, control->poll, control->super);
883 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
884 		control_field = __pack_extended_control(control);
886 		control_field = __pack_enhanced_control(control);
888 	skb = l2cap_create_sframe_pdu(chan, control_field);
890 		l2cap_do_send(chan, skb);
/* Send a Receiver-Ready, or Receiver-Not-Ready when the local side is
 * busy (CONN_LOCAL_BUSY), acknowledging up to buffer_seq. @poll selects
 * the P-bit (set in lines elided from this excerpt). */
893 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
895 	struct l2cap_ctrl control;
897 	BT_DBG("chan %p, poll %d", chan, poll);
899 	memset(&control, 0, sizeof(control));
903 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
904 		control.super = L2CAP_SUPER_RNR;
906 		control.super = L2CAP_SUPER_RR;
908 	control.reqseq = chan->buffer_seq;
909 	l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
912 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
914 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP connect request for @chan: allocate a fresh command
 * identifier, mark the connect as pending and transmit the request. */
917 static void l2cap_send_conn_req(struct l2cap_chan *chan)
919 	struct l2cap_conn *conn = chan->conn;
920 	struct l2cap_conn_req req;
922 	req.scid = cpu_to_le16(chan->scid);
925 	chan->ident = l2cap_get_ident(conn);
927 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
929 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Mark @chan fully connected: clear all configuration flags (including
 * CONF_NOT_COMPLETE set at create time), stop the channel timer, enter
 * BT_CONNECTED and notify the owner via ops->ready. */
932 static void l2cap_chan_ready(struct l2cap_chan *chan)
934 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
935 	chan->conf_state = 0;
936 	__clear_chan_timer(chan);
938 	chan->state = BT_CONNECTED;
940 	if (chan->ops->ready)
941 		chan->ops->ready(chan);
/* Drive channel setup on @chan's link:
 * - LE links have no L2CAP configuration phase, so go straight to ready;
 * - on BR/EDR, if the peer's feature mask is already known (or being
 *   fetched), send the connect request once security allows and no
 *   connect is pending; otherwise first issue an information request for
 *   the feature mask, guarded by the info timer. */
944 static void l2cap_do_start(struct l2cap_chan *chan)
946 	struct l2cap_conn *conn = chan->conn;
948 	if (conn->hcon->type == LE_LINK) {
949 		l2cap_chan_ready(chan);
953 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
954 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
957 		if (l2cap_chan_check_security(chan) &&
958 				__l2cap_no_conn_pending(chan))
959 			l2cap_send_conn_req(chan);
961 		struct l2cap_info_req req;
962 		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
964 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
965 		conn->info_ident = l2cap_get_ident(conn);
967 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
969 		l2cap_send_cmd(conn, conn->info_ident,
970 					L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether transmission mode @mode is usable given the remote
 * feature mask @feat_mask and our local capabilities (ERTM and streaming
 * are added to the local mask here; the surrounding condition is in a
 * line elided from this excerpt). Non-zero means supported. */
974 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
976 	u32 local_feat_mask = l2cap_feat_mask;
978 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
981 	case L2CAP_MODE_ERTM:
982 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
983 	case L2CAP_MODE_STREAMING:
984 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request for @chan and move it to BT_DISCONN with
 * error @err. ERTM timers are stopped first so no retransmission fires
 * while the channel is being torn down. */
990 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
992 	struct sock *sk = chan->sk;
993 	struct l2cap_disconn_req req;
998 	if (chan->mode == L2CAP_MODE_ERTM) {
999 		__clear_retrans_timer(chan);
1000 		__clear_monitor_timer(chan);
1001 		__clear_ack_timer(chan);
1004 	req.dcid = cpu_to_le16(chan->dcid);
1005 	req.scid = cpu_to_le16(chan->scid);
1006 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
1007 			L2CAP_DISCONN_REQ, sizeof(req), &req);
1010 	__l2cap_state_change(chan, BT_DISCONN);
1011 	__l2cap_chan_set_err(chan, err);
1015 /* ---- L2CAP connections ---- */
/* Kick off setup for every connection-oriented channel on @conn (called
 * once the remote feature mask is known):
 * - channels in BT_CONNECT send their connect request, or are closed if
 *   their mode is unsupported and state-2 device config forbids fallback;
 * - channels in BT_CONNECT2 (incoming) answer the pending connect
 *   request — success, authorisation-pending (deferred setup) or
 *   authentication-pending — and on success immediately send the first
 *   configure request. */
1016 static void l2cap_conn_start(struct l2cap_conn *conn)
1018 	struct l2cap_chan *chan, *tmp;
1020 	BT_DBG("conn %p", conn);
1022 	mutex_lock(&conn->chan_lock);
1024 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1025 		struct sock *sk = chan->sk;
1027 		l2cap_chan_lock(chan);
1029 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1030 			l2cap_chan_unlock(chan);
1034 		if (chan->state == BT_CONNECT) {
1035 			if (!l2cap_chan_check_security(chan) ||
1036 					!__l2cap_no_conn_pending(chan)) {
1037 				l2cap_chan_unlock(chan);
1041 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1042 					&& test_bit(CONF_STATE2_DEVICE,
1043 					&chan->conf_state)) {
1044 				l2cap_chan_close(chan, ECONNRESET);
1045 				l2cap_chan_unlock(chan);
1049 			l2cap_send_conn_req(chan);
1051 		} else if (chan->state == BT_CONNECT2) {
1052 			struct l2cap_conn_rsp rsp;
1054 			rsp.scid = cpu_to_le16(chan->dcid);
1055 			rsp.dcid = cpu_to_le16(chan->scid);
1057 			if (l2cap_chan_check_security(chan)) {
1059 				if (test_bit(BT_SK_DEFER_SETUP,
1060 					     &bt_sk(sk)->flags)) {
1061 					struct sock *parent = bt_sk(sk)->parent;
1062 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1063 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Wake the listening socket so userspace can accept/authorise. */
1065 						parent->sk_data_ready(parent, 0);
1068 					__l2cap_state_change(chan, BT_CONFIG);
1069 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1070 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1074 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1075 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1078 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1081 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1082 					rsp.result != L2CAP_CR_SUCCESS) {
1083 				l2cap_chan_unlock(chan);
1087 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1088 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1089 						l2cap_build_conf_req(chan, buf), buf);
1090 			chan->num_conf_req++;
1093 		l2cap_chan_unlock(chan);
1096 	mutex_unlock(&conn->chan_lock);
1099 /* Find socket with cid and source/destination bdaddr.
1100 * Returns closest match, locked.
/* An exact src+dst match returns immediately; otherwise the best
 * wildcard (BDADDR_ANY) candidate found during the scan is kept in c1
 * and returned after the loop (return path elided in this excerpt). */
1102 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1106 	struct l2cap_chan *c, *c1 = NULL;
1108 	read_lock(&chan_list_lock);
1110 	list_for_each_entry(c, &chan_list, global_l) {
1111 		struct sock *sk = c->sk;
1113 		if (state && c->state != state)
1116 		if (c->scid == cid) {
1117 			int src_match, dst_match;
1118 			int src_any, dst_any;
1121 			src_match = !bacmp(&bt_sk(sk)->src, src);
1122 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1123 			if (src_match && dst_match) {
1124 				read_unlock(&chan_list_lock);
1129 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1130 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1131 			if ((src_match && dst_any) || (src_any && dst_match) ||
1132 			    (src_any && dst_any))
1137 	read_unlock(&chan_list_lock);
/* Accept an incoming LE connection: find a listening channel on the LE
 * data CID matching the link addresses, spawn a child channel via
 * ops->new_connection, hold the hci_conn, copy addresses into the child
 * socket, queue it on the listener's accept queue, attach it to the
 * connection and mark it ready. */
1142 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1144 	struct sock *parent, *sk;
1145 	struct l2cap_chan *chan, *pchan;
1149 	/* Check if we have socket listening on cid */
1150 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1151 							conn->src, conn->dst);
1159 	/* Check for backlog size */
1160 	if (sk_acceptq_is_full(parent)) {
1161 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
1165 	chan = pchan->ops->new_connection(pchan);
1171 	hci_conn_hold(conn->hcon);
1173 	bacpy(&bt_sk(sk)->src, conn->src);
1174 	bacpy(&bt_sk(sk)->dst, conn->dst);
1176 	bt_accept_enqueue(parent, sk);
1178 	l2cap_chan_add(conn, chan);
1180 	l2cap_chan_ready(chan);
1183 	release_sock(parent);
/* Called when the underlying link is up. For incoming LE links, accept
 * pending listeners; for outgoing LE links, kick off SMP security. Then
 * walk every channel on the connection: LE channels become ready once
 * SMP security passes, non-connection-oriented channels go straight to
 * BT_CONNECTED, and connection-oriented channels in BT_CONNECT start
 * their L2CAP setup. */
1186 static void l2cap_conn_ready(struct l2cap_conn *conn)
1188 	struct l2cap_chan *chan;
1190 	BT_DBG("conn %p", conn);
1192 	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1193 		l2cap_le_conn_ready(conn);
1195 	if (conn->hcon->out && conn->hcon->type == LE_LINK)
1196 		smp_conn_security(conn, conn->hcon->pending_sec_level);
1198 	mutex_lock(&conn->chan_lock);
1200 	list_for_each_entry(chan, &conn->chan_l, list) {
1202 		l2cap_chan_lock(chan);
1204 		if (conn->hcon->type == LE_LINK) {
1205 			if (smp_conn_security(conn, chan->sec_level))
1206 				l2cap_chan_ready(chan);
1208 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1209 			struct sock *sk = chan->sk;
1210 			__clear_chan_timer(chan);
1212 			__l2cap_state_change(chan, BT_CONNECTED);
1213 			sk->sk_state_change(sk);
1216 		} else if (chan->state == BT_CONNECT)
1217 			l2cap_do_start(chan);
1219 		l2cap_chan_unlock(chan);
1222 	mutex_unlock(&conn->chan_lock);
1225 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate error @err to every channel on @conn that demanded a
 * reliable link (FLAG_FORCE_RELIABLE). */
1226 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1228 	struct l2cap_chan *chan;
1230 	BT_DBG("conn %p", conn);
1232 	mutex_lock(&conn->chan_lock);
1234 	list_for_each_entry(chan, &conn->chan_l, list) {
1235 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1236 			__l2cap_chan_set_err(chan, err);
1239 	mutex_unlock(&conn->chan_lock);
/* Info-request timer expired without a response: give up on the feature
 * mask exchange, mark it done and proceed with channel setup anyway. */
1242 static void l2cap_info_timeout(struct work_struct *work)
1244 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1247 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1248 	conn->info_ident = 0;
1250 	l2cap_conn_start(conn);
/* Tear down the L2CAP connection on @hcon with error @err: free any
 * partial reassembly skb, delete every channel (holding a temporary ref
 * around each so ops->close can run safely after unlock), drop the HCI
 * channel, cancel the info/security timers and detach from the hcon. */
1253 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1255 	struct l2cap_conn *conn = hcon->l2cap_data;
1256 	struct l2cap_chan *chan, *l;
1261 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1263 	kfree_skb(conn->rx_skb);
1265 	mutex_lock(&conn->chan_lock);
1268 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1269 		l2cap_chan_hold(chan);
1270 		l2cap_chan_lock(chan);
1272 		l2cap_chan_del(chan, err);
1274 		l2cap_chan_unlock(chan);
1276 		chan->ops->close(chan);
1277 		l2cap_chan_put(chan);
1280 	mutex_unlock(&conn->chan_lock);
1282 	hci_chan_del(conn->hchan);
1284 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1285 		cancel_delayed_work_sync(&conn->info_timer);
1287 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1288 		cancel_delayed_work_sync(&conn->security_timer);
1289 		smp_chan_destroy(conn);
1292 	hcon->l2cap_data = NULL;
/* SMP security procedure timed out: drop the whole connection. */
1296 static void security_timeout(struct work_struct *work)
1298 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1299 						security_timer.work);
1301 	l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate an HCI
 * channel plus the conn structure, pick the MTU from the link type
 * (LE vs ACL), record the local/remote addresses, initialise locks and
 * the channel list, and arm the appropriate timer (SMP security timer
 * for LE, info timer for BR/EDR). Returns NULL paths are in lines
 * elided from this excerpt. */
1304 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1306 	struct l2cap_conn *conn = hcon->l2cap_data;
1307 	struct hci_chan *hchan;
1312 	hchan = hci_chan_create(hcon);
1316 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1318 		hci_chan_del(hchan);
1322 	hcon->l2cap_data = conn;
1324 	conn->hchan = hchan;
1326 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1328 	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1329 		conn->mtu = hcon->hdev->le_mtu;
1331 		conn->mtu = hcon->hdev->acl_mtu;
1333 	conn->src = &hcon->hdev->bdaddr;
1334 	conn->dst = &hcon->dst;
1336 	conn->feat_mask = 0;
1338 	spin_lock_init(&conn->lock);
1339 	mutex_init(&conn->chan_lock);
1341 	INIT_LIST_HEAD(&conn->chan_l);
1343 	if (hcon->type == LE_LINK)
1344 		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1346 		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1348 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1353 /* ---- Socket interface ---- */
1355 /* Find socket with psm and source / destination bdaddr.
1356 * Returns closest match.
/* Same closest-match strategy as l2cap_global_chan_by_scid, keyed on PSM:
 * exact src+dst wins immediately, otherwise the best wildcard candidate
 * is remembered in c1 (return path elided in this excerpt). */
1358 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1362 	struct l2cap_chan *c, *c1 = NULL;
1364 	read_lock(&chan_list_lock);
1366 	list_for_each_entry(c, &chan_list, global_l) {
1367 		struct sock *sk = c->sk;
1369 		if (state && c->state != state)
1372 		if (c->psm == psm) {
1373 			int src_match, dst_match;
1374 			int src_any, dst_any;
1377 			src_match = !bacmp(&bt_sk(sk)->src, src);
1378 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1379 			if (src_match && dst_match) {
1380 				read_unlock(&chan_list_lock);
1385 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1386 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1387 			if ((src_match && dst_any) || (src_any && dst_match) ||
1388 			    (src_any && dst_any))
1393 	read_unlock(&chan_list_lock);
/* Initiate an outgoing connection on @chan to @dst (PSM @psm or fixed
 * CID @cid): validate the PSM (must be odd with zero lsb of the upper
 * byte, unless a CID or raw channel is used), check mode and state,
 * resolve the route to an hci_dev, create/attach the HCI link (LE or
 * ACL with the security level this channel requires), attach the channel
 * to the connection and either start setup immediately (link already up)
 * or wait for connect completion under the channel timer.
 * Returns 0 on success or a negative errno. */
1398 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1399 						bdaddr_t *dst, u8 dst_type)
1401 	struct sock *sk = chan->sk;
1402 	bdaddr_t *src = &bt_sk(sk)->src;
1403 	struct l2cap_conn *conn;
1404 	struct hci_conn *hcon;
1405 	struct hci_dev *hdev;
1409 	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1410 							dst_type, __le16_to_cpu(chan->psm));
1412 	hdev = hci_get_route(dst, src);
1414 		return -EHOSTUNREACH;
1418 	l2cap_chan_lock(chan);
1420 	/* PSM must be odd and lsb of upper byte must be 0 */
1421 	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1422 					chan->chan_type != L2CAP_CHAN_RAW) {
1427 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1432 	switch (chan->mode) {
1433 	case L2CAP_MODE_BASIC:
1435 	case L2CAP_MODE_ERTM:
1436 	case L2CAP_MODE_STREAMING:
1445 	switch (chan->state) {
1449 		/* Already connecting */
1454 		/* Already connected */
1468 	/* Set destination address and psm */
1470 	bacpy(&bt_sk(sk)->dst, dst);
1476 	auth_type = l2cap_get_auth_type(chan);
1478 	if (chan->dcid == L2CAP_CID_LE_DATA)
1479 		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1480 				   chan->sec_level, auth_type);
1482 		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1483 				   chan->sec_level, auth_type);
1486 		err = PTR_ERR(hcon);
1490 	conn = l2cap_conn_add(hcon, 0);
/* LE links carry at most one data channel in this code. */
1497 	if (hcon->type == LE_LINK) {
1500 		if (!list_empty(&conn->chan_l)) {
1509 	/* Update source addr of the socket */
1510 	bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, so drop the channel lock around it
 * to respect the conn-lock-before-chan-lock ordering. */
1512 	l2cap_chan_unlock(chan);
1513 	l2cap_chan_add(conn, chan);
1514 	l2cap_chan_lock(chan);
1516 	l2cap_state_change(chan, BT_CONNECT);
1517 	__set_chan_timer(chan, sk->sk_sndtimeo);
1519 	if (hcon->state == BT_CONNECTED) {
1520 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1521 			__clear_chan_timer(chan);
1522 			if (l2cap_chan_check_security(chan))
1523 				l2cap_state_change(chan, BT_CONNECTED);
1525 			l2cap_do_start(chan);
1531 	l2cap_chan_unlock(chan);
1532 	hci_dev_unlock(hdev);
/* Sleep until every outstanding ERTM I-frame has been acked (or the
 * connection goes away), honoring signals and socket errors.
 * Returns 0 on success or a negative errno.  Some timeout-handling
 * lines are elided in this copy.
 */
1537 int __l2cap_wait_ack(struct sock *sk)
1539 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1540 DECLARE_WAITQUEUE(wait, current);
1544 add_wait_queue(sk_sleep(sk), &wait);
1545 set_current_state(TASK_INTERRUPTIBLE);
1546 while (chan->unacked_frames > 0 && chan->conn) {
1550 if (signal_pending(current)) {
1551 err = sock_intr_errno(timeo);
1556 timeo = schedule_timeout(timeo);
1558 set_current_state(TASK_INTERRUPTIBLE);
1560 err = sock_error(sk);
1564 set_current_state(TASK_RUNNING);
1565 remove_wait_queue(sk_sleep(sk), &wait);
/* Delayed-work handler for the ERTM monitor timer: injects a
 * L2CAP_EV_MONITOR_TO event into the TX state machine under the
 * channel lock, then drops the timer's channel reference.
 * NOTE(review): an early-exit condition (before the first unlock/put
 * pair) is elided in this copy.
 */
1569 static void l2cap_monitor_timeout(struct work_struct *work)
1571 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1572 monitor_timer.work);
1574 BT_DBG("chan %p", chan);
1576 l2cap_chan_lock(chan);
/* Early-exit path: release lock and the reference held by the timer. */
1579 l2cap_chan_unlock(chan);
1580 l2cap_chan_put(chan);
1584 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1586 l2cap_chan_unlock(chan);
1587 l2cap_chan_put(chan);
/* Delayed-work handler for the ERTM retransmission timer: injects a
 * L2CAP_EV_RETRANS_TO event into the TX state machine under the
 * channel lock, then drops the timer's channel reference.
 * NOTE(review): an early-exit condition is elided in this copy.
 */
1590 static void l2cap_retrans_timeout(struct work_struct *work)
1592 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1593 retrans_timer.work);
1595 BT_DBG("chan %p", chan);
1597 l2cap_chan_lock(chan);
/* Early-exit path: release lock and the timer's reference. */
1600 l2cap_chan_unlock(chan);
1601 l2cap_chan_put(chan);
1605 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1606 l2cap_chan_unlock(chan);
1607 l2cap_chan_put(chan);
/* Transmit a queue of segmented PDUs in Streaming mode: append @skbs
 * to the channel TX queue and send everything immediately, stamping
 * each frame with the next TX sequence number and an optional CRC16
 * FCS.  Streaming mode never retransmits, so frames are not kept.
 */
1610 static void l2cap_streaming_send(struct l2cap_chan *chan,
1611 struct sk_buff_head *skbs)
1613 struct sk_buff *skb;
1614 struct l2cap_ctrl *control;
1616 BT_DBG("chan %p, skbs %p", chan, skbs);
1618 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1620 while (!skb_queue_empty(&chan->tx_q)) {
1622 skb = skb_dequeue(&chan->tx_q);
1624 bt_cb(skb)->control.retries = 1;
1625 control = &bt_cb(skb)->control;
/* Streaming mode carries no acknowledgements: reqseq is always 0. */
1627 control->reqseq = 0;
1628 control->txseq = chan->next_tx_seq;
1630 __pack_control(chan, control, skb);
1632 if (chan->fcs == L2CAP_FCS_CRC16) {
1633 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1634 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1637 l2cap_do_send(chan, skb);
1639 BT_DBG("Sent txseq %d", (int)control->txseq);
1641 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1642 chan->frames_sent++;
/* Transmit as many pending ERTM I-frames as the remote TX window
 * allows.  Each frame piggybacks the current ack (reqseq), gets an
 * optional CRC16 FCS, and is sent as a clone so the original stays in
 * tx_q for possible retransmission.  Returns the number of frames
 * sent (return statements elided in this copy).
 */
1646 static int l2cap_ertm_send(struct l2cap_chan *chan)
1648 struct sk_buff *skb, *tx_skb;
1649 struct l2cap_ctrl *control;
1652 BT_DBG("chan %p", chan);
1654 if (chan->state != BT_CONNECTED)
1657 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
/* Send only while within the remote's receive window and the TX state
 * machine is in plain transmit state. */
1660 while (chan->tx_send_head &&
1661 chan->unacked_frames < chan->remote_tx_win &&
1662 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1664 skb = chan->tx_send_head;
1666 bt_cb(skb)->control.retries = 1;
1667 control = &bt_cb(skb)->control;
1669 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggyback the ack for everything received so far. */
1672 control->reqseq = chan->buffer_seq;
1673 chan->last_acked_seq = chan->buffer_seq;
1674 control->txseq = chan->next_tx_seq;
1676 __pack_control(chan, control, skb);
1678 if (chan->fcs == L2CAP_FCS_CRC16) {
1679 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1680 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1683 /* Clone after data has been modified. Data is assumed to be
1684 read-only (for locking purposes) on cloned sk_buffs.
1686 tx_skb = skb_clone(skb, GFP_KERNEL);
1691 __set_retrans_timer(chan);
1693 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1694 chan->unacked_frames++;
1695 chan->frames_sent++;
/* Advance tx_send_head; NULL once the whole queue has been sent. */
1698 if (skb_queue_is_last(&chan->tx_q, skb))
1699 chan->tx_send_head = NULL;
1701 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1703 l2cap_do_send(chan, tx_skb);
1704 BT_DBG("Sent txseq %d", (int)control->txseq);
1707 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1708 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * For each frame: enforce the max_tx retry limit (disconnect when
 * exceeded), refresh the control field with the current reqseq/F-bit,
 * recompute the FCS, and send a fresh clone/copy of the stored skb.
 */
1713 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1715 struct l2cap_ctrl control;
1716 struct sk_buff *skb;
1717 struct sk_buff *tx_skb;
1720 BT_DBG("chan %p", chan);
1722 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1725 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1726 seq = l2cap_seq_list_pop(&chan->retrans_list);
1728 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1730 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1735 bt_cb(skb)->control.retries++;
1736 control = bt_cb(skb)->control;
/* Give up and tear down the channel once the retry budget is spent
 * (max_tx == 0 means unlimited retries). */
1738 if (chan->max_tx != 0 &&
1739 bt_cb(skb)->control.retries > chan->max_tx) {
1740 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1741 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1742 l2cap_seq_list_clear(&chan->retrans_list);
1746 control.reqseq = chan->buffer_seq;
1747 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1752 if (skb_cloned(skb)) {
1753 /* Cloned sk_buffs are read-only, so we need a
1756 tx_skb = skb_copy(skb, GFP_ATOMIC);
1758 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* Allocation failure: drop the whole retransmit batch. */
1762 l2cap_seq_list_clear(&chan->retrans_list);
1766 /* Update skb contents */
1767 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1768 put_unaligned_le32(__pack_extended_control(&control),
1769 tx_skb->data + L2CAP_HDR_SIZE);
1771 put_unaligned_le16(__pack_enhanced_control(&control),
1772 tx_skb->data + L2CAP_HDR_SIZE);
1775 if (chan->fcs == L2CAP_FCS_CRC16) {
1776 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1777 put_unaligned_le16(fcs, skb_put(tx_skb,
1781 l2cap_do_send(chan, tx_skb);
1783 BT_DBG("Resent txseq %d", control.txseq);
1785 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame requested by an SREJ: queue its reqseq
 * on the retrans_list and run the resend machinery.
 */
1789 static void l2cap_retransmit(struct l2cap_chan *chan,
1790 struct l2cap_ctrl *control)
1792 BT_DBG("chan %p, control %p", chan, control);
1794 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1795 l2cap_ertm_resend(chan);
/* Handle a REJ: rebuild the retrans_list with every unacked frame from
 * control->reqseq up to (but not including) tx_send_head, then resend.
 * Sets the F-bit for the next outgoing frame when requested (condition
 * line elided in this copy).
 */
1798 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1799 struct l2cap_ctrl *control)
1801 struct sk_buff *skb;
1803 BT_DBG("chan %p, control %p", chan, control);
1806 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Start from a clean slate; REJ supersedes any pending SREJs. */
1808 l2cap_seq_list_clear(&chan->retrans_list);
1810 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1813 if (chan->unacked_frames) {
/* First walk: find the frame the peer asked us to restart from. */
1814 skb_queue_walk(&chan->tx_q, skb) {
1815 if (bt_cb(skb)->control.txseq == control->reqseq ||
1816 skb == chan->tx_send_head)
/* Second walk: queue everything already sent but not yet acked. */
1820 skb_queue_walk_from(&chan->tx_q, skb) {
1821 if (skb == chan->tx_send_head)
1824 l2cap_seq_list_append(&chan->retrans_list,
1825 bt_cb(skb)->control.txseq);
1828 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  If locally busy, send RNR; otherwise
 * try to piggyback the ack on pending I-frames, send an immediate RR
 * when the window is ~3/4 consumed, or fall back to (re)arming the ack
 * timer.  Several branch/else lines are elided in this copy.
 */
1832 static void l2cap_send_ack(struct l2cap_chan *chan)
1834 struct l2cap_ctrl control;
1835 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1836 chan->last_acked_seq);
1839 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1840 chan, chan->last_acked_seq, chan->buffer_seq);
1842 memset(&control, 0, sizeof(control));
/* Locally busy: tell the peer to hold off with an RNR S-frame. */
1845 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1846 chan->rx_state == L2CAP_RX_STATE_RECV) {
1847 __clear_ack_timer(chan);
1848 control.super = L2CAP_SUPER_RNR;
1849 control.reqseq = chan->buffer_seq;
1850 l2cap_send_sframe(chan, &control);
1852 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1853 l2cap_ertm_send(chan);
1854 /* If any i-frames were sent, they included an ack */
1855 if (chan->buffer_seq == chan->last_acked_seq)
1859 /* Ack now if the tx window is 3/4ths full.
1860 * Calculate without mul or div
1862 threshold = chan->tx_win;
/* threshold = tx_win * 3; divided by 4 on an elided line, presumably —
 * TODO confirm against full source. */
1863 threshold += threshold << 1;
1866 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1869 if (frames_to_ack >= threshold) {
1870 __clear_ack_timer(chan);
1871 control.super = L2CAP_SUPER_RR;
1872 control.reqseq = chan->buffer_seq;
1873 l2cap_send_sframe(chan, &control);
1878 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb, spilling anything
 * beyond the first @count bytes into frag_list continuation skbs sized
 * to the HCI MTU.  Returns 0 on success or a negative errno (loop
 * header and error-return lines elided in this copy).
 */
1882 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1883 struct msghdr *msg, int len,
1884 int count, struct sk_buff *skb)
1886 struct l2cap_conn *conn = chan->conn;
1887 struct sk_buff **frag;
/* First chunk goes straight into the head skb. */
1890 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1896 /* Continuation fragments (no L2CAP header) */
1897 frag = &skb_shinfo(skb)->frag_list;
1899 struct sk_buff *tmp;
1901 count = min_t(unsigned int, conn->mtu, len);
1903 tmp = chan->ops->alloc_skb(chan, count,
1904 msg->msg_flags & MSG_DONTWAIT);
1906 return PTR_ERR(tmp);
1910 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1913 (*frag)->priority = skb->priority;
/* Account fragment bytes on the parent skb. */
1918 skb->len += (*frag)->len;
1919 skb->data_len += (*frag)->len;
1921 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, then the payload copied from the user iovec.  Returns
 * the skb or an ERR_PTR on failure.
 */
1927 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1928 struct msghdr *msg, size_t len,
1931 struct l2cap_conn *conn = chan->conn;
1932 struct sk_buff *skb;
1933 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1934 struct l2cap_hdr *lh;
1936 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
/* Head skb holds at most one HCI MTU worth of header+payload. */
1938 count = min_t(unsigned int, (conn->mtu - hlen), len);
1940 skb = chan->ops->alloc_skb(chan, count + hlen,
1941 msg->msg_flags & MSG_DONTWAIT);
1945 skb->priority = priority;
1947 /* Create L2CAP header */
1948 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1949 lh->cid = cpu_to_le16(chan->dcid);
1950 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1951 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1953 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1954 if (unlikely(err < 0)) {
1956 return ERR_PTR(err);
/* Build a Basic-mode B-frame: plain L2CAP header followed by the
 * payload from the user iovec.  Returns the skb or an ERR_PTR.
 */
1961 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1962 struct msghdr *msg, size_t len,
1965 struct l2cap_conn *conn = chan->conn;
1966 struct sk_buff *skb;
1968 struct l2cap_hdr *lh;
1970 BT_DBG("chan %p len %d", chan, (int)len);
1972 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1974 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1975 msg->msg_flags & MSG_DONTWAIT);
1979 skb->priority = priority;
1981 /* Create L2CAP header */
1982 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1983 lh->cid = cpu_to_le16(chan->dcid);
1984 lh->len = cpu_to_le16(len);
1986 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1987 if (unlikely(err < 0)) {
1989 return ERR_PTR(err);
/* Build one ERTM/Streaming I-frame: L2CAP header, a zeroed control
 * field (enhanced or extended, filled in at transmit time), an
 * optional SDU-length field for the first segment, and the payload.
 * Reserves room for the FCS when CRC16 is enabled.  Returns the skb
 * or an ERR_PTR.
 */
1994 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1995 struct msghdr *msg, size_t len,
1998 struct l2cap_conn *conn = chan->conn;
1999 struct sk_buff *skb;
2000 int err, count, hlen;
2001 struct l2cap_hdr *lh;
2003 BT_DBG("chan %p len %d", chan, (int)len);
2006 return ERR_PTR(-ENOTCONN);
/* Header size depends on the control-field format in use. */
2008 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2009 hlen = L2CAP_EXT_HDR_SIZE;
2011 hlen = L2CAP_ENH_HDR_SIZE;
/* First segment of a segmented SDU carries the total SDU length. */
2014 hlen += L2CAP_SDULEN_SIZE;
2016 if (chan->fcs == L2CAP_FCS_CRC16)
2017 hlen += L2CAP_FCS_SIZE;
2019 count = min_t(unsigned int, (conn->mtu - hlen), len);
2021 skb = chan->ops->alloc_skb(chan, count + hlen,
2022 msg->msg_flags & MSG_DONTWAIT);
2026 /* Create L2CAP header */
2027 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2028 lh->cid = cpu_to_le16(chan->dcid);
2029 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2031 /* Control header is populated later */
2032 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2033 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2035 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2038 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2040 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2041 if (unlikely(err < 0)) {
2043 return ERR_PTR(err);
2046 bt_cb(skb)->control.fcs = chan->fcs;
2047 bt_cb(skb)->control.retries = 0;
/* Segment an outgoing SDU into I-frame PDUs on @seg_queue.  PDU size
 * is bounded by the HCI MTU (so each PDU fits one HCI fragment), the
 * BR/EDR payload cap, header/FCS overhead, and the remote MPS.  SAR
 * values mark unsegmented / start / continue / end.  Loop structure
 * and final return are partially elided in this copy.
 */
2051 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2052 struct sk_buff_head *seg_queue,
2053 struct msghdr *msg, size_t len)
2055 struct sk_buff *skb;
2061 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2063 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2064 * so fragmented skbs are not used. The HCI layer's handling
2065 * of fragmented skbs is not compatible with ERTM's queueing.
2068 /* PDU size is derived from the HCI MTU */
2069 pdu_len = chan->conn->mtu;
2071 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2073 /* Adjust for largest possible L2CAP overhead. */
2075 pdu_len -= L2CAP_FCS_SIZE;
2077 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2078 pdu_len -= L2CAP_EXT_HDR_SIZE;
2080 pdu_len -= L2CAP_ENH_HDR_SIZE;
2082 /* Remote device may have requested smaller PDUs */
2083 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2085 if (len <= pdu_len) {
2086 sar = L2CAP_SAR_UNSEGMENTED;
2090 sar = L2CAP_SAR_START;
/* First segment also carries the SDU length field. */
2092 pdu_len -= L2CAP_SDULEN_SIZE;
2096 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2099 __skb_queue_purge(seg_queue);
2100 return PTR_ERR(skb);
2103 bt_cb(skb)->control.sar = sar;
2104 __skb_queue_tail(seg_queue, skb);
/* After the START segment, subsequent PDUs have no SDU-length field. */
2109 pdu_len += L2CAP_SDULEN_SIZE;
2112 if (len <= pdu_len) {
2113 sar = L2CAP_SAR_END;
2116 sar = L2CAP_SAR_CONTINUE;
/* Top-level send path for a channel.  Dispatches on channel type and
 * mode: connectionless PDU, Basic-mode B-frame, or ERTM/Streaming
 * (segment first, then hand to the state machine / streaming sender).
 * Returns bytes sent or a negative errno (several returns elided in
 * this copy).
 */
2123 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2126 struct sk_buff *skb;
2128 struct sk_buff_head seg_queue;
2130 /* Connectionless channel */
2131 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2132 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2134 return PTR_ERR(skb);
2136 l2cap_do_send(chan, skb);
2140 switch (chan->mode) {
2141 case L2CAP_MODE_BASIC:
2142 /* Check outgoing MTU */
2143 if (len > chan->omtu)
2146 /* Create a basic PDU */
2147 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2149 return PTR_ERR(skb);
2151 l2cap_do_send(chan, skb);
2155 case L2CAP_MODE_ERTM:
2156 case L2CAP_MODE_STREAMING:
2157 /* Check outgoing MTU */
2158 if (len > chan->omtu) {
2163 __skb_queue_head_init(&seg_queue);
2165 /* Do segmentation before calling in to the state machine,
2166 * since it's possible to block while waiting for memory
2169 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2171 /* The channel could have been closed while segmenting,
2172 * check that it is still connected.
2174 if (chan->state != BT_CONNECTED) {
2175 __skb_queue_purge(&seg_queue);
2182 if (chan->mode == L2CAP_MODE_ERTM)
2183 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2185 l2cap_streaming_send(chan, &seg_queue);
2189 /* If the skbs were not queued for sending, they'll still be in
2190 * seg_queue and need to be purged.
2192 __skb_queue_purge(&seg_queue);
2196 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and @txseq (skipping frames already buffered in
 * srej_q), recording each on srej_list for later matching.
 */
2203 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2205 struct l2cap_ctrl control;
2208 BT_DBG("chan %p, txseq %d", chan, txseq);
2210 memset(&control, 0, sizeof(control));
2212 control.super = L2CAP_SUPER_SREJ;
2214 for (seq = chan->expected_tx_seq; seq != txseq;
2215 seq = __next_seq(chan, seq)) {
2216 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2217 control.reqseq = seq;
2218 l2cap_send_sframe(chan, &control);
2219 l2cap_seq_list_append(&chan->srej_list, seq);
/* Everything up to and including @txseq is now accounted for. */
2223 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number, if any is outstanding.
 */
2226 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2228 struct l2cap_ctrl control;
2230 BT_DBG("chan %p", chan);
2232 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2235 memset(&control, 0, sizeof(control));
2237 control.super = L2CAP_SUPER_SREJ;
2238 control.reqseq = chan->srej_list.tail;
2239 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding sequence number except @txseq
 * (which just arrived).  Each popped entry is re-appended, so the
 * initial head is captured to guarantee exactly one pass.
 */
2242 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2244 struct l2cap_ctrl control;
2248 BT_DBG("chan %p, txseq %d", chan, txseq);
2250 memset(&control, 0, sizeof(control));
2252 control.super = L2CAP_SUPER_SREJ;
2254 /* Capture initial list head to allow only one pass through the list. */
2255 initial_head = chan->srej_list.head;
2258 seq = l2cap_seq_list_pop(&chan->srej_list);
2259 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2262 control.reqseq = seq;
2263 l2cap_send_sframe(chan, &control);
2264 l2cap_seq_list_append(&chan->srej_list, seq);
2265 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every TX-queued frame with
 * a sequence number up to (but not including) @reqseq, update
 * expected_ack_seq, and stop the retransmission timer once nothing is
 * outstanding.
 */
2268 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2270 struct sk_buff *acked_skb;
2273 BT_DBG("chan %p, reqseq %d", chan, reqseq);
/* Nothing new acknowledged — fast exit. */
2275 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2278 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2279 chan->expected_ack_seq, chan->unacked_frames);
2281 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2282 ackseq = __next_seq(chan, ackseq)) {
2284 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2286 skb_unlink(acked_skb, &chan->tx_q);
2287 kfree_skb(acked_skb);
2288 chan->unacked_frames--;
2292 chan->expected_ack_seq = reqseq;
2294 if (chan->unacked_frames == 0)
2295 __clear_retrans_timer(chan);
2297 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: discard buffered out-of-order
 * frames, clear pending SREJs, rewind expected_tx_seq to buffer_seq,
 * and return the RX state machine to plain RECV.
 */
2300 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2302 BT_DBG("chan %p", chan);
2304 chan->expected_tx_seq = chan->buffer_seq;
2305 l2cap_seq_list_clear(&chan->srej_list);
2306 skb_queue_purge(&chan->srej_q);
2307 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state.  Handles data requests (queue and
 * send), local-busy enter/leave, acks (reqseq processing), explicit
 * polls, and retransmission timeouts — poll-related events transition
 * to WAIT_F.  Switch header and break lines are elided in this copy.
 */
2310 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2311 struct l2cap_ctrl *control,
2312 struct sk_buff_head *skbs, u8 event)
2314 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2318 case L2CAP_EV_DATA_REQUEST:
2319 if (chan->tx_send_head == NULL)
2320 chan->tx_send_head = skb_peek(skbs);
2322 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2323 l2cap_ertm_send(chan);
2325 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2326 BT_DBG("Enter LOCAL_BUSY");
2327 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2329 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2330 /* The SREJ_SENT state must be aborted if we are to
2331 * enter the LOCAL_BUSY state.
2333 l2cap_abort_rx_srej_sent(chan);
2336 l2cap_send_ack(chan);
2339 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2340 BT_DBG("Exit LOCAL_BUSY");
2341 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we sent RNR, poll the peer with RR(P=1) and await the F-bit. */
2343 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2344 struct l2cap_ctrl local_control;
2346 memset(&local_control, 0, sizeof(local_control));
2347 local_control.sframe = 1;
2348 local_control.super = L2CAP_SUPER_RR;
2349 local_control.poll = 1;
2350 local_control.reqseq = chan->buffer_seq;
2351 l2cap_send_sframe(chan, &local_control);
2353 chan->retry_count = 1;
2354 __set_monitor_timer(chan);
2355 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2358 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2359 l2cap_process_reqseq(chan, control->reqseq);
2361 case L2CAP_EV_EXPLICIT_POLL:
2362 l2cap_send_rr_or_rnr(chan, 1);
2363 chan->retry_count = 1;
2364 __set_monitor_timer(chan);
2365 __clear_ack_timer(chan);
2366 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
/* Retransmission timer fired: poll the peer and wait for F-bit. */
2368 case L2CAP_EV_RETRANS_TO:
2369 l2cap_send_rr_or_rnr(chan, 1);
2370 chan->retry_count = 1;
2371 __set_monitor_timer(chan);
2372 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2374 case L2CAP_EV_RECV_FBIT:
2375 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state (poll outstanding).  Data is
 * queued but not sent; a received F-bit returns to XMIT; monitor
 * timeouts re-poll up to max_tx times, then disconnect.  Switch header
 * and break lines are elided in this copy.
 */
2382 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2383 struct l2cap_ctrl *control,
2384 struct sk_buff_head *skbs, u8 event)
2386 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2390 case L2CAP_EV_DATA_REQUEST:
2391 if (chan->tx_send_head == NULL)
2392 chan->tx_send_head = skb_peek(skbs);
2393 /* Queue data, but don't send. */
2394 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2396 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2397 BT_DBG("Enter LOCAL_BUSY");
2398 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2400 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2401 /* The SREJ_SENT state must be aborted if we are to
2402 * enter the LOCAL_BUSY state.
2404 l2cap_abort_rx_srej_sent(chan);
2407 l2cap_send_ack(chan);
2410 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2411 BT_DBG("Exit LOCAL_BUSY");
2412 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we sent RNR, re-poll with RR(P=1) and stay in WAIT_F. */
2414 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2415 struct l2cap_ctrl local_control;
2416 memset(&local_control, 0, sizeof(local_control));
2417 local_control.sframe = 1;
2418 local_control.super = L2CAP_SUPER_RR;
2419 local_control.poll = 1;
2420 local_control.reqseq = chan->buffer_seq;
2421 l2cap_send_sframe(chan, &local_control);
2423 chan->retry_count = 1;
2424 __set_monitor_timer(chan);
2425 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2428 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2429 l2cap_process_reqseq(chan, control->reqseq);
2433 case L2CAP_EV_RECV_FBIT:
/* F-bit answers our poll: stop monitoring and resume transmission. */
2434 if (control && control->final) {
2435 __clear_monitor_timer(chan);
2436 if (chan->unacked_frames > 0)
2437 __set_retrans_timer(chan);
2438 chan->retry_count = 0;
2439 chan->tx_state = L2CAP_TX_STATE_XMIT;
2440 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2443 case L2CAP_EV_EXPLICIT_POLL:
2446 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means retry forever; otherwise give up and disconnect
 * once the retry budget is exhausted. */
2447 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2448 l2cap_send_rr_or_rnr(chan, 1);
2449 __set_monitor_timer(chan);
2450 chan->retry_count++;
2452 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* ERTM TX state-machine dispatcher: route @event to the handler for
 * the current tx_state (XMIT or WAIT_F).
 */
2460 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2461 struct sk_buff_head *skbs, u8 event)
2463 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2464 chan, control, skbs, event, chan->tx_state);
2466 switch (chan->tx_state) {
2467 case L2CAP_TX_STATE_XMIT:
2468 l2cap_tx_state_xmit(chan, control, skbs, event);
2470 case L2CAP_TX_STATE_WAIT_F:
2471 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's reqseq/F-bit to the TX state machine. */
2479 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2480 struct l2cap_ctrl *control)
2482 BT_DBG("chan %p, control %p", chan, control);
2483 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only a received F-bit to the TX state machine. */
2486 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2487 struct l2cap_ctrl *control)
2489 BT_DBG("chan %p, control %p", chan, control);
2490 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2493 /* Copy frame to all raw sockets on that connection */
/* Clone @skb once per RAW channel on @conn and deliver via the
 * channel's recv op, skipping the originating socket (skip condition
 * elided in this copy).  Clone failures are silently ignored.
 */
2494 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2496 struct sk_buff *nskb;
2497 struct l2cap_chan *chan;
2499 BT_DBG("conn %p", conn);
2501 mutex_lock(&conn->chan_lock);
2503 list_for_each_entry(chan, &conn->chan_l, list) {
2504 struct sock *sk = chan->sk;
2505 if (chan->chan_type != L2CAP_CHAN_RAW)
2508 /* Don't send frame to the socket it came from */
2511 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv() taking ownership failed: free our clone. */
2515 if (chan->ops->recv(chan, nskb))
2519 mutex_unlock(&conn->chan_lock);
2522 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-channel PDU: L2CAP header (CID is the
 * LE or BR/EDR signalling channel), command header (code/ident/len),
 * and @dlen bytes of @data, spilling overflow into frag_list skbs
 * bounded by the HCI MTU.  Returns NULL on allocation failure
 * (failure-path lines elided in this copy).
 */
2523 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2524 u8 code, u8 ident, u16 dlen, void *data)
2526 struct sk_buff *skb, **frag;
2527 struct l2cap_cmd_hdr *cmd;
2528 struct l2cap_hdr *lh;
2531 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2532 conn, code, ident, dlen);
2534 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2535 count = min_t(unsigned int, conn->mtu, len);
2537 skb = bt_skb_alloc(count, GFP_ATOMIC);
2541 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2542 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2544 if (conn->hcon->type == LE_LINK)
2545 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2547 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2549 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2552 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload fits in the head skb after both headers. */
2555 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2556 memcpy(skb_put(skb, count), data, count);
2562 /* Continuation fragments (no L2CAP header) */
2563 frag = &skb_shinfo(skb)->frag_list;
2565 count = min_t(unsigned int, conn->mtu, len);
2567 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2571 memcpy(skb_put(*frag, count), data, count);
2576 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: report its type and length,
 * decode 1/2/4-byte values inline (larger values are returned as a
 * pointer), advance *ptr, and return the total bytes consumed
 * (switch/advance/return lines elided in this copy).
 */
2586 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2588 struct l2cap_conf_opt *opt = *ptr;
2591 len = L2CAP_CONF_OPT_SIZE + opt->len;
2599 *val = *((u8 *) opt->val);
2603 *val = get_unaligned_le16(opt->val);
2607 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw value bytes. */
2611 *val = (unsigned long) opt->val;
2615 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr: 1/2/4-byte values are
 * stored inline (unaligned-safe), anything larger is memcpy'd from the
 * pointer passed in @val.  Advances *ptr past the option.
 */
2619 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2621 struct l2cap_conf_opt *opt = *ptr;
2623 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2630 *((u8 *) opt->val) = val;
2634 put_unaligned_le16(val, opt->val);
2638 put_unaligned_le32(val, opt->val);
/* For larger options @val is really a pointer to the value bytes. */
2642 memcpy(opt->val, (void *) val, len);
2646 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters.  ERTM uses the negotiated service
 * type; Streaming forces best-effort.  Some field assignments are
 * elided in this copy.
 */
2649 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2651 struct l2cap_conf_efs efs;
2653 switch (chan->mode) {
2654 case L2CAP_MODE_ERTM:
2655 efs.id = chan->local_id;
2656 efs.stype = chan->local_stype;
2657 efs.msdu = cpu_to_le16(chan->local_msdu);
2658 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2659 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2660 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2663 case L2CAP_MODE_STREAMING:
2665 efs.stype = L2CAP_SERV_BESTEFFORT;
2666 efs.msdu = cpu_to_le16(chan->local_msdu);
2667 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2676 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2677 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged (frames_to_ack check elided in this copy),
 * send an RR/RNR, then drop the timer's channel reference.
 */
2680 static void l2cap_ack_timeout(struct work_struct *work)
2682 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2686 BT_DBG("chan %p", chan);
2688 l2cap_chan_lock(chan);
2690 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2691 chan->last_acked_seq);
2694 l2cap_send_rr_or_rnr(chan, 0);
2696 l2cap_chan_unlock(chan);
2697 l2cap_chan_put(chan);
/* Reset per-channel sequencing/queue state for ERTM or Streaming mode.
 * For ERTM additionally set the initial RX/TX states, arm the three
 * timers' work items, and allocate the SREJ and retransmit sequence
 * lists (freeing srej_list again if the second allocation fails).
 * Returns 0 or a negative errno.
 */
2700 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2704 chan->next_tx_seq = 0;
2705 chan->expected_tx_seq = 0;
2706 chan->expected_ack_seq = 0;
2707 chan->unacked_frames = 0;
2708 chan->buffer_seq = 0;
2709 chan->frames_sent = 0;
2710 chan->last_acked_seq = 0;
2712 chan->sdu_last_frag = NULL;
2715 skb_queue_head_init(&chan->tx_q);
/* Streaming mode needs none of the ERTM-specific machinery below. */
2717 if (chan->mode != L2CAP_MODE_ERTM)
2720 chan->rx_state = L2CAP_RX_STATE_RECV;
2721 chan->tx_state = L2CAP_TX_STATE_XMIT;
2723 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2724 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2725 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2727 skb_queue_head_init(&chan->srej_q);
2729 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2733 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2735 l2cap_seq_list_free(&chan->srej_list);
/* Choose the channel mode: keep ERTM/Streaming when the remote's
 * feature mask supports it, otherwise fall back to Basic mode.
 */
2740 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2743 case L2CAP_MODE_STREAMING:
2744 case L2CAP_MODE_ERTM:
2745 if (l2cap_mode_supported(mode, remote_feat_mask))
2749 return L2CAP_MODE_BASIC;
/* Extended Window Size usable: high-speed enabled and peer advertises
 * the feature. */
2753 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2755 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec usable: high-speed enabled and peer advertises
 * the feature. */
2758 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2760 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Pick the TX window format: if the requested window exceeds the
 * default and extended window size is supported, switch to the
 * extended control field; otherwise clamp to the default window.
 */
2763 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2765 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2766 __l2cap_ews_supported(chan)) {
2767 /* use extended control field */
2768 set_bit(FLAG_EXT_CTRL, &chan->flags);
2769 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2771 chan->tx_win = min_t(u16, chan->tx_win,
2772 L2CAP_DEFAULT_TX_WINDOW);
2773 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into @data: MTU option when it
 * differs from the default, then a mode-specific RFC option (Basic /
 * ERTM / Streaming) plus EFS, FCS, and EWS options where applicable.
 * Returns the request length (return line elided in this copy).
 */
2777 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2779 struct l2cap_conf_req *req = data;
2780 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2781 void *ptr = req->data;
2784 BT_DBG("chan %p", chan);
/* Mode selection only happens on the first configure round. */
2786 if (chan->num_conf_req || chan->num_conf_rsp)
2789 switch (chan->mode) {
2790 case L2CAP_MODE_STREAMING:
2791 case L2CAP_MODE_ERTM:
2792 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2795 if (__l2cap_efs_supported(chan))
2796 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote supports. */
2800 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2805 if (chan->imtu != L2CAP_DEFAULT_MTU)
2806 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2808 switch (chan->mode) {
2809 case L2CAP_MODE_BASIC:
/* Peer knows neither ERTM nor Streaming: no RFC option needed. */
2810 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2811 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2814 rfc.mode = L2CAP_MODE_BASIC;
2816 rfc.max_transmit = 0;
2817 rfc.retrans_timeout = 0;
2818 rfc.monitor_timeout = 0;
2819 rfc.max_pdu_size = 0;
2821 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2822 (unsigned long) &rfc);
2825 case L2CAP_MODE_ERTM:
2826 rfc.mode = L2CAP_MODE_ERTM;
2827 rfc.max_transmit = chan->max_tx;
2828 rfc.retrans_timeout = 0;
2829 rfc.monitor_timeout = 0;
/* MPS bounded by the HCI MTU minus worst-case L2CAP overhead. */
2831 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2832 L2CAP_EXT_HDR_SIZE -
2835 rfc.max_pdu_size = cpu_to_le16(size);
2837 l2cap_txwin_setup(chan);
2839 rfc.txwin_size = min_t(u16, chan->tx_win,
2840 L2CAP_DEFAULT_TX_WINDOW);
2842 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2843 (unsigned long) &rfc);
2845 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2846 l2cap_add_opt_efs(&ptr, chan);
2848 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to drop the FCS when allowed by both sides. */
2851 if (chan->fcs == L2CAP_FCS_NONE ||
2852 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2853 chan->fcs = L2CAP_FCS_NONE;
2854 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2857 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2858 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2862 case L2CAP_MODE_STREAMING:
2863 l2cap_txwin_setup(chan);
2864 rfc.mode = L2CAP_MODE_STREAMING;
2866 rfc.max_transmit = 0;
2867 rfc.retrans_timeout = 0;
2868 rfc.monitor_timeout = 0;
2870 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2871 L2CAP_EXT_HDR_SIZE -
2874 rfc.max_pdu_size = cpu_to_le16(size);
2876 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2877 (unsigned long) &rfc);
2879 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2880 l2cap_add_opt_efs(&ptr, chan);
2882 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2885 if (chan->fcs == L2CAP_FCS_NONE ||
2886 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2887 chan->fcs = L2CAP_FCS_NONE;
2888 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2893 req->dcid = cpu_to_le16(chan->dcid);
2894 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (chan->conf_req) and build
 * our Configure Response into 'data'; returns the response length via the
 * pointer walk in 'ptr' (return statement not visible in this elided listing).
 * NOTE(review): the embedded line numbers jump, so braces, 'break's and the
 * option 'switch' header are elided here; comments cover visible lines only.
 */
2899 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2901 struct l2cap_conf_rsp *rsp = data;
2902 void *ptr = rsp->data;
2903 void *req = chan->conf_req;
2904 int len = chan->conf_len;
2905 int type, hint, olen;
/* rfc defaults to Basic mode in case the peer sent no RFC option */
2907 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2908 struct l2cap_conf_efs efs;
2910 u16 mtu = L2CAP_DEFAULT_MTU;
2911 u16 result = L2CAP_CONF_SUCCESS;
2914 BT_DBG("chan %p", chan);
/* First pass: walk every option in the stored request */
2916 while (len >= L2CAP_CONF_OPT_SIZE) {
2917 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit: unknown hinted options may be ignored instead of rejected */
2919 hint = type & L2CAP_CONF_HINT;
2920 type &= L2CAP_CONF_MASK;
2923 case L2CAP_CONF_MTU:
2927 case L2CAP_CONF_FLUSH_TO:
2928 chan->flush_to = val;
2931 case L2CAP_CONF_QOS:
2934 case L2CAP_CONF_RFC:
2935 if (olen == sizeof(rfc))
2936 memcpy(&rfc, (void *) val, olen);
2939 case L2CAP_CONF_FCS:
/* Peer asked for no FCS; remember it for set_default_fcs() */
2940 if (val == L2CAP_FCS_NONE)
2941 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2944 case L2CAP_CONF_EFS:
2946 if (olen == sizeof(efs))
2947 memcpy(&efs, (void *) val, olen);
/* Extended Window Size: switch the channel to extended control fields */
2950 case L2CAP_CONF_EWS:
2952 return -ECONNREFUSED;
2954 set_bit(FLAG_EXT_CTRL, &chan->flags);
2955 set_bit(CONF_EWS_RECV, &chan->conf_state);
2956 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2957 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN */
2964 result = L2CAP_CONF_UNKNOWN;
2965 *((u8 *) ptr++) = type;
/* Mode negotiation only on the first request/response exchange */
2970 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2973 switch (chan->mode) {
2974 case L2CAP_MODE_STREAMING:
2975 case L2CAP_MODE_ERTM:
2976 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2977 chan->mode = l2cap_select_mode(rfc.mode,
2978 chan->conn->feat_mask);
2983 if (__l2cap_efs_supported(chan))
2984 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2986 return -ECONNREFUSED;
2989 if (chan->mode != rfc.mode)
2990 return -ECONNREFUSED;
/* Peer proposed a mode we won't use: counter-propose ours */
2996 if (chan->mode != rfc.mode) {
2997 result = L2CAP_CONF_UNACCEPT;
2998 rfc.mode = chan->mode;
/* Second disagreement on mode means the channel cannot be set up */
3000 if (chan->num_conf_rsp == 1)
3001 return -ECONNREFUSED;
3003 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3004 sizeof(rfc), (unsigned long) &rfc);
3007 if (result == L2CAP_CONF_SUCCESS) {
3008 /* Configure output options and let the other side know
3009 * which ones we don't like. */
3011 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3012 result = L2CAP_CONF_UNACCEPT;
3015 set_bit(CONF_MTU_DONE, &chan->conf_state);
3017 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type mismatch (neither side is NO_TRAFFIC) is rejected */
3020 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3021 efs.stype != L2CAP_SERV_NOTRAFIC &&
3022 efs.stype != chan->local_stype) {
3024 result = L2CAP_CONF_UNACCEPT;
3026 if (chan->num_conf_req >= 1)
3027 return -ECONNREFUSED;
3029 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3031 (unsigned long) &efs);
3033 /* Send PENDING Conf Rsp */
3034 result = L2CAP_CONF_PENDING;
3035 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3040 case L2CAP_MODE_BASIC:
3041 chan->fcs = L2CAP_FCS_NONE;
3042 set_bit(CONF_MODE_DONE, &chan->conf_state);
3045 case L2CAP_MODE_ERTM:
/* Honor the peer's tx window unless an EWS option already set it */
3046 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3047 chan->remote_tx_win = rfc.txwin_size;
3049 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3051 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's max PDU size to what our MTU/headers allow */
3053 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3055 L2CAP_EXT_HDR_SIZE -
3058 rfc.max_pdu_size = cpu_to_le16(size);
3059 chan->remote_mps = size;
3061 rfc.retrans_timeout =
3062 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3063 rfc.monitor_timeout =
3064 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3066 set_bit(CONF_MODE_DONE, &chan->conf_state);
3068 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3069 sizeof(rfc), (unsigned long) &rfc);
/* Record the peer's EFS parameters and echo the option back */
3071 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3072 chan->remote_id = efs.id;
3073 chan->remote_stype = efs.stype;
3074 chan->remote_msdu = le16_to_cpu(efs.msdu);
3075 chan->remote_flush_to =
3076 le32_to_cpu(efs.flush_to);
3077 chan->remote_acc_lat =
3078 le32_to_cpu(efs.acc_lat);
3079 chan->remote_sdu_itime =
3080 le32_to_cpu(efs.sdu_itime);
3081 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3082 sizeof(efs), (unsigned long) &efs);
3086 case L2CAP_MODE_STREAMING:
3087 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3089 L2CAP_EXT_HDR_SIZE -
3092 rfc.max_pdu_size = cpu_to_le16(size);
3093 chan->remote_mps = size;
3095 set_bit(CONF_MODE_DONE, &chan->conf_state);
3097 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3098 sizeof(rfc), (unsigned long) &rfc);
/* Default/fallthrough: refuse and report our own mode back */
3103 result = L2CAP_CONF_UNACCEPT;
3105 memset(&rfc, 0, sizeof(rfc));
3106 rfc.mode = chan->mode;
3109 if (result == L2CAP_CONF_SUCCESS)
3110 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
/* Response addresses the peer by our dcid (their source CID) */
3112 rsp->scid = cpu_to_le16(chan->dcid);
3113 rsp->result = cpu_to_le16(result);
3114 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the peer and build a follow-up Configure
 * Request into 'data', adjusting local parameters to what the peer accepted.
 * '*result' is updated (e.g. to UNACCEPT on a too-small MTU).
 * NOTE(review): listing is elided (line numbers jump) — switch headers,
 * braces and breaks between numbered statements are not visible.
 */
3119 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3121 struct l2cap_conf_req *req = data;
3122 void *ptr = req->data;
3125 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3126 struct l2cap_conf_efs efs;
3128 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3130 while (len >= L2CAP_CONF_OPT_SIZE) {
3131 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3134 case L2CAP_CONF_MTU:
/* Peer demanded an MTU below the L2CAP minimum: refuse, clamp ours */
3135 if (val < L2CAP_DEFAULT_MIN_MTU) {
3136 *result = L2CAP_CONF_UNACCEPT;
3137 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3140 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3143 case L2CAP_CONF_FLUSH_TO:
3144 chan->flush_to = val;
3145 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3149 case L2CAP_CONF_RFC:
3150 if (olen == sizeof(rfc))
3151 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot change mode after the fact */
3153 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3154 rfc.mode != chan->mode)
3155 return -ECONNREFUSED;
3159 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3160 sizeof(rfc), (unsigned long) &rfc);
3163 case L2CAP_CONF_EWS:
3164 chan->tx_win = min_t(u16, val,
3165 L2CAP_DEFAULT_EXT_WINDOW);
3166 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3170 case L2CAP_CONF_EFS:
3171 if (olen == sizeof(efs))
3172 memcpy(&efs, (void *)val, olen);
/* Incompatible EFS service type ends the negotiation */
3174 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3175 efs.stype != L2CAP_SERV_NOTRAFIC &&
3176 efs.stype != chan->local_stype)
3177 return -ECONNREFUSED;
3179 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3180 sizeof(efs), (unsigned long) &efs);
/* A basic-mode channel cannot be talked into another mode */
3185 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3186 return -ECONNREFUSED;
3188 chan->mode = rfc.mode;
/* On SUCCESS/PENDING, latch the negotiated timers and MPS locally */
3190 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3192 case L2CAP_MODE_ERTM:
3193 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3194 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3195 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3197 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3198 chan->local_msdu = le16_to_cpu(efs.msdu);
3199 chan->local_sdu_itime =
3200 le32_to_cpu(efs.sdu_itime);
3201 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3202 chan->local_flush_to =
3203 le32_to_cpu(efs.flush_to);
3207 case L2CAP_MODE_STREAMING:
3208 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3212 req->dcid = cpu_to_le16(chan->dcid);
3213 req->flags = __constant_cpu_to_le16(0);
/* Fill a bare Configure Response header (no options) into 'data' with the
 * given result and flags; return value not visible in this elided listing
 * (presumably the response length — TODO confirm against full source).
 */
3218 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3220 struct l2cap_conf_rsp *rsp = data;
3221 void *ptr = rsp->data;
3223 BT_DBG("chan %p", chan);
3225 rsp->scid = cpu_to_le16(chan->dcid);
3226 rsp->result = cpu_to_le16(result);
3227 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (success) for a channel whose accept
 * was delayed (e.g. DEFER_SETUP), then kick off configuration by sending
 * the first Configure Request if one was not already sent.
 */
3232 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3234 struct l2cap_conn_rsp rsp;
3235 struct l2cap_conn *conn = chan->conn;
3238 rsp.scid = cpu_to_le16(chan->dcid);
3239 rsp.dcid = cpu_to_le16(chan->scid);
3240 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3241 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reuse the ident the peer's Connect Request arrived with */
3242 l2cap_send_cmd(conn, chan->ident,
3243 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Atomically test-and-set so the Conf Req goes out at most once */
3245 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3248 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3249 l2cap_build_conf_req(chan, buf), buf);
3250 chan->num_conf_req++;
/* Extract the RFC option from a Configure Response and cache the negotiated
 * ERTM/streaming parameters on the channel; fall back to sane defaults when
 * a misbehaving peer omitted the RFC option entirely.
 */
3253 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3257 struct l2cap_conf_rfc rfc;
3259 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming channels carry RFC parameters */
3261 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3264 while (len >= L2CAP_CONF_OPT_SIZE) {
3265 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3268 case L2CAP_CONF_RFC:
3269 if (olen == sizeof(rfc))
3270 memcpy(&rfc, (void *)val, olen);
3275 /* Use sane default values in case a misbehaving remote device
3276 * did not send an RFC option.
3278 rfc.mode = chan->mode;
3279 rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3280 rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3281 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3283 BT_ERR("Expected RFC option was not found, using defaults");
3287 case L2CAP_MODE_ERTM:
3288 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3289 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3290 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3292 case L2CAP_MODE_STREAMING:
3293 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our pending Information
 * Request, stop the info timer, mark feature discovery done and start any
 * channels that were waiting on it.
 */
3297 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3299 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3301 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
/* Only react if the reject matches our outstanding info request ident */
3304 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3305 cmd->ident == conn->info_ident) {
3306 cancel_delayed_work(&conn->info_timer);
3308 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3309 conn->info_ident = 0;
3311 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: locate a listening channel for the
 * PSM, security-check the link, create a child channel, and reply with a
 * Connect Response (success, pending, or an error result).  May also kick
 * off feature-mask discovery and the first Configure Request.
 * NOTE(review): listing is elided — error-path labels, lock calls and some
 * braces are not visible between the numbered statements.
 */
3317 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3319 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3320 struct l2cap_conn_rsp rsp;
3321 struct l2cap_chan *chan = NULL, *pchan;
3322 struct sock *parent, *sk = NULL;
3323 int result, status = L2CAP_CS_NO_INFO;
3325 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3326 __le16 psm = req->psm;
3328 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3330 /* Check if we have socket listening on psm */
3331 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3333 result = L2CAP_CR_BAD_PSM;
3339 mutex_lock(&conn->chan_lock);
3342 /* Check if the ACL is secure enough (if not SDP) */
3343 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3344 !hci_conn_check_link_mode(conn->hcon)) {
3345 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3346 result = L2CAP_CR_SEC_BLOCK;
3350 result = L2CAP_CR_NO_MEM;
3352 /* Check for backlog size */
3353 if (sk_acceptq_is_full(parent)) {
3354 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3358 chan = pchan->ops->new_connection(pchan);
3364 /* Check if we already have channel with that dcid */
/* The peer's scid becomes our dcid; refuse duplicates */
3365 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3366 if (chan->ops->teardown)
3367 chan->ops->teardown(chan, 0);
3369 chan->ops->close(chan);
3373 hci_conn_hold(conn->hcon);
3375 bacpy(&bt_sk(sk)->src, conn->src);
3376 bacpy(&bt_sk(sk)->dst, conn->dst);
3380 bt_accept_enqueue(parent, sk);
3382 __l2cap_chan_add(conn, chan);
3386 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident so a deferred response can reuse it */
3388 chan->ident = cmd->ident;
3390 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3391 if (l2cap_chan_check_security(chan)) {
/* Userspace wants to approve: answer "authorization pending" */
3392 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3393 __l2cap_state_change(chan, BT_CONNECT2);
3394 result = L2CAP_CR_PEND;
3395 status = L2CAP_CS_AUTHOR_PEND;
3396 parent->sk_data_ready(parent, 0);
3398 __l2cap_state_change(chan, BT_CONFIG);
3399 result = L2CAP_CR_SUCCESS;
3400 status = L2CAP_CS_NO_INFO;
/* Security not yet satisfied: authentication pending */
3403 __l2cap_state_change(chan, BT_CONNECT2);
3404 result = L2CAP_CR_PEND;
3405 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not known yet: hold the response as pending */
3408 __l2cap_state_change(chan, BT_CONNECT2);
3409 result = L2CAP_CR_PEND;
3410 status = L2CAP_CS_NO_INFO;
3414 release_sock(parent);
3415 mutex_unlock(&conn->chan_lock);
3418 rsp.scid = cpu_to_le16(scid);
3419 rsp.dcid = cpu_to_le16(dcid);
3420 rsp.result = cpu_to_le16(result);
3421 rsp.status = cpu_to_le16(status);
3422 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* If we answered PEND/NO_INFO, start feature-mask discovery now */
3424 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3425 struct l2cap_info_req info;
3426 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3428 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3429 conn->info_ident = l2cap_get_ident(conn);
3431 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3433 l2cap_send_cmd(conn, conn->info_ident,
3434 L2CAP_INFO_REQ, sizeof(info), &info);
/* On immediate success, proceed straight to configuration */
3437 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3438 result == L2CAP_CR_SUCCESS) {
3440 set_bit(CONF_REQ_SENT, &chan->conf_state);
3441 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3442 l2cap_build_conf_req(chan, buf), buf);
3443 chan->num_conf_req++;
/* Handle an incoming Connect Response: look up our channel by scid (or by
 * the command ident if scid is absent/unknown), then act on the result —
 * success enters BT_CONFIG and sends the first Configure Request, pending
 * is noted, anything else tears the channel down.
 */
3449 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3451 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3452 u16 scid, dcid, result, status;
3453 struct l2cap_chan *chan;
3457 scid = __le16_to_cpu(rsp->scid);
3458 dcid = __le16_to_cpu(rsp->dcid);
3459 result = __le16_to_cpu(rsp->result);
3460 status = __le16_to_cpu(rsp->status);
3462 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3463 dcid, scid, result, status);
3465 mutex_lock(&conn->chan_lock);
3468 chan = __l2cap_get_chan_by_scid(conn, scid);
/* Fallback lookup: match the response to the request we sent by ident */
3474 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3483 l2cap_chan_lock(chan);
3486 case L2CAP_CR_SUCCESS:
3487 l2cap_state_change(chan, BT_CONFIG);
3490 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send the first Conf Req exactly once */
3492 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3495 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3496 l2cap_build_conf_req(chan, req), req);
3497 chan->num_conf_req++;
3501 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: refuse and delete the channel */
3505 l2cap_chan_del(chan, ECONNREFUSED);
3509 l2cap_chan_unlock(chan);
3512 mutex_unlock(&conn->chan_lock);
/* Choose the channel's FCS setting after configuration: no FCS outside
 * ERTM/streaming; CRC16 otherwise, unless the peer opted out via the
 * FCS=NONE configuration option (CONF_NO_FCS_RECV).
 */
3517 static inline void set_default_fcs(struct l2cap_chan *chan)
3519 /* FCS is enabled only in ERTM or streaming mode, if one or both
3522 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3523 chan->fcs = L2CAP_FCS_NONE;
3524 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3525 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request.  Accumulates (possibly continued)
 * option data into chan->conf_req, and once complete parses it, sends the
 * Configure Response, and — when both directions are configured — brings
 * the channel up (ERTM init and chan_ready).
 * NOTE(review): listing is elided; unlock/return paths between the
 * numbered statements are not visible here.
 */
3528 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3530 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3533 struct l2cap_chan *chan;
3536 dcid = __le16_to_cpu(req->dcid);
3537 flags = __le16_to_cpu(req->flags);
3539 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3541 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject CID */
3545 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3546 struct l2cap_cmd_rej_cid rej;
3548 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3549 rej.scid = cpu_to_le16(chan->scid);
3550 rej.dcid = cpu_to_le16(chan->dcid);
3552 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3557 /* Reject if config buffer is too small. */
3558 len = cmd_len - sizeof(*req);
3559 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3560 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3561 l2cap_build_conf_rsp(chan, rsp,
3562 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request buffer */
3567 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3568 chan->conf_len += len;
3570 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3571 /* Incomplete config. Send empty response. */
3572 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3573 l2cap_build_conf_rsp(chan, rsp,
3574 L2CAP_CONF_SUCCESS, flags), rsp);
3578 /* Complete config. */
3579 len = l2cap_parse_conf_req(chan, rsp);
/* A negative parse result is fatal for the channel */
3581 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3585 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3586 chan->num_conf_rsp++;
3588 /* Reset config buffer. */
3591 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finish channel setup */
3594 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3595 set_default_fcs(chan);
3597 if (chan->mode == L2CAP_MODE_ERTM ||
3598 chan->mode == L2CAP_MODE_STREAMING)
3599 err = l2cap_ertm_init(chan);
3602 l2cap_send_disconn_req(chan->conn, chan, -err);
3604 l2cap_chan_ready(chan);
/* If we never sent our own Conf Req, do it now */
3609 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3611 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3612 l2cap_build_conf_req(chan, buf), buf);
3613 chan->num_conf_req++;
3616 /* Got Conf Rsp PENDING from remote side and asume we sent
3617 Conf Rsp PENDING in the code above */
3618 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3619 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3621 /* check compatibility */
3623 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3624 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3626 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3627 l2cap_build_conf_rsp(chan, rsp,
3628 L2CAP_CONF_SUCCESS, flags), rsp);
3632 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response.  SUCCESS caches negotiated RFC
 * values; PENDING may trigger sending our own final response; UNACCEPT
 * re-issues a corrected Configure Request up to a retry limit; anything
 * else disconnects.  When both sides are done, finish channel setup.
 * NOTE(review): elided listing — braces/breaks and some error paths are
 * not visible between the numbered statements.
 */
3636 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3638 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3639 u16 scid, flags, result;
3640 struct l2cap_chan *chan;
3641 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3644 scid = __le16_to_cpu(rsp->scid);
3645 flags = __le16_to_cpu(rsp->flags);
3646 result = __le16_to_cpu(rsp->result);
3648 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3651 chan = l2cap_get_chan_by_scid(conn, scid);
3656 case L2CAP_CONF_SUCCESS:
3657 l2cap_conf_rfc_get(chan, rsp->data, len);
3658 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3661 case L2CAP_CONF_PENDING:
3662 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* If our side also answered PENDING, we can now send the final rsp */
3664 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3667 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3670 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3674 /* check compatibility */
3676 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3677 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3679 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3680 l2cap_build_conf_rsp(chan, buf,
3681 L2CAP_CONF_SUCCESS, 0x0000), buf);
3685 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options while under the response limit */
3686 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3689 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3690 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3694 /* throw out any old stored conf requests */
3695 result = L2CAP_CONF_SUCCESS;
3696 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3699 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3703 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3704 L2CAP_CONF_REQ, len, req);
3705 chan->num_conf_req++;
3706 if (result != L2CAP_CONF_SUCCESS)
/* Default: unrecoverable result — error out and disconnect */
3712 l2cap_chan_set_err(chan, ECONNRESET);
3714 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3715 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3719 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3722 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: finalize FCS/ERTM and mark ready */
3724 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3725 set_default_fcs(chan);
3727 if (chan->mode == L2CAP_MODE_ERTM ||
3728 chan->mode == L2CAP_MODE_STREAMING)
3729 err = l2cap_ertm_init(chan);
3732 l2cap_send_disconn_req(chan->conn, chan, -err);
3734 l2cap_chan_ready(chan);
3738 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response, shut down the socket, and remove the channel.  A hold/put pair
 * keeps the channel alive across ops->close() after it is deleted.
 */
3742 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3744 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3745 struct l2cap_disconn_rsp rsp;
3747 struct l2cap_chan *chan;
3750 scid = __le16_to_cpu(req->scid);
3751 dcid = __le16_to_cpu(req->dcid);
3753 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3755 mutex_lock(&conn->chan_lock);
/* The peer's dcid names our local (source) CID */
3757 chan = __l2cap_get_chan_by_scid(conn, dcid);
3759 mutex_unlock(&conn->chan_lock);
3763 l2cap_chan_lock(chan);
3767 rsp.dcid = cpu_to_le16(chan->scid);
3768 rsp.scid = cpu_to_le16(chan->dcid);
3769 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3772 sk->sk_shutdown = SHUTDOWN_MASK;
/* Hold a ref so the close callback runs on a still-valid channel */
3775 l2cap_chan_hold(chan);
3776 l2cap_chan_del(chan, ECONNRESET);
3778 l2cap_chan_unlock(chan);
3780 chan->ops->close(chan);
3781 l2cap_chan_put(chan);
3783 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnect Response to a disconnect we initiated:
 * look up the channel by our scid and delete it (err 0 — clean close),
 * holding a reference across ops->close().
 */
3788 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3790 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3792 struct l2cap_chan *chan;
3794 scid = __le16_to_cpu(rsp->scid);
3795 dcid = __le16_to_cpu(rsp->dcid);
3797 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3799 mutex_lock(&conn->chan_lock);
3801 chan = __l2cap_get_chan_by_scid(conn, scid);
3803 mutex_unlock(&conn->chan_lock);
3807 l2cap_chan_lock(chan);
3809 l2cap_chan_hold(chan);
3810 l2cap_chan_del(chan, 0);
3812 l2cap_chan_unlock(chan);
3814 chan->ops->close(chan);
3815 l2cap_chan_put(chan);
3817 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries from the module-level tables, and NOT_SUPPORTED
 * for anything else.
 */
3822 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3824 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3827 type = __le16_to_cpu(req->type);
3829 BT_DBG("type 0x%4.4x", type);
3831 if (type == L2CAP_IT_FEAT_MASK) {
3833 u32 feat_mask = l2cap_feat_mask;
3834 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3835 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3836 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and, conditionally, ext flow/window) */
3838 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3841 feat_mask |= L2CAP_FEAT_EXT_FLOW
3842 | L2CAP_FEAT_EXT_WINDOW;
3844 put_unaligned_le32(feat_mask, rsp->data);
3845 l2cap_send_cmd(conn, cmd->ident,
3846 L2CAP_INFO_RSP, sizeof(buf), buf);
3847 } else if (type == L2CAP_IT_FIXED_CHAN) {
3849 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Toggle the A2MP bit in the shared fixed-channel table (condition
 * for the toggle is on an elided line — TODO confirm) */
3852 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3854 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3856 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3857 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3858 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3859 l2cap_send_cmd(conn, cmd->ident,
3860 L2CAP_INFO_RSP, sizeof(buf), buf);
3862 struct l2cap_info_rsp rsp;
3863 rsp.type = cpu_to_le16(type);
3864 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3865 l2cap_send_cmd(conn, cmd->ident,
3866 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response to our feature discovery:
 * cache the peer's feature mask, optionally chase it with a fixed-channel
 * query, and once discovery is complete start pending channels.
 */
3872 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3874 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3877 type = __le16_to_cpu(rsp->type);
3878 result = __le16_to_cpu(rsp->result);
3880 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3882 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3883 if (cmd->ident != conn->info_ident ||
3884 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3887 cancel_delayed_work(&conn->info_timer);
/* Failed query: give up on discovery and start channels anyway */
3889 if (result != L2CAP_IR_SUCCESS) {
3890 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3891 conn->info_ident = 0;
3893 l2cap_conn_start(conn);
3899 case L2CAP_IT_FEAT_MASK:
3900 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones */
3902 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3903 struct l2cap_info_req req;
3904 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3906 conn->info_ident = l2cap_get_ident(conn);
3908 l2cap_send_cmd(conn, conn->info_ident,
3909 L2CAP_INFO_REQ, sizeof(req), &req);
3911 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3912 conn->info_ident = 0;
3914 l2cap_conn_start(conn);
3918 case L2CAP_IT_FIXED_CHAN:
3919 conn->fixed_chan_mask = rsp->data[0];
3920 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3921 conn->info_ident = 0;
3923 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Currently a placeholder: every
 * request is answered with NO_MEM (i.e. rejected) after validating the
 * command length.
 */
3930 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3931 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3934 struct l2cap_create_chan_req *req = data;
3935 struct l2cap_create_chan_rsp rsp;
3938 if (cmd_len != sizeof(*req))
3944 psm = le16_to_cpu(req->psm);
3945 scid = le16_to_cpu(req->scid);
3947 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3949 /* Placeholder: Always reject */
3951 rsp.scid = cpu_to_le16(scid);
3952 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3953 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3955 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the Connect Response wire format,
 * so delegate straight to l2cap_connect_rsp().
 */
3961 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3962 struct l2cap_cmd_hdr *cmd, void *data)
3964 BT_DBG("conn %p", conn);
3966 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for 'icid' with the given result, reusing
 * the requester's ident.
 */
3969 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3970 u16 icid, u16 result)
3972 struct l2cap_move_chan_rsp rsp;
3974 BT_DBG("icid %d, result %d", icid, result);
3976 rsp.icid = cpu_to_le16(icid);
3977 rsp.result = cpu_to_le16(result);
3979 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a freshly allocated ident; the ident is
 * also stored on 'chan' so the later Confirm Response can be matched.
 * NOTE(review): 'chan' may be NULL at one call site (l2cap_move_channel_rsp)
 * — the NULL guard is presumably on an elided line; confirm in full source.
 */
3982 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3983 struct l2cap_chan *chan, u16 icid, u16 result)
3985 struct l2cap_move_chan_cfm cfm;
3988 BT_DBG("icid %d, result %d", icid, result);
3990 ident = l2cap_get_ident(conn);
3992 chan->ident = ident;
3994 cfm.icid = cpu_to_le16(icid);
3995 cfm.result = cpu_to_le16(result);
3997 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm by echoing its icid back. */
4000 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4003 struct l2cap_move_chan_cfm_rsp rsp;
4005 BT_DBG("icid %d", icid);
4007 rsp.icid = cpu_to_le16(icid);
4008 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Placeholder implementation: after a
 * length check, always answer NOT_ALLOWED.
 */
4011 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4012 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4014 struct l2cap_move_chan_req *req = data;
4016 u16 result = L2CAP_MR_NOT_ALLOWED;
4018 if (cmd_len != sizeof(*req))
4021 icid = le16_to_cpu(req->icid);
4023 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4028 /* Placeholder: Always refuse */
4029 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder: always answer with an
 * UNCONFIRMED Move Channel Confirm (note the NULL chan argument).
 */
4034 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4035 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4037 struct l2cap_move_chan_rsp *rsp = data;
4040 if (cmd_len != sizeof(*rsp))
4043 icid = le16_to_cpu(rsp->icid);
4044 result = le16_to_cpu(rsp->result);
4046 BT_DBG("icid %d, result %d", icid, result);
4048 /* Placeholder: Always unconfirmed */
4049 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate length and acknowledge it with
 * a Confirm Response (no channel state is changed here).
 */
4054 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4055 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4057 struct l2cap_move_chan_cfm *cfm = data;
4060 if (cmd_len != sizeof(*cfm))
4063 icid = le16_to_cpu(cfm->icid);
4064 result = le16_to_cpu(cfm->result);
4066 BT_DBG("icid %d, result %d", icid, result);
4068 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: only a length check and a debug
 * print — the channel-move state machine is not implemented here.
 */
4073 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4074 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4076 struct l2cap_move_chan_cfm_rsp *rsp = data;
4079 if (cmd_len != sizeof(*rsp))
4082 icid = le16_to_cpu(rsp->icid);
4084 BT_DBG("icid %d", icid);
/* Validate LE connection parameters against the allowed ranges:
 * interval 6..3200 with min <= max, supervision timeout 10..3200, and
 * latency capped both at 499 and at what the timeout/interval permit.
 */
4089 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4094 if (min > max || min < 6 || max > 3200)
4097 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout must exceed the interval, otherwise the link would drop */
4100 if (max >= to_multiplier * 8)
4103 max_latency = (to_multiplier * 8 / max) - 1;
4104 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed parameters, reply accepted/rejected, and if
 * accepted push the new parameters to the controller.
 */
4110 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4111 struct l2cap_cmd_hdr *cmd, u8 *data)
4113 struct hci_conn *hcon = conn->hcon;
4114 struct l2cap_conn_param_update_req *req;
4115 struct l2cap_conn_param_update_rsp rsp;
4116 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may process this request */
4119 if (!(hcon->link_mode & HCI_LM_MASTER))
4122 cmd_len = __le16_to_cpu(cmd->len);
4123 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4126 req = (struct l2cap_conn_param_update_req *) data;
4127 min = __le16_to_cpu(req->min);
4128 max = __le16_to_cpu(req->max);
4129 latency = __le16_to_cpu(req->latency);
4130 to_multiplier = __le16_to_cpu(req->to_multiplier);
4132 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4133 min, max, latency, to_multiplier);
4135 memset(&rsp, 0, sizeof(rsp));
4137 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4139 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4141 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4143 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Apply the accepted parameters at the HCI layer */
4147 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler by opcode.
 * Echo Requests are answered inline; unknown opcodes log an error
 * (and presumably set err — the assignment line is elided here).
 */
4152 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4153 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4157 switch (cmd->code) {
4158 case L2CAP_COMMAND_REJ:
4159 l2cap_command_rej(conn, cmd, data);
4162 case L2CAP_CONN_REQ:
4163 err = l2cap_connect_req(conn, cmd, data);
4166 case L2CAP_CONN_RSP:
4167 err = l2cap_connect_rsp(conn, cmd, data);
4170 case L2CAP_CONF_REQ:
4171 err = l2cap_config_req(conn, cmd, cmd_len, data);
4174 case L2CAP_CONF_RSP:
4175 err = l2cap_config_rsp(conn, cmd, data);
4178 case L2CAP_DISCONN_REQ:
4179 err = l2cap_disconnect_req(conn, cmd, data);
4182 case L2CAP_DISCONN_RSP:
4183 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo: reflect the payload straight back to the sender */
4186 case L2CAP_ECHO_REQ:
4187 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4190 case L2CAP_ECHO_RSP:
4193 case L2CAP_INFO_REQ:
4194 err = l2cap_information_req(conn, cmd, data);
4197 case L2CAP_INFO_RSP:
4198 err = l2cap_information_rsp(conn, cmd, data);
4201 case L2CAP_CREATE_CHAN_REQ:
4202 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4205 case L2CAP_CREATE_CHAN_RSP:
4206 err = l2cap_create_channel_rsp(conn, cmd, data);
4209 case L2CAP_MOVE_CHAN_REQ:
4210 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4213 case L2CAP_MOVE_CHAN_RSP:
4214 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4217 case L2CAP_MOVE_CHAN_CFM:
4218 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4221 case L2CAP_MOVE_CHAN_CFM_RSP:
4222 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4226 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only the Connection Parameter Update
 * Request is actively handled; Command Reject and the Update Response are
 * accepted as no-ops, everything else logs an error.
 */
4234 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4235 struct l2cap_cmd_hdr *cmd, u8 *data)
4237 switch (cmd->code) {
4238 case L2CAP_COMMAND_REJ:
4241 case L2CAP_CONN_PARAM_UPDATE_REQ:
4242 return l2cap_conn_param_update_req(conn, cmd, data);
4244 case L2CAP_CONN_PARAM_UPDATE_RSP:
4248 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the signaling channel: iterate over each
 * command header + payload, sanity-check it, dispatch to the LE or BR/EDR
 * handler depending on link type, and send a Command Reject on error.
 */
4253 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4254 struct sk_buff *skb)
4256 u8 *data = skb->data;
4258 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first */
4261 l2cap_raw_recv(conn, skb);
4263 while (len >= L2CAP_CMD_HDR_SIZE) {
4265 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4266 data += L2CAP_CMD_HDR_SIZE;
4267 len -= L2CAP_CMD_HDR_SIZE;
4269 cmd_len = le16_to_cpu(cmd.len);
4271 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Length must fit the remaining buffer and ident 0 is reserved */
4273 if (cmd_len > len || !cmd.ident) {
4274 BT_DBG("corrupted command");
4278 if (conn->hcon->type == LE_LINK)
4279 err = l2cap_le_sig_cmd(conn, &cmd, data);
4281 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4284 struct l2cap_cmd_rej_unk rej;
4286 BT_ERR("Wrong link type (%d)", err);
4288 /* FIXME: Map err to a valid reason */
4289 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4290 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * The skb is trimmed so the FCS sits just past skb->len, then the CRC is
 * recomputed over header + payload and compared (return paths elided).
 */
4300 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4302 u16 our_fcs, rcv_fcs;
/* Extended control fields use a larger frame header */
4305 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4306 hdr_size = L2CAP_EXT_HDR_SIZE;
4308 hdr_size = L2CAP_ENH_HDR_SIZE;
4310 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After the trim, the received FCS lies at skb->data + skb->len */
4311 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4312 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4313 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4315 if (our_fcs != rcv_fcs)
/* Send whatever frame is appropriate to carry the F-bit after a poll:
 * RNR when locally busy, otherwise pending I-frames, and an RR if the
 * F-bit was still not conveyed by any of those.
 */
4321 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4323 struct l2cap_ctrl control;
4325 BT_DBG("chan %p", chan);
4327 memset(&control, 0, sizeof(control));
4330 control.reqseq = chan->buffer_seq;
/* Mark that the next outgoing frame must carry the final bit */
4331 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4333 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4334 control.super = L2CAP_SUPER_RNR;
4335 l2cap_send_sframe(chan, &control);
/* Peer is no longer busy but we still have unacked frames: rearm */
4338 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4339 chan->unacked_frames > 0)
4340 __set_retrans_timer(chan);
4342 /* Send pending iframes */
4343 l2cap_ertm_send(chan);
4345 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4346 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4347 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4350 control.super = L2CAP_SUPER_RR;
4351 l2cap_send_sframe(chan, &control);
/* Append 'new_frag' to the frag_list of 'skb', tracking the list tail via
 * '*last_frag' so appends stay O(1), and update the aggregate length and
 * truesize accounting on the head skb.
 */
4355 static void append_skb_frag(struct sk_buff *skb,
4356 struct sk_buff *new_frag, struct sk_buff **last_frag)
4358 /* skb->len reflects data in skb as well as all fragments
4359 * skb->data_len reflects only data in fragments
4361 if (!skb_has_frag_list(skb))
4362 skb_shinfo(skb)->frag_list = new_frag;
4364 new_frag->next = NULL;
4366 (*last_frag)->next = new_frag;
4367 *last_frag = new_frag;
4369 skb->len += new_frag->len;
4370 skb->data_len += new_frag->len;
4371 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM/streaming PDUs according to the SAR bits:
 * unsegmented frames go straight up via ops->recv; START reads the SDU
 * length and begins accumulation; CONTINUE/END append fragments and END
 * delivers the completed SDU.  Error paths free the partial SDU.
 * NOTE(review): elided listing — the SAR_END case label and several
 * branch/cleanup lines are not visible between the numbered statements.
 */
4374 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4375 struct l2cap_ctrl *control)
4379 switch (control->sar) {
4380 case L2CAP_SAR_UNSEGMENTED:
4384 err = chan->ops->recv(chan, skb);
4387 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix */
4391 chan->sdu_len = get_unaligned_le16(skb->data);
4392 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Peer-declared SDU length must not exceed our MTU */
4394 if (chan->sdu_len > chan->imtu) {
4399 if (skb->len >= chan->sdu_len)
4403 chan->sdu_last_frag = skb;
4409 case L2CAP_SAR_CONTINUE:
4413 append_skb_frag(chan->sdu, skb,
4414 &chan->sdu_last_frag);
/* A continuation must not already complete the SDU */
4417 if (chan->sdu->len >= chan->sdu_len)
4427 append_skb_frag(chan->sdu, skb,
4428 &chan->sdu_last_frag);
/* Final fragment: total length must match exactly */
4431 if (chan->sdu->len != chan->sdu_len)
4434 err = chan->ops->recv(chan, chan->sdu);
4437 /* Reassembly complete */
4439 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU */
4447 kfree_skb(chan->sdu);
4449 chan->sdu_last_frag = NULL;
/* Signal a local-busy transition to the ERTM state machine. Only
 * meaningful in ERTM mode; maps @busy to the LOCAL_BUSY_DETECTED or
 * LOCAL_BUSY_CLEAR event and feeds it to l2cap_tx().
 */
4456 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4460 if (chan->mode != L2CAP_MODE_ERTM)
4463 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4464 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver sequential frames starting at
 * buffer_seq to l2cap_reassemble_sdu() until a sequence gap is found or
 * we go locally busy. Once the queue is empty, return to the RECV state
 * and ack. Returns 0 or the reassembly error.
 */
4467 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4470 /* Pass sequential frames to l2cap_reassemble_sdu()
4471 * until a gap is encountered.
4474 BT_DBG("chan %p", chan);
4476 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4477 struct sk_buff *skb;
4478 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4479 chan->buffer_seq, skb_queue_len(&chan->srej_q));
/* Look up the next in-order frame; a miss means there is still a gap. */
4481 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4486 skb_unlink(skb, &chan->srej_q);
4487 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4488 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All gaps filled: resume normal reception and acknowledge. */
4493 if (skb_queue_empty(&chan->srej_q)) {
4494 chan->rx_state = L2CAP_RX_STATE_RECV;
4495 l2cap_send_ack(chan);
/* Handle a received SREJ (selective reject) S-frame: retransmit the
 * single frame named by control->reqseq. Disconnects if reqseq is
 * invalid (equals next_tx_seq, i.e. never sent) or the retry limit
 * (max_tx) for that frame is exceeded. P/F bit handling follows the
 * ERTM spec: a poll forces an F-bit response; a final bit is matched
 * against a previously saved SREJ reqseq via CONN_SREJ_ACT.
 */
4501 static void l2cap_handle_srej(struct l2cap_chan *chan,
4502 struct l2cap_ctrl *control)
4504 struct sk_buff *skb;
4506 BT_DBG("chan %p, control %p", chan, control);
/* SREJ for a frame we never sent: protocol violation, disconnect. */
4508 if (control->reqseq == chan->next_tx_seq) {
4509 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4510 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4514 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4517 BT_DBG("Seq %d not available for retransmission",
/* Frame already retried max_tx times: give up and disconnect. */
4522 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4523 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4524 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4528 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4530 if (control->poll) {
4531 l2cap_pass_to_tx(chan, control);
/* Poll received: answer with F-bit set and retransmit + flush. */
4533 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4534 l2cap_retransmit(chan, control);
4535 l2cap_ertm_send(chan);
4537 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4538 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4539 chan->srej_save_reqseq = control->reqseq;
4542 l2cap_pass_to_tx_fbit(chan, control);
4544 if (control->final) {
/* Only retransmit if this F-bit doesn't match the SREJ we
 * already acted on (srej_save_reqseq / CONN_SREJ_ACT).
 */
4545 if (chan->srej_save_reqseq != control->reqseq ||
4546 !test_and_clear_bit(CONN_SREJ_ACT,
4548 l2cap_retransmit(chan, control);
4550 l2cap_retransmit(chan, control);
4551 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4552 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4553 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit all unacked frames from
 * control->reqseq onward. Mirrors l2cap_handle_srej()'s validation:
 * disconnect on an invalid reqseq or when the first rejected frame has
 * already hit the max_tx retry limit. CONN_REJ_ACT suppresses a second
 * retransmit-all when the F-bit answers a REJ we already acted on.
 */
4559 static void l2cap_handle_rej(struct l2cap_chan *chan,
4560 struct l2cap_ctrl *control)
4562 struct sk_buff *skb;
4564 BT_DBG("chan %p, control %p", chan, control);
/* REJ for a frame we never sent: protocol violation, disconnect. */
4566 if (control->reqseq == chan->next_tx_seq) {
4567 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4568 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4572 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4574 if (chan->max_tx && skb &&
4575 bt_cb(skb)->control.retries >= chan->max_tx) {
4576 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4577 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4581 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4583 l2cap_pass_to_tx(chan, control);
4585 if (control->final) {
/* F-bit set: retransmit only if this REJ wasn't already handled. */
4586 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4587 l2cap_retransmit_all(chan, control);
4589 l2cap_retransmit_all(chan, control);
4590 l2cap_ertm_send(chan);
4591 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4592 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window
 * and the SREJ bookkeeping. Returns one of the L2CAP_TXSEQ_* codes:
 * EXPECTED, EXPECTED_SREJ, UNEXPECTED, UNEXPECTED_SREJ, DUPLICATE,
 * DUPLICATE_SREJ, INVALID, or INVALID_IGNORE. The INVALID_IGNORE cases
 * cover the "double poll" ambiguity described in the comment at 4653:
 * safe to drop when tx_win <= half the sequence space, otherwise fatal.
 */
4596 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4598 BT_DBG("chan %p, txseq %d", chan, txseq);
4600 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4601 chan->expected_tx_seq);
/* Extra checks apply while SREJs are outstanding. */
4603 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4604 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4606 /* See notes below regarding "double poll" and
4609 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4610 BT_DBG("Invalid/Ignore - after SREJ");
4611 return L2CAP_TXSEQ_INVALID_IGNORE;
4613 BT_DBG("Invalid - in window after SREJ sent");
4614 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list: the retransmission we asked for first. */
4618 if (chan->srej_list.head == txseq) {
4619 BT_DBG("Expected SREJ");
4620 return L2CAP_TXSEQ_EXPECTED_SREJ;
4623 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4624 BT_DBG("Duplicate SREJ - txseq already stored");
4625 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4628 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4629 BT_DBG("Unexpected SREJ - not requested");
4630 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4634 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it falls outside tx_win. */
4635 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4637 BT_DBG("Invalid - txseq outside tx window");
4638 return L2CAP_TXSEQ_INVALID;
4641 return L2CAP_TXSEQ_EXPECTED;
/* txseq earlier than expected_tx_seq (mod sequence space): dup. */
4645 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4646 __seq_offset(chan, chan->expected_tx_seq,
4647 chan->last_acked_seq)){
4648 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4649 return L2CAP_TXSEQ_DUPLICATE;
4652 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4653 /* A source of invalid packets is a "double poll" condition,
4654 * where delays cause us to send multiple poll packets. If
4655 * the remote stack receives and processes both polls,
4656 * sequence numbers can wrap around in such a way that a
4657 * resent frame has a sequence number that looks like new data
4658 * with a sequence gap. This would trigger an erroneous SREJ
4661 * Fortunately, this is impossible with a tx window that's
4662 * less than half of the maximum sequence number, which allows
4663 * invalid frames to be safely ignored.
4665 * With tx window sizes greater than half of the tx window
4666 * maximum, the frame is invalid and cannot be ignored. This
4667 * causes a disconnect.
4670 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4671 BT_DBG("Invalid/Ignore - txseq outside tx window");
4672 return L2CAP_TXSEQ_INVALID_IGNORE;
4674 BT_DBG("Invalid - txseq outside tx window");
4675 return L2CAP_TXSEQ_INVALID;
/* In-window, ahead of expected: frames were missed; SREJ needed. */
4678 BT_DBG("Unexpected - txseq indicates missing frames");
4679 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, RECV state. Dispatches on @event
 * (I-frame / RR / RNR / REJ / SREJ) and, for I-frames, on the txseq
 * classification. An UNEXPECTED txseq moves the channel into
 * SREJ_SENT. Any skb not queued for later (skb_in_use) is freed at the
 * end. Returns 0 or a negative errno from reassembly.
 * NOTE(review): case-ending breaks and some braces are elided in this
 * excerpt — confirm against the full source.
 */
4683 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4684 struct l2cap_ctrl *control,
4685 struct sk_buff *skb, u8 event)
4688 bool skb_in_use = 0;
4690 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4694 case L2CAP_EV_RECV_IFRAME:
4695 switch (l2cap_classify_txseq(chan, control->txseq)) {
4696 case L2CAP_TXSEQ_EXPECTED:
4697 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop the frame; it will be recovered via
 * retransmission once busy clears.
 */
4699 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4700 BT_DBG("Busy, discarding expected seq %d",
4705 chan->expected_tx_seq = __next_seq(chan,
4708 chan->buffer_seq = chan->expected_tx_seq;
4711 err = l2cap_reassemble_sdu(chan, skb, control);
4715 if (control->final) {
/* F-bit on an I-frame answers our REJ unless already acted on. */
4716 if (!test_and_clear_bit(CONN_REJ_ACT,
4717 &chan->conn_state)) {
4719 l2cap_retransmit_all(chan, control);
4720 l2cap_ertm_send(chan);
4724 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4725 l2cap_send_ack(chan);
4727 case L2CAP_TXSEQ_UNEXPECTED:
4728 l2cap_pass_to_tx(chan, control);
4730 /* Can't issue SREJ frames in the local busy state.
4731 * Drop this frame, it will be seen as missing
4732 * when local busy is exited.
4734 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4735 BT_DBG("Busy, discarding unexpected seq %d",
4740 /* There was a gap in the sequence, so an SREJ
4741 * must be sent for each missing frame. The
4742 * current frame is stored for later use.
4744 skb_queue_tail(&chan->srej_q, skb);
4746 BT_DBG("Queued %p (queue len %d)", skb,
4747 skb_queue_len(&chan->srej_q));
4749 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4750 l2cap_seq_list_clear(&chan->srej_list);
4751 l2cap_send_srej(chan, control->txseq);
/* Await retransmissions in the SREJ_SENT state. */
4753 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4755 case L2CAP_TXSEQ_DUPLICATE:
4756 l2cap_pass_to_tx(chan, control);
4758 case L2CAP_TXSEQ_INVALID_IGNORE:
4760 case L2CAP_TXSEQ_INVALID:
4762 l2cap_send_disconn_req(chan->conn, chan,
4767 case L2CAP_EV_RECV_RR:
4768 l2cap_pass_to_tx(chan, control);
4769 if (control->final) {
4770 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4772 if (!test_and_clear_bit(CONN_REJ_ACT,
4773 &chan->conn_state)) {
4775 l2cap_retransmit_all(chan, control);
4778 l2cap_ertm_send(chan);
4779 } else if (control->poll) {
/* Peer polled us: answer with I-frames or RR/RNR + F-bit. */
4780 l2cap_send_i_or_rr_or_rnr(chan);
4782 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4783 &chan->conn_state) &&
4784 chan->unacked_frames)
4785 __set_retrans_timer(chan);
4787 l2cap_ertm_send(chan);
4790 case L2CAP_EV_RECV_RNR:
/* Peer is busy: remember it and stop retransmitting. */
4791 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4792 l2cap_pass_to_tx(chan, control);
4793 if (control && control->poll) {
4794 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4795 l2cap_send_rr_or_rnr(chan, 0);
4797 __clear_retrans_timer(chan);
4798 l2cap_seq_list_clear(&chan->retrans_list);
4800 case L2CAP_EV_RECV_REJ:
4801 l2cap_handle_rej(chan, control);
4803 case L2CAP_EV_RECV_SREJ:
4804 l2cap_handle_srej(chan, control);
/* Free the skb unless it was queued (skb_in_use set) above. */
4810 if (skb && !skb_in_use) {
4811 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine, SREJ_SENT state: we have asked for one or
 * more retransmissions and queue every arriving I-frame in srej_q until
 * the gaps are filled (l2cap_rx_queued_iframes() then drains it and
 * returns the channel to RECV). S-frame events mirror the RECV state
 * but answer polls with l2cap_send_srej_tail() to keep requesting the
 * missing frames. Returns 0 or a negative errno.
 * NOTE(review): case-ending breaks and some braces are elided in this
 * excerpt — confirm against the full source.
 */
4818 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4819 struct l2cap_ctrl *control,
4820 struct sk_buff *skb, u8 event)
4823 u16 txseq = control->txseq;
4824 bool skb_in_use = 0;
4826 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4830 case L2CAP_EV_RECV_IFRAME:
4831 switch (l2cap_classify_txseq(chan, txseq)) {
4832 case L2CAP_TXSEQ_EXPECTED:
4833 /* Keep frame for reassembly later */
4834 l2cap_pass_to_tx(chan, control);
4835 skb_queue_tail(&chan->srej_q, skb);
4837 BT_DBG("Queued %p (queue len %d)", skb,
4838 skb_queue_len(&chan->srej_q));
4840 chan->expected_tx_seq = __next_seq(chan, txseq);
4842 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The retransmission we asked for first arrived: pop it off
 * the SREJ list, queue it, and try to drain the queue.
 */
4843 l2cap_seq_list_pop(&chan->srej_list);
4845 l2cap_pass_to_tx(chan, control);
4846 skb_queue_tail(&chan->srej_q, skb);
4848 BT_DBG("Queued %p (queue len %d)", skb,
4849 skb_queue_len(&chan->srej_q));
4851 err = l2cap_rx_queued_iframes(chan);
4856 case L2CAP_TXSEQ_UNEXPECTED:
4857 /* Got a frame that can't be reassembled yet.
4858 * Save it for later, and send SREJs to cover
4859 * the missing frames.
4861 skb_queue_tail(&chan->srej_q, skb);
4863 BT_DBG("Queued %p (queue len %d)", skb,
4864 skb_queue_len(&chan->srej_q));
4866 l2cap_pass_to_tx(chan, control);
4867 l2cap_send_srej(chan, control->txseq);
4869 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4870 /* This frame was requested with an SREJ, but
4871 * some expected retransmitted frames are
4872 * missing. Request retransmission of missing
4875 skb_queue_tail(&chan->srej_q, skb);
4877 BT_DBG("Queued %p (queue len %d)", skb,
4878 skb_queue_len(&chan->srej_q));
4880 l2cap_pass_to_tx(chan, control);
4881 l2cap_send_srej_list(chan, control->txseq);
4883 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4884 /* We've already queued this frame. Drop this copy. */
4885 l2cap_pass_to_tx(chan, control);
4887 case L2CAP_TXSEQ_DUPLICATE:
4888 /* Expecting a later sequence number, so this frame
4889 * was already received. Ignore it completely.
4892 case L2CAP_TXSEQ_INVALID_IGNORE:
4894 case L2CAP_TXSEQ_INVALID:
4896 l2cap_send_disconn_req(chan->conn, chan,
4901 case L2CAP_EV_RECV_RR:
4902 l2cap_pass_to_tx(chan, control);
4903 if (control->final) {
4904 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4906 if (!test_and_clear_bit(CONN_REJ_ACT,
4907 &chan->conn_state)) {
4909 l2cap_retransmit_all(chan, control);
4912 l2cap_ertm_send(chan);
4913 } else if (control->poll) {
4914 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4915 &chan->conn_state) &&
4916 chan->unacked_frames) {
4917 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the newest missing frame. */
4920 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4921 l2cap_send_srej_tail(chan);
4923 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4924 &chan->conn_state) &&
4925 chan->unacked_frames)
4926 __set_retrans_timer(chan);
4928 l2cap_send_ack(chan);
4931 case L2CAP_EV_RECV_RNR:
4932 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4933 l2cap_pass_to_tx(chan, control);
4934 if (control->poll) {
4935 l2cap_send_srej_tail(chan);
/* No poll: ack with a plain RR S-frame built locally. */
4937 struct l2cap_ctrl rr_control;
4938 memset(&rr_control, 0, sizeof(rr_control));
4939 rr_control.sframe = 1;
4940 rr_control.super = L2CAP_SUPER_RR;
4941 rr_control.reqseq = chan->buffer_seq;
4942 l2cap_send_sframe(chan, &rr_control);
4946 case L2CAP_EV_RECV_REJ:
4947 l2cap_handle_rej(chan, control);
4949 case L2CAP_EV_RECV_SREJ:
4950 l2cap_handle_srej(chan, control);
/* Free the skb unless it was queued (skb_in_use set) above. */
4954 if (skb && !skb_in_use) {
4955 BT_DBG("Freeing %p", skb);
/* Return true if @reqseq acknowledges a frame that was sent but not yet
 * acked, i.e. it lies in [expected_ack_seq, next_tx_seq] modulo the
 * sequence space.
 */
4962 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4964 /* Make sure reqseq is for a packet that has been sent but not acked */
4967 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4968 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry point: validate control->reqseq, then
 * dispatch to the handler for the current rx_state (RECV or SREJ_SENT).
 * An invalid reqseq is a protocol violation and disconnects the
 * channel. Returns 0 or a negative errno from the state handler.
 */
4971 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4972 struct sk_buff *skb, u8 event)
4976 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4977 control, skb, event, chan->rx_state);
4979 if (__valid_reqseq(chan, control->reqseq)) {
4980 switch (chan->rx_state) {
4981 case L2CAP_RX_STATE_RECV:
4982 err = l2cap_rx_state_recv(chan, control, skb, event);
4984 case L2CAP_RX_STATE_SREJ_SENT:
4985 err = l2cap_rx_state_srej_sent(chan, control, skb,
4993 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4994 control->reqseq, chan->next_tx_seq,
4995 chan->expected_ack_seq);
4996 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Streaming-mode receive: deliver only frames classified as EXPECTED;
 * anything else (gaps, duplicates) is dropped without recovery — any
 * partial SDU is discarded and reassembly state reset. last_acked_seq
 * and expected_tx_seq always advance past the received txseq.
 * NOTE(review): branch structure around 5022 is elided in this excerpt
 * — confirm against the full source.
 */
5002 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5003 struct sk_buff *skb)
5007 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5010 if (l2cap_classify_txseq(chan, control->txseq) ==
5011 L2CAP_TXSEQ_EXPECTED) {
5012 l2cap_pass_to_tx(chan, control);
5014 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5015 __next_seq(chan, chan->buffer_seq));
5017 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5019 l2cap_reassemble_sdu(chan, skb, control);
/* Unexpected txseq: streaming mode has no retransmission, so any
 * in-progress SDU is lost — drop it and reset.
 */
5022 kfree_skb(chan->sdu);
5025 chan->sdu_last_frag = NULL;
5029 BT_DBG("Freeing %p", skb);
/* Always track the latest txseq, even for dropped frames. */
5034 chan->last_acked_seq = control->txseq;
5035 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Parse and validate an ERTM/streaming data frame, then route it:
 * unpack the control field, verify the FCS (drop silently on mismatch —
 * recovery handles it), check the payload against MPS, validate the
 * F/P bits against the TX state, and hand I-frames to l2cap_rx() or
 * l2cap_stream_rx() and S-frames to l2cap_rx() via rx_func_to_event[].
 * Protocol violations disconnect the channel with ECONNRESET.
 */
5040 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5042 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5046 __unpack_control(chan, skb);
5051 * We can just drop the corrupted I-frame here.
5052 * Receiver will miss it and start proper recovery
5053 * procedures and ask for retransmission.
5055 if (l2cap_check_fcs(chan, skb))
/* Compute the payload length: exclude SDU-length header and FCS. */
5058 if (!control->sframe && control->sar == L2CAP_SAR_START)
5059 len -= L2CAP_SDULEN_SIZE;
5061 if (chan->fcs == L2CAP_FCS_CRC16)
5062 len -= L2CAP_FCS_SIZE;
/* Payload exceeds negotiated maximum PDU size: disconnect. */
5064 if (len > chan->mps) {
5065 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5069 if (!control->sframe) {
5072 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5073 control->sar, control->reqseq, control->final,
5076 /* Validate F-bit - F=0 always valid, F=1 only
5077 * valid in TX WAIT_F
5079 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5082 if (chan->mode != L2CAP_MODE_STREAMING) {
5083 event = L2CAP_EV_RECV_IFRAME;
5084 err = l2cap_rx(chan, control, skb, event);
5086 err = l2cap_stream_rx(chan, control, skb);
5090 l2cap_send_disconn_req(chan->conn, chan,
/* Map the 2-bit S-frame "super" field to a state-machine event. */
5093 const u8 rx_func_to_event[4] = {
5094 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5095 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5098 /* Only I-frames are expected in streaming mode */
5099 if (chan->mode == L2CAP_MODE_STREAMING)
5102 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5103 control->reqseq, control->final, control->poll,
5108 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5112 /* Validate F and P bits */
5113 if (control->final && (control->poll ||
5114 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5117 event = rx_func_to_event[control->super];
5118 if (l2cap_rx(chan, control, skb, event))
5119 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Deliver a data PDU to the connection-oriented channel identified by
 * @cid. Unknown CIDs and non-connected channels drop the packet. Basic
 * mode hands the skb straight to the socket layer (bounded by imtu);
 * ERTM/streaming go through l2cap_data_rcv(). The channel lock taken by
 * l2cap_get_chan_by_scid() is released at the end (5174).
 */
5129 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
5131 struct l2cap_chan *chan;
5133 chan = l2cap_get_chan_by_scid(conn, cid);
5135 BT_DBG("unknown cid 0x%4.4x", cid);
5136 /* Drop packet and return */
5141 BT_DBG("chan %p, len %d", chan, skb->len);
5143 if (chan->state != BT_CONNECTED)
5146 switch (chan->mode) {
5147 case L2CAP_MODE_BASIC:
5148 /* If socket recv buffers overflows we drop data here
5149 * which is *bad* because L2CAP has to be reliable.
5150 * But we don't have any other choice. L2CAP doesn't
5151 * provide flow control mechanism. */
/* Basic mode: drop anything larger than our receive MTU. */
5153 if (chan->imtu < skb->len)
5156 if (!chan->ops->recv(chan, skb))
5160 case L2CAP_MODE_ERTM:
5161 case L2CAP_MODE_STREAMING:
5162 l2cap_data_rcv(chan, skb);
5166 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5174 l2cap_chan_unlock(chan);
/* Deliver a connectionless (PSM-addressed) PDU: look up a matching
 * global channel by PSM and source/dest addresses, check state and imtu,
 * and pass the skb up via chan->ops->recv().
 */
5179 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5181 struct l2cap_chan *chan;
5183 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5187 BT_DBG("chan %p, len %d", chan, skb->len);
5189 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5192 if (chan->imtu < skb->len)
5195 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT (LE fixed-channel) PDU: same pattern as the
 * connectionless path, but the channel is looked up globally by scid.
 */
5204 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5205 struct sk_buff *skb)
5207 struct l2cap_chan *chan;
5209 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5213 BT_DBG("chan %p, len %d", chan, skb->len);
5215 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5218 if (chan->imtu < skb->len)
5221 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame: strip the basic header, verify
 * the advertised length matches the skb, then route by CID — signaling,
 * connectionless (PSM), LE data/ATT, SMP, or a connection-oriented data
 * channel. A failed SMP frame tears down the whole connection.
 */
5230 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5232 struct l2cap_hdr *lh = (void *) skb->data;
5236 skb_pull(skb, L2CAP_HDR_SIZE);
5237 cid = __le16_to_cpu(lh->cid);
5238 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload exactly. */
5240 if (len != skb->len) {
5245 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5248 case L2CAP_CID_LE_SIGNALING:
5249 case L2CAP_CID_SIGNALING:
5250 l2cap_sig_channel(conn, skb);
5253 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the destination PSM first. */
5254 psm = get_unaligned((__le16 *) skb->data);
5256 l2cap_conless_channel(conn, psm, skb);
5259 case L2CAP_CID_LE_DATA:
5260 l2cap_att_channel(conn, cid, skb);
5264 if (smp_sig_channel(conn, skb))
5265 l2cap_conn_del(conn->hcon, EACCES);
5269 l2cap_data_channel(conn, cid, skb);
5274 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection request: scan the global
 * channel list for listeners bound to this adapter (lm1, exact match)
 * or to BDADDR_ANY (lm2, wildcard), accumulating HCI_LM_ACCEPT and
 * HCI_LM_MASTER flags. Returns the exact-match link mode if any exact
 * listener was found, otherwise the wildcard one.
 */
5276 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5278 int exact = 0, lm1 = 0, lm2 = 0;
5279 struct l2cap_chan *c;
5281 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5283 /* Find listening sockets and check their link_mode */
5284 read_lock(&chan_list_lock);
5285 list_for_each_entry(c, &chan_list, global_l) {
5286 struct sock *sk = c->sk;
5288 if (c->state != BT_LISTEN)
/* Listener bound exactly to this adapter's address. */
5291 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5292 lm1 |= HCI_LM_ACCEPT;
5293 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5294 lm1 |= HCI_LM_MASTER;
/* Wildcard listener (bound to BDADDR_ANY). */
5296 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5297 lm2 |= HCI_LM_ACCEPT;
5298 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5299 lm2 |= HCI_LM_MASTER;
5302 read_unlock(&chan_list_lock);
5304 return exact ? lm1 : lm2;
/* HCI callback on ACL connect completion: on success set up the L2CAP
 * connection and mark it ready; on failure tear it down with the
 * HCI status converted to an errno.
 */
5307 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5309 struct l2cap_conn *conn;
5311 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5314 conn = l2cap_conn_add(hcon, status);
5316 l2cap_conn_ready(conn);
5318 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking which reason to use for a disconnect: return the
 * stored disc_reason, or REMOTE_USER_TERM if no L2CAP connection state
 * exists for this hcon.
 */
5323 int l2cap_disconn_ind(struct hci_conn *hcon)
5325 struct l2cap_conn *conn = hcon->l2cap_data;
5327 BT_DBG("hcon %p", hcon);
5330 return HCI_ERROR_REMOTE_USER_TERM;
5331 return conn->disc_reason;
/* HCI callback on link disconnect: tear down the L2CAP connection,
 * converting the HCI reason code to an errno.
 */
5334 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5336 BT_DBG("hcon %p reason %d", hcon, reason);
5338 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * if encryption dropped, MEDIUM-security channels get a grace timer
 * (L2CAP_ENC_TIMEOUT) while HIGH-security channels are closed
 * immediately; if encryption came up, clear the MEDIUM timer.
 */
5342 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5344 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5347 if (encrypt == 0x00) {
5348 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5349 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5350 } else if (chan->sec_level == BT_SECURITY_HIGH)
5351 l2cap_chan_close(chan, ECONNREFUSED);
5353 if (chan->sec_level == BT_SECURITY_MEDIUM)
5354 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) completion callback. For LE
 * links, kick off SMP key distribution and cancel the security timer.
 * For BR/EDR, walk every channel on the connection and advance it based
 * on @status: connected/config channels get their sockets unsuspended,
 * BT_CONNECT channels send the pending connect request, and BT_CONNECT2
 * channels answer the peer's connect request (success, pending-
 * authorization, or security-block) and start configuration.
 * NOTE(review): several else/continue lines are elided in this excerpt
 * — confirm flow against the full source.
 */
5358 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5360 struct l2cap_conn *conn = hcon->l2cap_data;
5361 struct l2cap_chan *chan;
5366 BT_DBG("conn %p", conn);
5368 if (hcon->type == LE_LINK) {
/* LE: encryption is up, distribute SMP keys and stop the timer. */
5369 if (!status && encrypt)
5370 smp_distribute_keys(conn, 0);
5371 cancel_delayed_work(&conn->security_timer);
5374 mutex_lock(&conn->chan_lock);
5376 list_for_each_entry(chan, &conn->chan_l, list) {
5377 l2cap_chan_lock(chan);
5379 BT_DBG("chan->scid %d", chan->scid);
5381 if (chan->scid == L2CAP_CID_LE_DATA) {
5382 if (!status && encrypt) {
5383 chan->sec_level = hcon->sec_level;
5384 l2cap_chan_ready(chan);
5387 l2cap_chan_unlock(chan);
/* Skip channels still waiting on a pending connect. */
5391 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5392 l2cap_chan_unlock(chan);
5396 if (!status && (chan->state == BT_CONNECTED ||
5397 chan->state == BT_CONFIG)) {
5398 struct sock *sk = chan->sk;
/* Security done: let the socket resume and notify userspace. */
5400 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5401 sk->sk_state_change(sk);
5403 l2cap_check_encryption(chan, encrypt);
5404 l2cap_chan_unlock(chan);
5408 if (chan->state == BT_CONNECT) {
5410 l2cap_send_conn_req(chan);
5412 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5414 } else if (chan->state == BT_CONNECT2) {
5415 struct sock *sk = chan->sk;
5416 struct l2cap_conn_rsp rsp;
5422 if (test_bit(BT_SK_DEFER_SETUP,
5423 &bt_sk(sk)->flags)) {
/* Deferred setup: report pending authorization and wake
 * the listening parent socket.
 */
5424 struct sock *parent = bt_sk(sk)->parent;
5425 res = L2CAP_CR_PEND;
5426 stat = L2CAP_CS_AUTHOR_PEND;
5428 parent->sk_data_ready(parent, 0);
5430 __l2cap_state_change(chan, BT_CONFIG);
5431 res = L2CAP_CR_SUCCESS;
5432 stat = L2CAP_CS_NO_INFO;
/* Security failed: reject the connection and start teardown. */
5435 __l2cap_state_change(chan, BT_DISCONN);
5436 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5437 res = L2CAP_CR_SEC_BLOCK;
5438 stat = L2CAP_CS_NO_INFO;
5443 rsp.scid = cpu_to_le16(chan->dcid);
5444 rsp.dcid = cpu_to_le16(chan->scid);
5445 rsp.result = cpu_to_le16(res);
5446 rsp.status = cpu_to_le16(stat);
5447 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On success, immediately follow up with our config request. */
5450 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5451 res == L2CAP_CR_SUCCESS) {
5453 set_bit(CONF_REQ_SENT, &chan->conf_state);
5454 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5456 l2cap_build_conf_req(chan, buf),
5458 chan->num_conf_req++;
5462 l2cap_chan_unlock(chan);
5465 mutex_unlock(&conn->chan_lock);
/* Reassemble ACL fragments into complete L2CAP frames. A start fragment
 * (no ACL_CONT flag) carries the basic L2CAP header giving the total
 * length: if complete, dispatch directly; otherwise allocate rx_skb and
 * start collecting. Continuation fragments are appended until rx_len
 * reaches zero, then the full frame goes to l2cap_recv_frame(). Any
 * framing inconsistency drops state and marks the connection unreliable
 * (ECOMM). NOTE(review): several lines (frees, returns) are elided in
 * this excerpt — confirm against the full source.
 */
5470 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5472 struct l2cap_conn *conn = hcon->l2cap_data;
5475 conn = l2cap_conn_add(hcon, 0);
5480 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5482 if (!(flags & ACL_CONT)) {
5483 struct l2cap_hdr *hdr;
/* A start fragment while one is pending means we lost data. */
5487 BT_ERR("Unexpected start frame (len %d)", skb->len);
5488 kfree_skb(conn->rx_skb);
5489 conn->rx_skb = NULL;
5491 l2cap_conn_unreliable(conn, ECOMM);
5494 /* Start fragment always begin with Basic L2CAP header */
5495 if (skb->len < L2CAP_HDR_SIZE) {
5496 BT_ERR("Frame is too short (len %d)", skb->len);
5497 l2cap_conn_unreliable(conn, ECOMM);
5501 hdr = (struct l2cap_hdr *) skb->data;
5502 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5504 if (len == skb->len) {
5505 /* Complete frame received */
5506 l2cap_recv_frame(conn, skb);
5510 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5512 if (skb->len > len) {
5513 BT_ERR("Frame is too long (len %d, expected len %d)",
5515 l2cap_conn_unreliable(conn, ECOMM);
5519 /* Allocate skb for the complete frame (with header) */
5520 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5524 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remember how many bytes are still expected. */
5526 conn->rx_len = len - skb->len;
5528 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with nothing pending: framing error. */
5530 if (!conn->rx_len) {
5531 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5532 l2cap_conn_unreliable(conn, ECOMM);
5536 if (skb->len > conn->rx_len) {
5537 BT_ERR("Fragment is too long (len %d, expected %d)",
5538 skb->len, conn->rx_len);
5539 kfree_skb(conn->rx_skb);
5540 conn->rx_skb = NULL;
5542 l2cap_conn_unreliable(conn, ECOMM);
5546 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5548 conn->rx_len -= skb->len;
5550 if (!conn->rx_len) {
5551 /* Complete frame received */
5552 l2cap_recv_frame(conn, conn->rx_skb);
5553 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dump
 * one line per channel in the global list (addresses, state, psm, CIDs,
 * MTUs, security level and mode) under the chan_list read lock.
 */
5562 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5564 struct l2cap_chan *c;
5566 read_lock(&chan_list_lock);
5568 list_for_each_entry(c, &chan_list, global_l) {
5569 struct sock *sk = c->sk;
5571 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5572 batostr(&bt_sk(sk)->src),
5573 batostr(&bt_sk(sk)->dst),
5574 c->state, __le16_to_cpu(c->psm),
5575 c->scid, c->dcid, c->imtu, c->omtu,
5576 c->sec_level, c->mode);
5579 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the seq_file single-show handler. */
5584 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5586 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file-backed). */
5589 static const struct file_operations l2cap_debugfs_fops = {
5590 .open = l2cap_debugfs_open,
5592 .llseek = seq_lseek,
5593 .release = single_release,
/* Dentry for the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit().
 */
5596 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket family and create the debugfs
 * file (non-fatal if debugfs creation fails — only logged).
 */
5598 int __init l2cap_init(void)
5602 err = l2cap_init_sockets();
5607 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5608 bt_debugfs, NULL, &l2cap_debugfs_fops);
5610 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs file and unregister the sockets. */
5616 void l2cap_exit(void)
5618 debugfs_remove(l2cap_debugfs);
5619 l2cap_cleanup_sockets();
/* "disable_ertm" module parameter (bool, world-readable, root-writable):
 * disables enhanced retransmission mode when set.
 */
5622 module_param(disable_ertm, bool, 0644);
5623 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");