2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
/* NOTE(review): this listing appears to be a sampled/truncated copy of the
 * original source (interior lines and closing braces are missing throughout);
 * comments below describe only what the visible lines establish. */
/* Locally advertised L2CAP feature mask and fixed-channel bitmap. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue used to drain the ERTM "busy" state (see l2cap_busy_work). */
65 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, protected by its rwlock. */
67 struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
71 static void l2cap_busy_work(struct work_struct *work);
73 static void l2cap_sock_close(struct sock *sk);
75 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
76 u8 code, u8 ident, u16 dlen, void *data);
78 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
80 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list looking for a destination CID match.
 * Caller must hold the chan_list lock (no locking here). */
81 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
84 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
85 if (l2cap_pi(s)->dcid == cid)
/* Same walk, matching on the source CID. Caller holds the list lock. */
91 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
94 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
95 if (l2cap_pi(s)->scid == cid)
101 /* Find channel with given SCID.
102 * Returns locked socket */
103 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
107 s = __l2cap_get_chan_by_scid(l, cid)
110 read_unlock(&l->lock);
/* Find the channel whose pending signalling command used this ident.
 * Caller must hold the chan_list lock. */
114 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
117 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
118 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
124 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
128 s = __l2cap_get_chan_by_ident(l, ident);
131 read_unlock(&l->lock);
/* Allocate the first unused dynamic source CID by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Caller holds the list lock. */
135 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
137 u16 cid = L2CAP_CID_DYN_START;
139 for (; cid < L2CAP_CID_DYN_END; cid++) {
140 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the connection's doubly linked channel list.
 * No locking here — caller (l2cap_chan_add) takes the write lock. */
147 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
152 l2cap_pi(l->head)->prev_c = sk;
154 l2cap_pi(sk)->next_c = l->head;
155 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the channel list; this variant takes the write lock itself. */
159 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
161 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
163 write_lock_bh(&l->lock);
168 l2cap_pi(next)->prev_c = prev;
170 l2cap_pi(prev)->next_c = next;
171 write_unlock_bh(&l->lock);
/* Bind socket sk to connection conn and assign its CIDs based on socket
 * type; optionally queue it on a listening parent's accept queue.
 * Caller holds the chan_list write lock (see l2cap_chan_add). */
176 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
178 struct l2cap_chan_list *l = &conn->chan_list;
180 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
181 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: presumably the HCI "remote user terminated connection" disconnect
 * reason used as the default — TODO confirm; a named constant would be better. */
183 conn->disc_reason = 0x13;
185 l2cap_pi(sk)->conn = conn;
187 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
188 /* Alloc CID for connection-oriented socket */
189 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
190 } else if (sk->sk_type == SOCK_DGRAM) {
191 /* Connectionless socket */
192 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
193 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
194 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
196 /* Raw socket can send/recv signalling messages only */
197 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
198 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
199 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
202 __l2cap_chan_link(l, sk);
205 bt_accept_enqueue(parent, sk);
/* Tear down channel sk with error err: unlink it from the connection,
 * mark it closed/zapped, notify any accepting parent, and purge all
 * ERTM state (timers, SREJ/busy queues, srej list). */
209 * Must be called on the locked socket. */
210 static void l2cap_chan_del(struct sock *sk, int err)
212 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
213 struct sock *parent = bt_sk(sk)->parent;
215 l2cap_sock_clear_timer(sk);
217 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
220 /* Unlink from channel list */
221 l2cap_chan_unlink(&conn->chan_list, sk);
222 l2cap_pi(sk)->conn = NULL;
/* Drop the reference taken on the ACL link when the channel was added. */
223 hci_conn_put(conn->hcon);
226 sk->sk_state = BT_CLOSED;
227 sock_set_flag(sk, SOCK_ZAPPED);
/* Pending-accept child: detach from parent and wake the accept()er. */
233 bt_accept_unlink(sk);
234 parent->sk_data_ready(parent, 0);
236 sk->sk_state_change(sk);
238 skb_queue_purge(TX_QUEUE(sk));
240 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
241 struct srej_list *l, *tmp;
/* Stop every ERTM timer before freeing the queues they operate on. */
243 del_timer(&l2cap_pi(sk)->retrans_timer);
244 del_timer(&l2cap_pi(sk)->monitor_timer);
245 del_timer(&l2cap_pi(sk)->ack_timer);
247 skb_queue_purge(SREJ_QUEUE(sk));
248 skb_queue_purge(BUSY_QUEUE(sk));
250 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Map the socket type and requested security level to an HCI authentication
 * requirement: raw sockets use dedicated bonding, PSM 0x0001 (SDP) is
 * special-cased to no bonding, everything else uses general bonding. */
257 static inline u8 l2cap_get_auth_type(struct sock *sk)
259 if (sk->sk_type == SOCK_RAW) {
260 switch (l2cap_pi(sk)->sec_level) {
261 case BT_SECURITY_HIGH:
262 return HCI_AT_DEDICATED_BONDING_MITM;
263 case BT_SECURITY_MEDIUM:
264 return HCI_AT_DEDICATED_BONDING;
266 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: never requires more than the SDP security level. */
268 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
269 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
270 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
272 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
273 return HCI_AT_NO_BONDING_MITM;
275 return HCI_AT_NO_BONDING;
277 switch (l2cap_pi(sk)->sec_level) {
278 case BT_SECURITY_HIGH:
279 return HCI_AT_GENERAL_BONDING_MITM;
280 case BT_SECURITY_MEDIUM:
281 return HCI_AT_GENERAL_BONDING;
283 return HCI_AT_NO_BONDING;
288 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the ACL link. */
289 static inline int l2cap_check_security(struct sock *sk)
291 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
294 auth_type = l2cap_get_auth_type(sk);
296 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved range (1..128). */
300 u8 l2cap_get_ident(struct l2cap_conn *conn)
304 /* Get next available identificator.
305 * 1 - 128 are used by kernel.
306 * 129 - 199 are reserved.
307 * 200 - 254 are used by utilities like l2ping, etc.
310 spin_lock_bh(&conn->lock);
312 if (++conn->tx_ident > 128)
317 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and push it down the ACL link,
 * using the non-flushable ACL start flag when the controller supports it. */
322 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
324 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
327 BT_DBG("code 0x%2.2x", code);
332 if (lmp_no_flush_capable(conn->hcon->hdev))
333 flags = ACL_START_NO_FLUSH;
337 hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM S-frame carrying the given control field.
 * Consumes any pending F-bit / P-bit state, appends a CRC16 FCS when the
 * channel negotiated it, and sends the frame over the ACL link. */
340 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
345 struct sock *sk = (struct sock *)pi;
346 int count, hlen = L2CAP_HDR_SIZE + 2;
349 if (sk->sk_state != BT_CONNECTED)
/* FCS adds 2 bytes to the frame (hlen adjusted in the elided line). */
352 if (pi->fcs == L2CAP_FCS_CRC16)
355 BT_DBG("pi %p, control 0x%2.2x", pi, control);
357 count = min_t(unsigned int, conn->mtu, hlen);
358 control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggy-back a pending Final bit, then clear the flag. */
360 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
361 control |= L2CAP_CTRL_FINAL;
362 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Same for a pending Poll bit. */
365 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
366 control |= L2CAP_CTRL_POLL;
367 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
370 skb = bt_skb_alloc(count, GFP_ATOMIC);
374 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
375 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
376 lh->cid = cpu_to_le16(pi->dcid);
377 put_unaligned_le16(control, skb_put(skb, 2));
/* CRC16 over header + control, excluding the 2 FCS bytes themselves. */
379 if (pi->fcs == L2CAP_FCS_CRC16) {
380 u16 fcs = crc16(0, (u8 *)lh, count - 2);
381 put_unaligned_le16(fcs, skb_put(skb, 2));
384 if (lmp_no_flush_capable(conn->hcon->hdev))
385 flags = ACL_START_NO_FLUSH;
389 hci_send_acl(pi->conn->hcon, skb, flags);
/* Send RR normally, or RNR while we are locally busy (and remember that
 * an RNR went out so it can be cleared later). */
392 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
394 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
395 control |= L2CAP_SUPER_RCV_NOT_READY;
396 pi->conn_state |= L2CAP_CONN_RNR_SENT;
398 control |= L2CAP_SUPER_RCV_READY;
400 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
402 l2cap_send_sframe(pi, control);
/* True when no Connect Request is already outstanding for this channel. */
405 static inline int __l2cap_no_conn_pending(struct sock *sk)
407 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the remote feature mask is already
 * known, send an L2CAP Connect Request (security permitting); otherwise
 * first send an Information Request for the feature mask and arm the
 * info timer. */
410 static void l2cap_do_start(struct sock *sk)
412 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
414 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
415 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
418 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
419 struct l2cap_conn_req req;
420 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
421 req.psm = l2cap_pi(sk)->psm;
423 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
424 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
426 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
427 L2CAP_CONN_REQ, sizeof(req), &req);
430 struct l2cap_info_req req;
431 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
433 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
434 conn->info_ident = l2cap_get_ident(conn);
436 mod_timer(&conn->info_timer, jiffies +
437 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
439 l2cap_send_cmd(conn, conn->info_ident,
440 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether the requested channel mode is supported by both the local
 * and the remote feature masks (basic mode falls through to the default). */
444 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
446 u32 local_feat_mask = l2cap_feat_mask;
448 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
451 case L2CAP_MODE_ERTM:
452 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
453 case L2CAP_MODE_STREAMING:
454 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Abort the channel: purge pending TX, stop ERTM timers, send an L2CAP
 * Disconnect Request and move the socket to BT_DISCONN. */
460 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
462 struct l2cap_disconn_req req;
467 skb_queue_purge(TX_QUEUE(sk));
469 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
470 del_timer(&l2cap_pi(sk)->retrans_timer);
471 del_timer(&l2cap_pi(sk)->monitor_timer);
472 del_timer(&l2cap_pi(sk)->ack_timer);
475 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
476 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
477 l2cap_send_cmd(conn, l2cap_get_ident(conn),
478 L2CAP_DISCONN_REQ, sizeof(req), &req);
480 sk->sk_state = BT_DISCONN;
484 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * channels in BT_CONNECT get a Connect Request (or are scheduled for
 * closing if their mode is unsupported), channels in BT_CONNECT2 get a
 * Connect Response followed by the first Configure Request. Channels to
 * be closed are collected on a local list and closed after the channel
 * list lock is dropped. */
485 static void l2cap_conn_start(struct l2cap_conn *conn)
487 struct l2cap_chan_list *l = &conn->chan_list;
488 struct sock_del_list del, *tmp1, *tmp2;
491 BT_DBG("conn %p", conn);
493 INIT_LIST_HEAD(&del.list);
497 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in connect/configure. */
500 if (sk->sk_type != SOCK_SEQPACKET &&
501 sk->sk_type != SOCK_STREAM) {
506 if (sk->sk_state == BT_CONNECT) {
507 struct l2cap_conn_req req;
509 if (!l2cap_check_security(sk) ||
510 !__l2cap_no_conn_pending(sk)) {
/* Mode mandated by the application but unsupported by the peer:
 * defer the close until after the list lock is released. */
515 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
517 && l2cap_pi(sk)->conf_state &
518 L2CAP_CONF_STATE2_DEVICE) {
/* NOTE(review): kzalloc(GFP_ATOMIC) result is used with no NULL
 * check visible in this listing — confirm against the full source. */
519 tmp1 = kzalloc(sizeof(struct sock_del_list),
522 list_add_tail(&tmp1->list, &del.list);
527 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
528 req.psm = l2cap_pi(sk)->psm;
530 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
531 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
533 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
534 L2CAP_CONN_REQ, sizeof(req), &req);
536 } else if (sk->sk_state == BT_CONNECT2) {
537 struct l2cap_conn_rsp rsp;
539 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
540 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
542 if (l2cap_check_security(sk)) {
/* Deferred setup: report "pending / authorization pending" and
 * wake the listening parent instead of completing the connect. */
543 if (bt_sk(sk)->defer_setup) {
544 struct sock *parent = bt_sk(sk)->parent;
545 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
546 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
547 parent->sk_data_ready(parent, 0);
550 sk->sk_state = BT_CONFIG;
551 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
552 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
555 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
556 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
559 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
560 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Skip the Configure Request if one was already sent or the
 * response was not a success. */
562 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
563 rsp.result != L2CAP_CR_SUCCESS) {
568 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
569 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
570 l2cap_build_conf_req(sk, buf), buf);
571 l2cap_pi(sk)->num_conf_req++;
577 read_unlock(&l->lock);
/* Now close the channels collected above, outside the read lock. */
579 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
580 bh_lock_sock(tmp1->sk);
581 __l2cap_sock_close(tmp1->sk, ECONNRESET);
582 bh_unlock_sock(tmp1->sk);
583 list_del(&tmp1->list);
/* ACL link came up: mark non-connection-oriented channels connected
 * immediately; channels in BT_CONNECT proceed via l2cap_do_start
 * (call elided in this listing). */
588 static void l2cap_conn_ready(struct l2cap_conn *conn)
590 struct l2cap_chan_list *l = &conn->chan_list;
593 BT_DBG("conn %p", conn);
597 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
600 if (sk->sk_type != SOCK_SEQPACKET &&
601 sk->sk_type != SOCK_STREAM) {
602 l2cap_sock_clear_timer(sk);
603 sk->sk_state = BT_CONNECTED;
604 sk->sk_state_change(sk);
605 } else if (sk->sk_state == BT_CONNECT)
611 read_unlock(&l->lock);
614 /* Notify sockets that we cannot guaranty reliability anymore */
615 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
617 struct l2cap_chan_list *l = &conn->chan_list;
620 BT_DBG("conn %p", conn);
/* Only channels that demanded reliability are notified (error path elided). */
624 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
625 if (l2cap_pi(sk)->force_reliable)
629 read_unlock(&l->lock);
/* Info-request timer expired: give up waiting for the remote feature
 * mask and start the channels with what we have. */
632 static void l2cap_info_timeout(unsigned long arg)
634 struct l2cap_conn *conn = (void *) arg;
636 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
637 conn->info_ident = 0;
639 l2cap_conn_start(conn);
/* Attach (or create) the L2CAP connection object for an ACL link:
 * allocates conn on first use, records MTU and addresses, and arms the
 * info timer used while querying remote features. */
642 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
644 struct l2cap_conn *conn = hcon->l2cap_data;
649 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
653 hcon->l2cap_data = conn;
656 BT_DBG("hcon %p conn %p", hcon, conn);
658 conn->mtu = hcon->hdev->acl_mtu;
659 conn->src = &hcon->hdev->bdaddr;
660 conn->dst = &hcon->dst;
664 spin_lock_init(&conn->lock);
665 rwlock_init(&conn->chan_list.lock);
667 setup_timer(&conn->info_timer, l2cap_info_timeout,
668 (unsigned long) conn);
/* Default disconnect reason; same magic 0x13 as in __l2cap_chan_add. */
670 conn->disc_reason = 0x13;
/* Destroy the L2CAP connection: free any partial reassembly skb, delete
 * every remaining channel with err, and stop the info timer if running. */
675 static void l2cap_conn_del(struct hci_conn *hcon, int err)
677 struct l2cap_conn *conn = hcon->l2cap_data;
683 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
685 kfree_skb(conn->rx_skb);
688 while ((sk = conn->chan_list.head)) {
690 l2cap_chan_del(sk, err);
695 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
696 del_timer_sync(&conn->info_timer);
698 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
702 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
704 struct l2cap_chan_list *l = &conn->chan_list;
705 write_lock_bh(&l->lock);
706 __l2cap_chan_add(conn, sk, parent);
707 write_unlock_bh(&l->lock);
710 /* ---- Socket interface ---- */
712 /* Find socket with psm and source bdaddr.
713 * Returns closest match.
/* Exact source-address match wins (returned via sk/node); a BDADDR_ANY
 * listener is kept in sk1 as the fallback. */
715 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
717 struct sock *sk = NULL, *sk1 = NULL;
718 struct hlist_node *node;
720 read_lock(&l2cap_sk_list.lock);
722 sk_for_each(sk, node, &l2cap_sk_list.head) {
723 if (state && sk->sk_state != state)
726 if (l2cap_pi(sk)->psm == psm) {
728 if (!bacmp(&bt_sk(sk)->src, src))
732 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
737 read_unlock(&l2cap_sk_list.lock);
/* node non-NULL means the loop broke early on an exact match. */
739 return node ? sk : sk1;
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
742 static void l2cap_sock_cleanup_listen(struct sock *parent)
746 BT_DBG("parent %p", parent);
748 /* Close not yet accepted channels */
749 while ((sk = bt_accept_dequeue(parent, NULL)))
750 l2cap_sock_close(sk);
752 parent->sk_state = BT_CLOSED;
753 sock_set_flag(parent, SOCK_ZAPPED);
756 /* Kill socket (only if zapped and orphan)
757 * Must be called on unlocked socket.
759 void l2cap_sock_kill(struct sock *sk)
761 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
764 BT_DBG("sk %p state %d", sk, sk->sk_state);
766 /* Kill poor orphan */
767 bt_sock_unlink(&l2cap_sk_list, sk);
768 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listening sockets clean up their accept queue;
 * connected channels send a Disconnect Request; half-open incoming
 * channels (BT_CONNECT2) answer the pending Connect Request with a
 * refusal before being deleted. Caller holds the socket lock. */
772 void __l2cap_sock_close(struct sock *sk, int reason)
774 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
776 switch (sk->sk_state) {
778 l2cap_sock_cleanup_listen(sk);
783 if (sk->sk_type == SOCK_SEQPACKET ||
784 sk->sk_type == SOCK_STREAM) {
785 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Arm the socket timer so a stuck disconnect eventually times out. */
787 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
788 l2cap_send_disconn_req(conn, sk, reason);
790 l2cap_chan_del(sk, reason);
794 if (sk->sk_type == SOCK_SEQPACKET ||
795 sk->sk_type == SOCK_STREAM) {
796 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
797 struct l2cap_conn_rsp rsp;
/* Refuse the pending connect: security block if setup was deferred,
 * otherwise report a bad PSM. */
800 if (bt_sk(sk)->defer_setup)
801 result = L2CAP_CR_SEC_BLOCK;
803 result = L2CAP_CR_BAD_PSM;
804 sk->sk_state = BT_DISCONN;
806 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
807 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
808 rsp.result = cpu_to_le16(result);
809 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
810 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
811 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
813 l2cap_chan_del(sk, reason);
818 l2cap_chan_del(sk, reason);
822 sock_set_flag(sk, SOCK_ZAPPED);
827 /* Must be called on unlocked socket. */
/* Lock-taking wrapper: clear the timer and close with ECONNRESET. */
828 static void l2cap_sock_close(struct sock *sk)
830 l2cap_sock_clear_timer(sk);
832 __l2cap_sock_close(sk, ECONNRESET);
/* Establish an outgoing channel: route to an HCI device, create/attach
 * the ACL link with the required security, bind the channel to the
 * connection and move the socket to BT_CONNECT (or straight to
 * BT_CONNECTED if the link is already up and security passes). */
837 int l2cap_do_connect(struct sock *sk)
839 bdaddr_t *src = &bt_sk(sk)->src;
840 bdaddr_t *dst = &bt_sk(sk)->dst;
841 struct l2cap_conn *conn;
842 struct hci_conn *hcon;
843 struct hci_dev *hdev;
847 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
850 hdev = hci_get_route(dst, src);
852 return -EHOSTUNREACH;
854 hci_dev_lock_bh(hdev);
858 auth_type = l2cap_get_auth_type(sk);
860 hcon = hci_connect(hdev, ACL_LINK, dst,
861 l2cap_pi(sk)->sec_level, auth_type);
865 conn = l2cap_conn_add(hcon, 0);
873 /* Update source addr of the socket */
874 bacpy(src, conn->src);
876 l2cap_chan_add(conn, sk, NULL);
878 sk->sk_state = BT_CONNECT;
879 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
881 if (hcon->state == BT_CONNECTED) {
882 if (sk->sk_type != SOCK_SEQPACKET &&
883 sk->sk_type != SOCK_STREAM) {
884 l2cap_sock_clear_timer(sk);
885 if (l2cap_check_security(sk))
886 sk->sk_state = BT_CONNECTED;
892 hci_dev_unlock_bh(hdev);
/* Block until all ERTM frames are acknowledged (or the channel drops),
 * honouring signals and socket errors while sleeping. */
897 int __l2cap_wait_ack(struct sock *sk)
899 DECLARE_WAITQUEUE(wait, current);
903 add_wait_queue(sk_sleep(sk), &wait);
904 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
905 set_current_state(TASK_INTERRUPTIBLE);
910 if (signal_pending(current)) {
911 err = sock_intr_errno(timeo);
916 timeo = schedule_timeout(timeo);
919 err = sock_error(sk);
923 set_current_state(TASK_RUNNING);
924 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: give up and disconnect after remote_max_tx polls
 * went unanswered, otherwise poll the peer again (P-bit RR/RNR). */
928 static void l2cap_monitor_timeout(unsigned long arg)
930 struct sock *sk = (void *) arg;
935 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
936 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
941 l2cap_pi(sk)->retry_count++;
942 __mod_monitor_timer();
944 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: switch to the monitor/poll phase — send a
 * P-bit supervisory frame and wait for the F-bit answer. */
948 static void l2cap_retrans_timeout(unsigned long arg)
950 struct sock *sk = (void *) arg;
955 l2cap_pi(sk)->retry_count = 1;
956 __mod_monitor_timer();
958 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
960 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Drop frames from the head of the TX queue up to (not including) the
 * expected_ack_seq; stop the retransmission timer once nothing is
 * left unacknowledged. */
964 static void l2cap_drop_acked_frames(struct sock *sk)
968 while ((skb = skb_peek(TX_QUEUE(sk))) &&
969 l2cap_pi(sk)->unacked_frames) {
970 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
973 skb = skb_dequeue(TX_QUEUE(sk));
976 l2cap_pi(sk)->unacked_frames--;
979 if (!l2cap_pi(sk)->unacked_frames)
980 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one L2CAP frame to the HCI layer, choosing the non-flushable ACL
 * start flag when the channel is non-flushable and the controller allows. */
983 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
985 struct l2cap_pinfo *pi = l2cap_pi(sk);
986 struct hci_conn *hcon = pi->conn->hcon;
989 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
991 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
992 flags = ACL_START_NO_FLUSH;
996 hci_send_acl(hcon, skb, flags);
/* Streaming mode TX: stamp each queued I-frame with the next TxSeq,
 * recompute its FCS if enabled, and send it; no retransmission state. */
999 void l2cap_streaming_send(struct sock *sk)
1001 struct sk_buff *skb;
1002 struct l2cap_pinfo *pi = l2cap_pi(sk);
1005 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1006 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1007 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1008 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything except its own trailing two bytes. */
1010 if (pi->fcs == L2CAP_FCS_CRC16) {
1011 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1012 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1015 l2cap_do_send(sk, skb);
/* TxSeq is modulo-64 per the ERTM sequence number space. */
1017 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/* Retransmit the single I-frame with the given TxSeq: locate it in the
 * TX queue, disconnect if it already hit remote_max_tx retries,
 * otherwise clone it, refresh control field (F-bit, ReqSeq, TxSeq) and
 * FCS, and send the clone. */
1021 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1023 struct l2cap_pinfo *pi = l2cap_pi(sk);
1024 struct sk_buff *skb, *tx_skb;
1027 skb = skb_peek(TX_QUEUE(sk));
1032 if (bt_cb(skb)->tx_seq == tx_seq)
1035 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1038 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1040 if (pi->remote_max_tx &&
1041 bt_cb(skb)->retries == pi->remote_max_tx) {
1042 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
/* Clone so the original stays queued for possible further retransmits;
 * skb_clone shares the data buffer, so edits below touch both views. */
1046 tx_skb = skb_clone(skb, GFP_ATOMIC);
1047 bt_cb(skb)->retries++;
1048 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1050 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1051 control |= L2CAP_CTRL_FINAL;
1052 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1055 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1056 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1058 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1060 if (pi->fcs == L2CAP_FCS_CRC16) {
1061 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1062 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1065 l2cap_do_send(sk, tx_skb);
/* ERTM TX path: send every frame from sk_send_head while the TX window
 * has room — clone each skb, fill in control field (SAR bits kept,
 * F-bit, ReqSeq, TxSeq) and FCS, transmit, arm the retransmission
 * timer, and advance sequence/accounting state. */
1068 int l2cap_ertm_send(struct sock *sk)
1070 struct sk_buff *skb, *tx_skb;
1071 struct l2cap_pinfo *pi = l2cap_pi(sk);
1075 if (sk->sk_state != BT_CONNECTED)
1078 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1080 if (pi->remote_max_tx &&
1081 bt_cb(skb)->retries == pi->remote_max_tx) {
1082 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1086 tx_skb = skb_clone(skb, GFP_ATOMIC);
1088 bt_cb(skb)->retries++;
1090 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Preserve only the SAR bits from the pre-built control field. */
1091 control &= L2CAP_CTRL_SAR;
1093 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1094 control |= L2CAP_CTRL_FINAL;
1095 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1097 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1098 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1099 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed over and written through skb->data here,
 * while the frame actually sent is tx_skb, and the sibling
 * l2cap_retransmit_one_frame uses tx_skb->data. skb_clone shares the
 * data buffer so the bytes coincide, but this is inconsistent and
 * fragile — confirm against upstream, which uses tx_skb->data. */
1102 if (pi->fcs == L2CAP_FCS_CRC16) {
1103 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1104 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1107 l2cap_do_send(sk, tx_skb);
1109 __mod_retrans_timer();
1111 bt_cb(skb)->tx_seq = pi->next_tx_seq;
/* Modulo-64 ERTM sequence space. */
1112 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1114 pi->unacked_frames++;
/* Advance sk_send_head, or clear it when the queue is exhausted. */
1117 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1118 sk->sk_send_head = NULL;
1120 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Rewind the send head to the front of the TX queue, reset next_tx_seq
 * to the oldest unacknowledged sequence, and resend via l2cap_ertm_send. */
1128 static int l2cap_retransmit_frames(struct sock *sk)
1130 struct l2cap_pinfo *pi = l2cap_pi(sk);
1133 if (!skb_queue_empty(TX_QUEUE(sk)))
1134 sk->sk_send_head = TX_QUEUE(sk)->next;
1136 pi->next_tx_seq = pi->expected_ack_seq;
1137 ret = l2cap_ertm_send(sk);
/* Acknowledge received frames: send RNR while locally busy, otherwise
 * try to piggy-back the ack on pending I-frames, falling back to an RR. */
1141 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1143 struct sock *sk = (struct sock *)pi;
1146 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1148 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1149 control |= L2CAP_SUPER_RCV_NOT_READY;
1150 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1151 l2cap_send_sframe(pi, control);
/* I-frames sent by l2cap_ertm_send carry the ack; no S-frame needed. */
1155 if (l2cap_ertm_send(sk) > 0)
1158 control |= L2CAP_SUPER_RCV_READY;
1159 l2cap_send_sframe(pi, control);
/* Send a final SREJ S-frame for the last entry on the SREJ list. */
1162 static void l2cap_send_srejtail(struct sock *sk)
1164 struct srej_list *tail;
1167 control = L2CAP_SUPER_SELECT_REJECT;
1168 control |= L2CAP_CTRL_FINAL;
1170 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1171 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1173 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy a user iovec into skb: the first `count` bytes go into skb's
 * linear area, the rest is chained as frag_list fragments no larger
 * than the connection MTU. */
1176 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1178 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1179 struct sk_buff **frag;
1182 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1188 /* Continuation fragments (no L2CAP header) */
1189 frag = &skb_shinfo(skb)->frag_list;
1191 count = min_t(unsigned int, conn->mtu, len);
1193 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1196 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1202 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * followed by the user payload copied from msg. Returns ERR_PTR on
 * allocation or copy failure. */
1208 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1210 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1211 struct sk_buff *skb;
1212 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1213 struct l2cap_hdr *lh;
1215 BT_DBG("sk %p len %d", sk, (int)len);
/* Linear part limited by the connection MTU; rest goes to frag_list. */
1217 count = min_t(unsigned int, (conn->mtu - hlen), len);
1218 skb = bt_skb_send_alloc(sk, count + hlen,
1219 msg->msg_flags & MSG_DONTWAIT, &err);
1221 return ERR_PTR(err);
1223 /* Create L2CAP header */
1224 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1225 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1226 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1227 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1229 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1230 if (unlikely(err < 0)) {
1232 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: bare L2CAP header + payload. */
1237 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1239 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1240 struct sk_buff *skb;
1241 int err, count, hlen = L2CAP_HDR_SIZE;
1242 struct l2cap_hdr *lh;
1244 BT_DBG("sk %p len %d", sk, (int)len);
1246 count = min_t(unsigned int, (conn->mtu - hlen), len);
1247 skb = bt_skb_send_alloc(sk, count + hlen,
1248 msg->msg_flags & MSG_DONTWAIT, &err);
1250 return ERR_PTR(err);
1252 /* Create L2CAP header */
1253 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1254 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1255 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1257 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1258 if (unlikely(err < 0)) {
1260 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: L2CAP header + 2-byte control field,
 * optional 2-byte SDU length (SAR start segments), payload, and a
 * 2-byte FCS placeholder when CRC16 is negotiated. The real control
 * field and FCS are filled in at send time. */
1265 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1267 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1268 struct sk_buff *skb;
1269 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1270 struct l2cap_hdr *lh;
1272 BT_DBG("sk %p len %d", sk, (int)len);
1275 return ERR_PTR(-ENOTCONN);
/* hlen grows by 2 for the FCS (adjustment line elided in this listing). */
1280 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1283 count = min_t(unsigned int, (conn->mtu - hlen), len);
1284 skb = bt_skb_send_alloc(sk, count + hlen,
1285 msg->msg_flags & MSG_DONTWAIT, &err);
1287 return ERR_PTR(err);
1289 /* Create L2CAP header */
1290 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1291 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1292 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1293 put_unaligned_le16(control, skb_put(skb, 2));
1295 put_unaligned_le16(sdulen, skb_put(skb, 2));
1297 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1298 if (unlikely(err < 0)) {
1300 return ERR_PTR(err);
/* Reserve space for the FCS; value is computed when the frame is sent. */
1303 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1304 put_unaligned_le16(0, skb_put(skb, 2));
1306 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a chain of I-frames:
 * SDU_START (carrying the total length), SDU_CONTINUE segments of
 * remote_mps bytes, then SDU_END. The segments are built on a local
 * queue and spliced onto TX_QUEUE only if all allocations succeed. */
1310 int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1312 struct l2cap_pinfo *pi = l2cap_pi(sk);
1313 struct sk_buff *skb;
1314 struct sk_buff_head sar_queue;
1318 skb_queue_head_init(&sar_queue);
1319 control = L2CAP_SDU_START;
/* Start segment carries the full SDU length (sdulen = len). */
1320 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1322 return PTR_ERR(skb);
1324 __skb_queue_tail(&sar_queue, skb);
1325 len -= pi->remote_mps;
1326 size += pi->remote_mps;
1331 if (len > pi->remote_mps) {
1332 control = L2CAP_SDU_CONTINUE;
1333 buflen = pi->remote_mps;
1335 control = L2CAP_SDU_END;
1339 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure discard everything built so far — all-or-nothing. */
1341 skb_queue_purge(&sar_queue);
1342 return PTR_ERR(skb);
1345 __skb_queue_tail(&sar_queue, skb);
1349 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1350 if (sk->sk_send_head == NULL)
1351 sk->sk_send_head = sar_queue.next;
/* Configuration complete: reset conf state, clear the setup timer, and
 * wake whichever side is waiting (connect() for outgoing channels,
 * accept() on the parent for incoming ones). */
1356 static void l2cap_chan_ready(struct sock *sk)
1358 struct sock *parent = bt_sk(sk)->parent;
1360 BT_DBG("sk %p, parent %p", sk, parent);
1362 l2cap_pi(sk)->conf_state = 0;
1363 l2cap_sock_clear_timer(sk);
1366 /* Outgoing channel.
1367 * Wake up socket sleeping on connect.
1369 sk->sk_state = BT_CONNECTED;
1370 sk->sk_state_change(sk);
1372 /* Incoming channel.
1373 * Wake up socket sleeping on accept.
1375 parent->sk_data_ready(parent, 0);
1379 /* Copy frame to all raw sockets on that connection */
1380 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1382 struct l2cap_chan_list *l = &conn->chan_list;
1383 struct sk_buff *nskb;
1386 BT_DBG("conn %p", conn);
1388 read_lock(&l->lock);
1389 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1390 if (sk->sk_type != SOCK_RAW)
1393 /* Don't send frame to the socket it came from */
/* Clone per recipient; sock_queue_rcv_skb failure path elided here. */
1396 nskb = skb_clone(skb, GFP_ATOMIC);
1400 if (sock_queue_rcv_skb(sk, nskb))
1403 read_unlock(&l->lock);
1406 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb for a signalling command on the signalling
 * CID: L2CAP header + command header + payload, with payload overflow
 * beyond the connection MTU chained as frag_list fragments. */
1407 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1408 u8 code, u8 ident, u16 dlen, void *data)
1410 struct sk_buff *skb, **frag;
1411 struct l2cap_cmd_hdr *cmd;
1412 struct l2cap_hdr *lh;
1415 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1416 conn, code, ident, dlen);
1418 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1419 count = min_t(unsigned int, conn->mtu, len);
1421 skb = bt_skb_alloc(count, GFP_ATOMIC);
1425 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1426 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1427 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1429 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1432 cmd->len = cpu_to_le16(dlen);
/* Remaining room in the first skb after the two headers. */
1435 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1436 memcpy(skb_put(skb, count), data, count);
1442 /* Continuation fragments (no L2CAP header) */
1443 frag = &skb_shinfo(skb)->frag_list;
1445 count = min_t(unsigned int, conn->mtu, len);
1447 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1451 memcpy(skb_put(*frag, count), data, count);
1456 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its type/length and
 * decode the value by size (1/2/4 bytes little-endian, otherwise a
 * pointer to the raw bytes). Advances past the option (advance elided). */
1466 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1468 struct l2cap_conf_opt *opt = *ptr;
1471 len = L2CAP_CONF_OPT_SIZE + opt->len;
1479 *val = *((u8 *) opt->val);
1483 *val = get_unaligned_le16(opt->val);
1487 *val = get_unaligned_le32(opt->val);
/* Larger options are returned by reference rather than by value. */
1491 *val = (unsigned long) opt->val;
1495 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, encoding the value by size
 * (1/2/4 bytes little-endian; larger values copied from the pointer),
 * and advance *ptr past it. */
1499 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1501 struct l2cap_conf_opt *opt = *ptr;
1503 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1510 *((u8 *) opt->val) = val;
1514 put_unaligned_le16(val, opt->val);
1518 put_unaligned_le32(val, opt->val);
1522 memcpy(opt->val, (void *) val, len);
1526 *ptr += L2CAP_CONF_OPT_SIZE + len;
1529 static void l2cap_ack_timeout(unsigned long arg)
1531 struct sock *sk = (void *) arg;
1534 l2cap_send_ack(l2cap_pi(sk));
/*
 * Initialise per-channel ERTM state when the channel becomes connected:
 * zero the sequence/ack counters, arm the three ERTM timers
 * (retransmission, monitor, ack), initialise the SREJ and busy queues,
 * set up the local-busy worker and route backlog packets through the
 * ERTM receive path.
 */
1538 static inline void l2cap_ertm_init(struct sock *sk)
1540 l2cap_pi(sk)->expected_ack_seq = 0;
1541 l2cap_pi(sk)->unacked_frames = 0;
1542 l2cap_pi(sk)->buffer_seq = 0;
1543 l2cap_pi(sk)->num_acked = 0;
1544 l2cap_pi(sk)->frames_sent = 0;
1546 setup_timer(&l2cap_pi(sk)->retrans_timer,
1547 l2cap_retrans_timeout, (unsigned long) sk);
1548 setup_timer(&l2cap_pi(sk)->monitor_timer,
1549 l2cap_monitor_timeout, (unsigned long) sk);
1550 setup_timer(&l2cap_pi(sk)->ack_timer,
1551 l2cap_ack_timeout, (unsigned long) sk);
1553 __skb_queue_head_init(SREJ_QUEUE(sk));
1554 __skb_queue_head_init(BUSY_QUEUE(sk));
1556 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Packets queued while the socket is owned by user context are
 * reprocessed through the ERTM receive handler. */
1558 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to use: keep the requested ERTM/streaming mode
 * if the remote's feature mask supports it, otherwise fall back to
 * basic mode. */
1561 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1564 case L2CAP_MODE_STREAMING:
1565 case L2CAP_MODE_ERTM:
1566 if (l2cap_mode_supported(mode, remote_feat_mask))
1570 return L2CAP_MODE_BASIC;
/*
 * Build an L2CAP configuration request in 'data' for this channel.
 * Emits an MTU option when non-default, then a mode-specific RFC option
 * (basic / ERTM / streaming) and, when the remote supports FCS and it
 * is disabled locally, an FCS option.  Returns the request length
 * (ptr - data, in the trailing code not shown here).
 */
1574 int l2cap_build_conf_req(struct sock *sk, void *data)
1576 struct l2cap_pinfo *pi = l2cap_pi(sk);
1577 struct l2cap_conf_req *req = data;
1578 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
1579 void *ptr = req->data;
1581 BT_DBG("sk %p", sk);
/* Only renegotiate the mode on the first request/response exchange. */
1583 if (pi->num_conf_req || pi->num_conf_rsp)
1587 case L2CAP_MODE_STREAMING:
1588 case L2CAP_MODE_ERTM:
1589 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
1594 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
1599 if (pi->imtu != L2CAP_DEFAULT_MTU)
1600 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1603 case L2CAP_MODE_BASIC:
/* Peers that know neither ERTM nor streaming get no RFC option. */
1604 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1605 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
1608 rfc.mode = L2CAP_MODE_BASIC;
1610 rfc.max_transmit = 0;
1611 rfc.retrans_timeout = 0;
1612 rfc.monitor_timeout = 0;
1613 rfc.max_pdu_size = 0;
1615 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1616 (unsigned long) &rfc);
1619 case L2CAP_MODE_ERTM:
1620 rfc.mode = L2CAP_MODE_ERTM;
1621 rfc.txwin_size = pi->tx_win;
1622 rfc.max_transmit = pi->max_tx;
1623 rfc.retrans_timeout = 0;
1624 rfc.monitor_timeout = 0;
1625 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the PDU size so a full I-frame (hdr + control + FCS ~ 10 bytes
 * of overhead) still fits in the ACL connection MTU. */
1626 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1627 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1629 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1630 (unsigned long) &rfc);
1632 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1635 if (pi->fcs == L2CAP_FCS_NONE ||
1636 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1637 pi->fcs = L2CAP_FCS_NONE;
1638 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1642 case L2CAP_MODE_STREAMING:
1643 rfc.mode = L2CAP_MODE_STREAMING;
1645 rfc.max_transmit = 0;
1646 rfc.retrans_timeout = 0;
1647 rfc.monitor_timeout = 0;
1648 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1649 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1650 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1652 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1653 (unsigned long) &rfc);
1655 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1658 if (pi->fcs == L2CAP_FCS_NONE ||
1659 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1660 pi->fcs = L2CAP_FCS_NONE;
1661 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1666 /* FIXME: Need actual value of the flush timeout */
1667 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1668 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1670 req->dcid = cpu_to_le16(pi->dcid);
1671 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated configuration request (pi->conf_req /
 * conf_len) and build our configuration response into 'data'.
 * Unknown non-hint options are echoed back with result UNKNOWN; MTU and
 * RFC options are validated and a mode-specific answer is constructed.
 * Returns the response length, or -ECONNREFUSED when the modes cannot
 * be reconciled.
 */
1676 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1678 struct l2cap_pinfo *pi = l2cap_pi(sk);
1679 struct l2cap_conf_rsp *rsp = data;
1680 void *ptr = rsp->data;
1681 void *req = pi->conf_req;
1682 int len = pi->conf_len;
1683 int type, hint, olen;
1685 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1686 u16 mtu = L2CAP_DEFAULT_MTU;
1687 u16 result = L2CAP_CONF_SUCCESS;
1689 BT_DBG("sk %p", sk);
1691 while (len >= L2CAP_CONF_OPT_SIZE) {
1692 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options (high bit set) may be silently ignored. */
1694 hint = type & L2CAP_CONF_HINT;
1695 type &= L2CAP_CONF_MASK;
1698 case L2CAP_CONF_MTU:
1702 case L2CAP_CONF_FLUSH_TO:
1706 case L2CAP_CONF_QOS:
1709 case L2CAP_CONF_RFC:
1710 if (olen == sizeof(rfc))
1711 memcpy(&rfc, (void *) val, olen);
1714 case L2CAP_CONF_FCS:
1715 if (val == L2CAP_FCS_NONE)
1716 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: echo its type back and reject. */
1724 result = L2CAP_CONF_UNKNOWN;
1725 *((u8 *) ptr++) = type;
1730 if (pi->num_conf_rsp || pi->num_conf_req > 1)
1734 case L2CAP_MODE_STREAMING:
1735 case L2CAP_MODE_ERTM:
1736 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1737 pi->mode = l2cap_select_mode(rfc.mode,
1738 pi->conn->feat_mask);
1742 if (pi->mode != rfc.mode)
1743 return -ECONNREFUSED;
1749 if (pi->mode != rfc.mode) {
1750 result = L2CAP_CONF_UNACCEPT;
1751 rfc.mode = pi->mode;
/* Only one mode renegotiation round is allowed. */
1753 if (pi->num_conf_rsp == 1)
1754 return -ECONNREFUSED;
1756 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1757 sizeof(rfc), (unsigned long) &rfc);
1761 if (result == L2CAP_CONF_SUCCESS) {
1762 /* Configure output options and let the other side know
1763 * which ones we don't like. */
1765 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1766 result = L2CAP_CONF_UNACCEPT;
1769 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1771 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1774 case L2CAP_MODE_BASIC:
1775 pi->fcs = L2CAP_FCS_NONE;
1776 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1779 case L2CAP_MODE_ERTM:
1780 pi->remote_tx_win = rfc.txwin_size;
1781 pi->remote_max_tx = rfc.max_transmit;
1783 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1784 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1786 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): rfc.retrans_timeout/monitor_timeout are wire (LE)
 * fields being filled from host-order constants, so the correct
 * conversion is cpu_to_le16(), as used for max_pdu_size above.
 * le16_to_cpu() here produces byte-swapped values on big-endian
 * hosts — endianness bug. */
1788 rfc.retrans_timeout =
1789 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1790 rfc.monitor_timeout =
1791 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1793 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1795 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1796 sizeof(rfc), (unsigned long) &rfc);
1800 case L2CAP_MODE_STREAMING:
1801 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1802 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1804 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1806 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1808 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1809 sizeof(rfc), (unsigned long) &rfc);
1814 result = L2CAP_CONF_UNACCEPT;
1816 memset(&rfc, 0, sizeof(rfc));
1817 rfc.mode = pi->mode;
1820 if (result == L2CAP_CONF_SUCCESS)
1821 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1823 rsp->scid = cpu_to_le16(pi->dcid);
1824 rsp->result = cpu_to_le16(result);
1825 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's configuration response and build our follow-up
 * configuration request into 'data'.  Adjusts local MTU/flush/RFC
 * parameters from the response; on success, records the negotiated
 * ERTM/streaming timeouts and MPS.  Returns the new request length or
 * -ECONNREFUSED on an unacceptable mode.
 */
1830 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1832 struct l2cap_pinfo *pi = l2cap_pi(sk);
1833 struct l2cap_conf_req *req = data;
1834 void *ptr = req->data;
/* NOTE(review): rfc is not initialised; if the response carries no RFC
 * option, rfc.mode and the timeout fields below are read from
 * uninitialised stack — should be given a default like in
 * l2cap_parse_conf_req(). */
1837 struct l2cap_conf_rfc rfc;
1839 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1841 while (len >= L2CAP_CONF_OPT_SIZE) {
1842 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1845 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the allowed minimum: reject and
 * counter-propose the minimum. */
1846 if (val < L2CAP_DEFAULT_MIN_MTU) {
1847 *result = L2CAP_CONF_UNACCEPT;
1848 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1851 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1854 case L2CAP_CONF_FLUSH_TO:
1856 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1860 case L2CAP_CONF_RFC:
1861 if (olen == sizeof(rfc))
1862 memcpy(&rfc, (void *)val, olen);
1864 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1865 rfc.mode != pi->mode)
1866 return -ECONNREFUSED;
1870 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1871 sizeof(rfc), (unsigned long) &rfc);
/* Basic mode cannot be renegotiated away by the peer. */
1876 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1877 return -ECONNREFUSED;
1879 pi->mode = rfc.mode;
1881 if (*result == L2CAP_CONF_SUCCESS) {
1883 case L2CAP_MODE_ERTM:
1884 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1885 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1886 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1888 case L2CAP_MODE_STREAMING:
1889 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1893 req->dcid = cpu_to_le16(pi->dcid);
1894 req->flags = cpu_to_le16(0x0000);
/* Build a bare (option-less) configuration response with the given
 * result and continuation flags; returns the response length. */
1899 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1901 struct l2cap_conf_rsp *rsp = data;
1902 void *ptr = rsp->data;
1904 BT_DBG("sk %p", sk);
1906 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1907 rsp->result = cpu_to_le16(result);
1908 rsp->flags = cpu_to_le16(flags);
/*
 * Extract the RFC option from a successful configuration response and
 * record the negotiated timeouts/MPS for ERTM or streaming mode.
 * No-op for basic-mode channels.
 */
1913 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1915 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* NOTE(review): rfc is uninitialised — if the response contains no RFC
 * option the switch below reads uninitialised stack memory. */
1918 struct l2cap_conf_rfc rfc;
1920 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1922 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1925 while (len >= L2CAP_CONF_OPT_SIZE) {
1926 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1929 case L2CAP_CONF_RFC:
1930 if (olen == sizeof(rfc))
1931 memcpy(&rfc, (void *)val, olen);
1938 case L2CAP_MODE_ERTM:
1939 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1940 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1941 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1943 case L2CAP_MODE_STREAMING:
1944 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle a Command Reject.  If it answers our outstanding Information
 * Request (ident match), treat the feature-mask exchange as done and
 * kick off pending channel setup.
 */
1948 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1950 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (reason 0x0000) is acted upon. */
1952 if (rej->reason != 0x0000)
1955 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1956 cmd->ident == conn->info_ident) {
1957 del_timer(&conn->info_timer);
1959 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1960 conn->info_ident = 0;
1962 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listening socket for
 * the PSM, enforce link security (except for SDP), allocate and attach
 * a child socket, then send a Connection Response.  Depending on
 * security/defer state the result is SUCCESS or PEND; a PEND/NO_INFO
 * result also triggers the feature-mask Information Request exchange.
 */
1968 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1970 struct l2cap_chan_list *list = &conn->chan_list;
1971 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1972 struct l2cap_conn_rsp rsp;
1973 struct sock *parent, *sk = NULL;
1974 int result, status = L2CAP_CS_NO_INFO;
1976 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1977 __le16 psm = req->psm;
1979 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1981 /* Check if we have socket listening on psm */
1982 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1984 result = L2CAP_CR_BAD_PSM;
1988 bh_lock_sock(parent);
1990 /* Check if the ACL is secure enough (if not SDP) */
1991 if (psm != cpu_to_le16(0x0001) &&
1992 !hci_conn_check_link_mode(conn->hcon)) {
1993 conn->disc_reason = 0x05;
1994 result = L2CAP_CR_SEC_BLOCK;
1998 result = L2CAP_CR_NO_MEM;
2000 /* Check for backlog size */
2001 if (sk_acceptq_is_full(parent)) {
2002 BT_DBG("backlog full %d", parent->sk_ack_backlog)
2006 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2010 write_lock_bh(&list->lock);
2012 /* Check if we already have channel with that dcid */
2013 if (__l2cap_get_chan_by_dcid(list, scid)) {
2014 write_unlock_bh(&list->lock);
2015 sock_set_flag(sk, SOCK_ZAPPED);
2016 l2cap_sock_kill(sk);
2020 hci_conn_hold(conn->hcon);
2022 l2cap_sock_init(sk, parent);
2023 bacpy(&bt_sk(sk)->src, conn->src);
2024 bacpy(&bt_sk(sk)->dst, conn->dst);
/* Remote scid becomes our dcid for this channel. */
2025 l2cap_pi(sk)->psm = psm;
2026 l2cap_pi(sk)->dcid = scid;
2028 __l2cap_chan_add(conn, sk, parent);
2029 dcid = l2cap_pi(sk)->scid;
2031 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2033 l2cap_pi(sk)->ident = cmd->ident;
2035 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2036 if (l2cap_check_security(sk)) {
2037 if (bt_sk(sk)->defer_setup) {
2038 sk->sk_state = BT_CONNECT2;
2039 result = L2CAP_CR_PEND;
2040 status = L2CAP_CS_AUTHOR_PEND;
2041 parent->sk_data_ready(parent, 0);
2043 sk->sk_state = BT_CONFIG;
2044 result = L2CAP_CR_SUCCESS;
2045 status = L2CAP_CS_NO_INFO;
2048 sk->sk_state = BT_CONNECT2;
2049 result = L2CAP_CR_PEND;
2050 status = L2CAP_CS_AUTHEN_PEND;
2053 sk->sk_state = BT_CONNECT2;
2054 result = L2CAP_CR_PEND;
2055 status = L2CAP_CS_NO_INFO;
2058 write_unlock_bh(&list->lock);
2061 bh_unlock_sock(parent);
2064 rsp.scid = cpu_to_le16(scid);
2065 rsp.dcid = cpu_to_le16(dcid);
2066 rsp.result = cpu_to_le16(result);
2067 rsp.status = cpu_to_le16(status);
2068 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this link: query the remote feature mask. */
2070 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2071 struct l2cap_info_req info;
2072 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2074 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2075 conn->info_ident = l2cap_get_ident(conn);
2077 mod_timer(&conn->info_timer, jiffies +
2078 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2080 l2cap_send_cmd(conn, conn->info_ident,
2081 L2CAP_INFO_REQ, sizeof(info), &info);
/* Channel accepted: immediately start configuration. */
2084 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
2085 result == L2CAP_CR_SUCCESS) {
2087 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2088 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2089 l2cap_build_conf_req(sk, buf), buf);
2090 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle a Connection Response to our Connection Request.  On SUCCESS
 * move to BT_CONFIG and send the first configuration request; on PEND
 * just mark the channel as pending; on any refusal tear the channel
 * down (deferred via timer when the socket is user-locked).
 */
2096 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2098 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2099 u16 scid, dcid, result, status;
2103 scid = __le16_to_cpu(rsp->scid);
2104 dcid = __le16_to_cpu(rsp->dcid);
2105 result = __le16_to_cpu(rsp->result);
2106 status = __le16_to_cpu(rsp->status);
2108 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid==0 in pending responses; fall back to ident lookup then. */
2111 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2115 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2121 case L2CAP_CR_SUCCESS:
2122 sk->sk_state = BT_CONFIG;
2123 l2cap_pi(sk)->ident = 0;
2124 l2cap_pi(sk)->dcid = dcid;
2125 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2127 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
2130 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2132 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2133 l2cap_build_conf_req(sk, req), req);
2134 l2cap_pi(sk)->num_conf_req++;
2138 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2142 /* don't delete l2cap channel if sk is owned by user */
2143 if (sock_owned_by_user(sk)) {
2144 sk->sk_state = BT_DISCONN;
2145 l2cap_sock_clear_timer(sk);
2146 l2cap_sock_set_timer(sk, HZ / 5);
2150 l2cap_chan_del(sk, ECONNREFUSED);
/* Choose the default FCS setting once configuration completes: FCS only
 * applies to ERTM/streaming, and defaults to CRC16 unless the peer
 * explicitly requested no FCS. */
2158 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2160 /* FCS is enabled only in ERTM or streaming mode, if one or both
2163 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2164 pi->fcs = L2CAP_FCS_NONE;
2165 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2166 pi->fcs = L2CAP_FCS_CRC16;
/*
 * Handle a Configuration Request.  Options are accumulated into the
 * channel's conf_req buffer across continuation packets (flag 0x0001);
 * when the final packet arrives the whole request is parsed, a response
 * is sent, and — once both directions are configured — the channel is
 * moved to BT_CONNECTED (initialising ERTM state if needed).
 */
2169 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2171 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2177 dcid = __le16_to_cpu(req->dcid);
2178 flags = __le16_to_cpu(req->flags);
2180 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2182 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Config only makes sense in BT_CONFIG; otherwise reject (0x0002 =
 * invalid CID in request). */
2186 if (sk->sk_state != BT_CONFIG) {
2187 struct l2cap_cmd_rej rej;
2189 rej.reason = cpu_to_le16(0x0002);
2190 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2195 /* Reject if config buffer is too small. */
2196 len = cmd_len - sizeof(*req);
2197 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2198 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2199 l2cap_build_conf_rsp(sk, rsp,
2200 L2CAP_CONF_REJECT, flags), rsp);
2205 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2206 l2cap_pi(sk)->conf_len += len;
2208 if (flags & 0x0001) {
2209 /* Incomplete config. Send empty response. */
2210 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2211 l2cap_build_conf_rsp(sk, rsp,
2212 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2216 /* Complete config. */
2217 len = l2cap_parse_conf_req(sk, rsp);
2219 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2223 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2224 l2cap_pi(sk)->num_conf_rsp++;
2226 /* Reset config buffer. */
2227 l2cap_pi(sk)->conf_len = 0;
2229 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: the channel is ready. */
2232 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2233 set_default_fcs(l2cap_pi(sk));
2235 sk->sk_state = BT_CONNECTED;
2237 l2cap_pi(sk)->next_tx_seq = 0;
2238 l2cap_pi(sk)->expected_tx_seq = 0;
2239 __skb_queue_head_init(TX_QUEUE(sk));
2240 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2241 l2cap_ertm_init(sk);
2243 l2cap_chan_ready(sk);
/* We responded but have not yet sent our own config request. */
2247 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2249 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2250 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2251 l2cap_build_conf_req(sk, buf), buf);
2252 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle a Configuration Response.  SUCCESS records negotiated RFC
 * parameters; UNACCEPT triggers a bounded renegotiation (re-parsing the
 * peer's counter-proposal into a new request); any other result aborts
 * the channel.  When both directions finish, the channel goes
 * BT_CONNECTED.
 */
2260 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2262 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2263 u16 scid, flags, result;
2265 int len = cmd->len - sizeof(*rsp);
2267 scid = __le16_to_cpu(rsp->scid);
2268 flags = __le16_to_cpu(rsp->flags);
2269 result = __le16_to_cpu(rsp->result);
2271 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2272 scid, flags, result);
2274 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2279 case L2CAP_CONF_SUCCESS:
2280 l2cap_conf_rfc_get(sk, rsp->data, len);
2283 case L2CAP_CONF_UNACCEPT:
/* Give up after L2CAP_CONF_MAX_CONF_RSP renegotiation rounds. */
2284 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2287 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2288 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2292 /* throw out any old stored conf requests */
2293 result = L2CAP_CONF_SUCCESS;
2294 len = l2cap_parse_conf_rsp(sk, rsp->data,
2297 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2301 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2302 L2CAP_CONF_REQ, len, req);
2303 l2cap_pi(sk)->num_conf_req++;
2304 if (result != L2CAP_CONF_SUCCESS)
2310 sk->sk_err = ECONNRESET;
2311 l2cap_sock_set_timer(sk, HZ * 5);
2312 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2319 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2321 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2322 set_default_fcs(l2cap_pi(sk));
2324 sk->sk_state = BT_CONNECTED;
2325 l2cap_pi(sk)->next_tx_seq = 0;
2326 l2cap_pi(sk)->expected_tx_seq = 0;
2327 __skb_queue_head_init(TX_QUEUE(sk));
2328 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2329 l2cap_ertm_init(sk);
2331 l2cap_chan_ready(sk);
/*
 * Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, shut the socket down, and delete the channel — deferred via
 * a short timer when the socket is currently user-locked.
 */
2339 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2341 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2342 struct l2cap_disconn_rsp rsp;
2346 scid = __le16_to_cpu(req->scid);
2347 dcid = __le16_to_cpu(req->dcid);
2349 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid refers to our local scid. */
2351 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2355 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2356 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2357 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2359 sk->sk_shutdown = SHUTDOWN_MASK;
2361 /* don't delete l2cap channel if sk is owned by user */
2362 if (sock_owned_by_user(sk)) {
2363 sk->sk_state = BT_DISCONN;
2364 l2cap_sock_clear_timer(sk);
2365 l2cap_sock_set_timer(sk, HZ / 5);
2370 l2cap_chan_del(sk, ECONNRESET);
2373 l2cap_sock_kill(sk);
/*
 * Handle a Disconnection Response to our request: delete the channel
 * (error 0, clean close), deferring via timer if the socket is
 * user-locked.
 */
2377 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2379 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2383 scid = __le16_to_cpu(rsp->scid);
2384 dcid = __le16_to_cpu(rsp->dcid);
2386 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2388 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2392 /* don't delete l2cap channel if sk is owned by user */
2393 if (sock_owned_by_user(sk)) {
2394 sk->sk_state = BT_DISCONN;
2395 l2cap_sock_clear_timer(sk);
2396 l2cap_sock_set_timer(sk, HZ / 5);
2401 l2cap_chan_del(sk, 0);
2404 l2cap_sock_kill(sk);
/*
 * Handle an Information Request: answer feature-mask queries with our
 * extended feature mask, fixed-channel queries with the fixed-channel
 * map, and anything else with NOTSUPP.
 */
2408 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2410 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2413 type = __le16_to_cpu(req->type);
2415 BT_DBG("type 0x%4.4x", type);
2417 if (type == L2CAP_IT_FEAT_MASK) {
2419 u32 feat_mask = l2cap_feat_mask;
2420 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2421 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2422 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming on top of the static feature mask. */
2424 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2426 put_unaligned_le32(feat_mask, rsp->data);
2427 l2cap_send_cmd(conn, cmd->ident,
2428 L2CAP_INFO_RSP, sizeof(buf), buf);
2429 } else if (type == L2CAP_IT_FIXED_CHAN) {
2431 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2432 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2433 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channel bitmap follows the 4-byte response header. */
2434 memcpy(buf + 4, l2cap_fixed_chan, 8);
2435 l2cap_send_cmd(conn, cmd->ident,
2436 L2CAP_INFO_RSP, sizeof(buf), buf);
2438 struct l2cap_info_rsp rsp;
2439 rsp.type = cpu_to_le16(type);
2440 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2441 l2cap_send_cmd(conn, cmd->ident,
2442 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an Information Response.  A feature-mask answer may chain into
 * a fixed-channel query; once the exchange completes (or fails) the
 * connection's pending channels are started.
 */
2448 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2450 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2453 type = __le16_to_cpu(rsp->type);
2454 result = __le16_to_cpu(rsp->result);
2456 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2458 del_timer(&conn->info_timer);
2460 if (result != L2CAP_IR_SUCCESS) {
2461 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2462 conn->info_ident = 0;
2464 l2cap_conn_start(conn);
2469 if (type == L2CAP_IT_FEAT_MASK) {
2470 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask for its fixed-channel map
 * before starting channels. */
2472 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2473 struct l2cap_info_req req;
2474 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2476 conn->info_ident = l2cap_get_ident(conn);
2478 l2cap_send_cmd(conn, conn->info_ident,
2479 L2CAP_INFO_REQ, sizeof(req), &req);
2481 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2482 conn->info_ident = 0;
2484 l2cap_conn_start(conn);
2486 } else if (type == L2CAP_IT_FIXED_CHAN) {
2487 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2488 conn->info_ident = 0;
2490 l2cap_conn_start(conn);
/*
 * Demultiplex the signalling channel: iterate over the commands in the
 * skb, validate each command header, and dispatch to the per-command
 * handler.  Any handler error is answered with a Command Reject.
 */
2496 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2498 u8 *data = skb->data;
2500 struct l2cap_cmd_hdr cmd;
/* Let raw sockets snoop the signalling traffic first. */
2503 l2cap_raw_recv(conn, skb);
2505 while (len >= L2CAP_CMD_HDR_SIZE) {
2507 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2508 data += L2CAP_CMD_HDR_SIZE;
2509 len -= L2CAP_CMD_HDR_SIZE;
2511 cmd_len = le16_to_cpu(cmd.len);
2513 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* ident 0 is reserved; an over-long command means a corrupt PDU. */
2515 if (cmd_len > len || !cmd.ident) {
2516 BT_DBG("corrupted command");
2521 case L2CAP_COMMAND_REJ:
2522 l2cap_command_rej(conn, &cmd, data);
2525 case L2CAP_CONN_REQ:
2526 err = l2cap_connect_req(conn, &cmd, data);
2529 case L2CAP_CONN_RSP:
2530 err = l2cap_connect_rsp(conn, &cmd, data);
2533 case L2CAP_CONF_REQ:
2534 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2537 case L2CAP_CONF_RSP:
2538 err = l2cap_config_rsp(conn, &cmd, data);
2541 case L2CAP_DISCONN_REQ:
2542 err = l2cap_disconnect_req(conn, &cmd, data);
2545 case L2CAP_DISCONN_RSP:
2546 err = l2cap_disconnect_rsp(conn, &cmd, data);
2549 case L2CAP_ECHO_REQ:
2550 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2553 case L2CAP_ECHO_RSP:
2556 case L2CAP_INFO_REQ:
2557 err = l2cap_information_req(conn, &cmd, data);
2560 case L2CAP_INFO_RSP:
2561 err = l2cap_information_rsp(conn, &cmd, data);
2565 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2571 struct l2cap_cmd_rej rej;
2572 BT_DBG("error %d", err);
2574 /* FIXME: Map err to a valid reason */
2575 rej.reason = cpu_to_le16(0);
2576 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the CRC16 FCS of a received ERTM/streaming frame.  The 2-byte
 * trailer is trimmed first, then read from just past the new tail and
 * compared against a CRC computed over the L2CAP header (+2 control
 * bytes) and payload.
 */
2586 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2588 u16 our_fcs, rcv_fcs;
2589 int hdr_size = L2CAP_HDR_SIZE + 2;
2591 if (pi->fcs == L2CAP_FCS_CRC16) {
2592 skb_trim(skb, skb->len - 2);
/* skb->len was just shortened, so data + len points at the FCS. */
2593 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2594 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2596 if (our_fcs != rcv_fcs)
/*
 * After a poll/final exchange, resume transmission: send RNR if we are
 * locally busy, otherwise retransmit/send pending I-frames, and send a
 * bare RR when nothing was transmitted so the peer still gets an ack.
 */
2602 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
2604 struct l2cap_pinfo *pi = l2cap_pi(sk);
2607 pi->frames_sent = 0;
2609 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2611 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2612 control |= L2CAP_SUPER_RCV_NOT_READY;
2613 l2cap_send_sframe(pi, control);
2614 pi->conn_state |= L2CAP_CONN_RNR_SENT;
2617 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
2618 l2cap_retransmit_frames(sk);
2620 l2cap_ertm_send(sk);
/* Nothing went out and we are not busy: ack with a plain RR. */
2622 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2623 pi->frames_sent == 0) {
2624 control |= L2CAP_SUPER_RCV_READY;
2625 l2cap_send_sframe(pi, control);
/*
 * Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq relative to buffer_seq (modulo-64 sequence
 * space).  Duplicate tx_seq entries are detected via the equality check
 * in the scan loop.
 */
2629 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
2631 struct sk_buff *next_skb;
2632 struct l2cap_pinfo *pi = l2cap_pi(sk);
2633 int tx_seq_offset, next_tx_seq_offset;
2635 bt_cb(skb)->tx_seq = tx_seq;
2636 bt_cb(skb)->sar = sar;
2638 next_skb = skb_peek(SREJ_QUEUE(sk));
2640 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Offsets are distances from buffer_seq in the mod-64 window. */
2644 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
2645 if (tx_seq_offset < 0)
2646 tx_seq_offset += 64;
2649 if (bt_cb(next_skb)->tx_seq == tx_seq)
2652 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2653 pi->buffer_seq) % 64;
2654 if (next_tx_seq_offset < 0)
2655 next_tx_seq_offset += 64;
/* Found the first queued frame that is logically later: insert
 * the new frame just before it. */
2657 if (next_tx_seq_offset > tx_seq_offset) {
2658 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
2662 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
2665 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
2667 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * ERTM SDU reassembly.  Dispatch on the SAR bits of the frame control:
 * unsegmented frames are queued directly; START allocates the
 * reassembly buffer from the embedded SDU length; CONTINUE/END append
 * and, on END, clone and deliver the complete SDU.  SAR state
 * violations fall through to a disconnect at the bottom.
 */
2672 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2674 struct l2cap_pinfo *pi = l2cap_pi(sk);
2675 struct sk_buff *_skb;
2678 switch (control & L2CAP_CTRL_SAR) {
2679 case L2CAP_SDU_UNSEGMENTED:
/* Unsegmented frame while mid-SDU is a protocol violation. */
2680 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
2683 err = sock_queue_rcv_skb(sk, skb);
2689 case L2CAP_SDU_START:
2690 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
2693 pi->sdu_len = get_unaligned_le16(skb->data);
2695 if (pi->sdu_len > pi->imtu)
2698 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2702 /* pull sdu_len bytes only after alloc, because of Local Busy
2703 * condition we have to be sure that this will be executed
2704 * only once, i.e., when alloc does not fail */
2707 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2709 pi->conn_state |= L2CAP_CONN_SAR_SDU;
2710 pi->partial_sdu_len = skb->len;
2713 case L2CAP_SDU_CONTINUE:
2714 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2720 pi->partial_sdu_len += skb->len;
2721 if (pi->partial_sdu_len > pi->sdu_len)
2724 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2729 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* END frame: on the first attempt account the final segment;
 * on a SAR_RETRY re-entry the lengths were already updated. */
2735 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
2736 pi->partial_sdu_len += skb->len;
2738 if (pi->partial_sdu_len > pi->imtu)
2741 if (pi->partial_sdu_len != pi->sdu_len)
2744 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2747 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
/* Clone or queueing failed: remember to retry delivery later. */
2749 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2753 err = sock_queue_rcv_skb(sk, _skb);
2756 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2760 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2761 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* Unrecoverable reassembly error: tear the channel down. */
2775 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/*
 * Try to drain the local-busy queue into the ERTM reassembly path.
 * If a frame still cannot be delivered it is requeued and -EBUSY-style
 * failure propagates; once the queue empties, exit the local busy
 * condition, poll the peer (RR+P) and start the monitor timer.
 */
2780 static int l2cap_try_push_rx_skb(struct sock *sk)
2782 struct l2cap_pinfo *pi = l2cap_pi(sk);
2783 struct sk_buff *skb;
2787 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
2788 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2789 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still busy: put the frame back at the head and stop. */
2791 skb_queue_head(BUSY_QUEUE(sk), skb);
2795 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
2798 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We previously sent RNR: poll the peer to resume its sender. */
2801 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2802 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2803 l2cap_send_sframe(pi, control);
2804 l2cap_pi(sk)->retry_count = 1;
2806 del_timer(&pi->retrans_timer);
2807 __mod_monitor_timer();
2809 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
2812 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2813 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
2815 BT_DBG("sk %p, Exit local busy", sk);
/*
 * Workqueue handler for the local-busy condition: repeatedly sleep and
 * retry pushing queued rx frames until the queue drains, a signal or
 * socket error occurs, or the retry budget is exhausted (which
 * disconnects the channel).
 */
2820 static void l2cap_busy_work(struct work_struct *work)
2822 DECLARE_WAITQUEUE(wait, current);
2823 struct l2cap_pinfo *pi =
2824 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast
 * recovers the owning socket. */
2825 struct sock *sk = (struct sock *)pi;
2826 int n_tries = 0, timeo = HZ/5, err;
2827 struct sk_buff *skb;
2831 add_wait_queue(sk_sleep(sk), &wait);
2832 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
2833 set_current_state(TASK_INTERRUPTIBLE);
2835 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
2837 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
2844 if (signal_pending(current)) {
2845 err = sock_intr_errno(timeo);
2850 timeo = schedule_timeout(timeo);
2853 err = sock_error(sk);
2857 if (l2cap_try_push_rx_skb(sk) == 0)
2861 set_current_state(TASK_RUNNING);
2862 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * Deliver one in-sequence ERTM frame.  While locally busy, frames are
 * appended to the busy queue and drained via l2cap_try_push_rx_skb().
 * A delivery failure enters the local-busy state: queue the frame,
 * send RNR, stop acking, and schedule the busy worker.
 */
2867 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
2869 struct l2cap_pinfo *pi = l2cap_pi(sk);
2872 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2873 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
2874 __skb_queue_tail(BUSY_QUEUE(sk), skb);
2875 return l2cap_try_push_rx_skb(sk);
2880 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
2882 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
2886 /* Busy Condition */
2887 BT_DBG("sk %p, Enter local busy", sk);
2889 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
2890 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
2891 __skb_queue_tail(BUSY_QUEUE(sk), skb);
2893 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2894 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
2895 l2cap_send_sframe(pi, sctrl);
2897 pi->conn_state |= L2CAP_CONN_RNR_SENT;
/* No point acking while we cannot accept more data. */
2899 del_timer(&pi->ack_timer);
2901 queue_work(_busy_wq, &pi->busy_work);
/*
 * Streaming-mode SDU reassembly.  Like the ERTM variant but lossy:
 * a SAR state mismatch just drops the partial SDU rather than
 * disconnecting, and an END frame whose total length does not match
 * sdu_len is silently discarded.
 */
2906 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2908 struct l2cap_pinfo *pi = l2cap_pi(sk);
2909 struct sk_buff *_skb;
2913 * TODO: We have to notify the userland if some data is lost with the
2917 switch (control & L2CAP_CTRL_SAR) {
2918 case L2CAP_SDU_UNSEGMENTED:
2919 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
2924 err = sock_queue_rcv_skb(sk, skb);
2930 case L2CAP_SDU_START:
2931 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes of a START frame carry the SDU length. */
2936 pi->sdu_len = get_unaligned_le16(skb->data);
2939 if (pi->sdu_len > pi->imtu) {
2944 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2950 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2952 pi->conn_state |= L2CAP_CONN_SAR_SDU;
2953 pi->partial_sdu_len = skb->len;
2957 case L2CAP_SDU_CONTINUE:
2958 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2961 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2963 pi->partial_sdu_len += skb->len;
2964 if (pi->partial_sdu_len > pi->sdu_len)
2972 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2975 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2977 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
2978 pi->partial_sdu_len += skb->len;
2980 if (pi->partial_sdu_len > pi->imtu)
2983 if (pi->partial_sdu_len == pi->sdu_len) {
/* NOTE(review): skb_clone() can return NULL under GFP_ATOMIC;
 * no NULL check is visible before queueing — verify upstream. */
2984 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
2985 err = sock_queue_rcv_skb(sk, _skb);
/*
 * After a missing frame is recovered, deliver the run of consecutive
 * frames now available at the head of the SREJ queue, advancing
 * buffer_seq_srej and tx_seq (mod 64) for each one delivered.
 */
3000 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3002 struct sk_buff *skb;
3005 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first gap in the sequence. */
3006 if (bt_cb(skb)->tx_seq != tx_seq)
3009 skb = skb_dequeue(SREJ_QUEUE(sk));
3010 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3011 l2cap_ertm_reassembly_sdu(sk, skb, control);
3012 l2cap_pi(sk)->buffer_seq_srej =
3013 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3014 tx_seq = (tx_seq + 1) % 64;
/*
 * Walk the pending SREJ list: entries up to and including tx_seq are
 * dropped (that frame arrived); each remaining entry has its SREJ
 * S-frame retransmitted and is moved to the list tail.
 */
3018 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3020 struct l2cap_pinfo *pi = l2cap_pi(sk);
3021 struct srej_list *l, *tmp;
3024 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3025 if (l->tx_seq == tx_seq) {
3030 control = L2CAP_SUPER_SELECT_REJECT;
3031 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3032 l2cap_send_sframe(pi, control);
3034 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * A frame arrived ahead of expected_tx_seq: send one SREJ S-frame per
 * missing sequence number, recording each in the SREJ list, then step
 * expected_tx_seq past the received frame.
 */
3038 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3040 struct l2cap_pinfo *pi = l2cap_pi(sk);
3041 struct srej_list *new;
3044 while (tx_seq != pi->expected_tx_seq) {
3045 control = L2CAP_SUPER_SELECT_REJECT;
3046 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3047 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) can fail; 'new' is
 * dereferenced without a NULL check — potential NULL deref
 * under memory pressure. */
3049 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3050 new->tx_seq = pi->expected_tx_seq;
3051 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3052 list_add_tail(&new->list, SREJ_LIST(sk));
3054 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* ERTM I-frame receive path.  Validates the incoming tx_seq against the
 * 64-entry sequence window, handles the F-bit (stops the monitor timer and
 * leaves WAIT_F), drops newly acknowledged frames, and drives the SREJ
 * recovery state machine: in-order frames are pushed up, out-of-order
 * frames either fill a known SREJ gap or open a new SREJ round.
 * Returns 0 on the paths visible here; error paths are elided.
 */
3057 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3059 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3060 	u8 tx_seq = __get_txseq(rx_control);
3061 	u8 req_seq = __get_reqseq(rx_control);
3062 	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3063 	int tx_seq_offset, expected_tx_seq_offset;
	/* Ack roughly every tx_win/6 frames so the peer's window keeps moving. */
3064 	int num_to_ack = (pi->tx_win/6) + 1;
3067 	BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
	/* F-bit answers our poll: stop monitoring, restart retransmission
	 * timer if frames are still unacked, and leave the WAIT_F state. */
3070 	if (L2CAP_CTRL_FINAL & rx_control &&
3071 			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3072 		del_timer(&pi->monitor_timer);
3073 		if (pi->unacked_frames > 0)
3074 			__mod_retrans_timer();
3075 		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3078 	pi->expected_ack_seq = req_seq;
3079 	l2cap_drop_acked_frames(sk);
3081 	if (tx_seq == pi->expected_tx_seq)
	/* Offset of the received frame within the modulo-64 window. */
3084 	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3085 	if (tx_seq_offset < 0)
3086 		tx_seq_offset += 64;
3088 	/* invalid tx_seq */
3089 	if (tx_seq_offset >= pi->tx_win) {
3090 		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3094 	if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
	/* Already in SREJ recovery: either this frame fills the oldest gap
	 * (reassemble and drain), or it belongs further along the list. */
3097 	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3098 		struct srej_list *first;
3100 		first = list_first_entry(SREJ_LIST(sk),
3101 				struct srej_list, list);
3102 		if (tx_seq == first->tx_seq) {
3103 			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3104 			l2cap_check_srej_gap(sk, tx_seq);
3106 			list_del(&first->list);
	/* All gaps filled: leave SREJ_SENT and resync buffer_seq. */
3109 			if (list_empty(SREJ_LIST(sk))) {
3110 				pi->buffer_seq = pi->buffer_seq_srej;
3111 				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3113 				BT_DBG("sk %p, Exit SREJ_SENT", sk);
3116 			struct srej_list *l;
3118 			/* duplicated tx_seq */
3119 			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3122 			list_for_each_entry(l, SREJ_LIST(sk), list) {
3123 				if (l->tx_seq == tx_seq) {
3124 					l2cap_resend_srejframe(sk, tx_seq);
3128 			l2cap_send_srejframe(sk, tx_seq);
3131 		expected_tx_seq_offset =
3132 			(pi->expected_tx_seq - pi->buffer_seq) % 64;
3133 		if (expected_tx_seq_offset < 0)
3134 			expected_tx_seq_offset += 64;
3136 		/* duplicated tx_seq */
3137 		if (tx_seq_offset < expected_tx_seq_offset)
	/* First out-of-sequence frame: enter SREJ recovery. */
3140 		pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3142 		BT_DBG("sk %p, Enter SREJ", sk);
3144 		INIT_LIST_HEAD(SREJ_LIST(sk));
3145 		pi->buffer_seq_srej = pi->buffer_seq;
3147 		__skb_queue_head_init(SREJ_QUEUE(sk));
3148 		__skb_queue_head_init(BUSY_QUEUE(sk));
3149 		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3151 		pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3153 		l2cap_send_srejframe(sk, tx_seq);
3155 		del_timer(&pi->ack_timer);
	/* In-order frame path (label elided): advance expected sequence. */
3160 	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3162 	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3163 		bt_cb(skb)->tx_seq = tx_seq;
3164 		bt_cb(skb)->sar = sar;
3165 		__skb_queue_tail(SREJ_QUEUE(sk), skb);
3169 	err = l2cap_push_rx_skb(sk, skb, rx_control);
3173 	if (rx_control & L2CAP_CTRL_FINAL) {
3174 		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3175 			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3177 			l2cap_retransmit_frames(sk);
	/* Piggyback an ack once enough frames have arrived. */
3182 	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3183 	if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver-Ready (RR) S-frame: acknowledge frames up to the
 * peer's ReqSeq, then branch on the P/F bits -- a Poll demands an F-bit
 * response (SREJ tail or RR/RNR), a Final clears REJ recovery, and a
 * plain RR resumes normal transmission.
 */
3193 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3195 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3197 	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
3200 	pi->expected_ack_seq = __get_reqseq(rx_control);
3201 	l2cap_drop_acked_frames(sk);
3203 	if (rx_control & L2CAP_CTRL_POLL) {
	/* Peer polled us: next frame we send must carry the F-bit. */
3204 		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3205 		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3206 			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3207 					(pi->unacked_frames > 0))
3208 				__mod_retrans_timer();
3210 			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3211 			l2cap_send_srejtail(sk);
3213 			l2cap_send_i_or_rr_or_rnr(sk);
3216 	} else if (rx_control & L2CAP_CTRL_FINAL) {
3217 		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
	/* F-bit closes an outstanding REJ exchange; otherwise retransmit. */
3219 		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3220 			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3222 			l2cap_retransmit_frames(sk);
3225 		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3226 				(pi->unacked_frames > 0))
3227 			__mod_retrans_timer();
3229 		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3230 		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3233 			l2cap_ertm_send(sk);
/* Handle a Reject (REJ) S-frame: the peer asks for a go-back-N
 * retransmission starting at ReqSeq.  Acked frames are dropped first;
 * if we are waiting for an F-bit (WAIT_F), mark REJ as acted upon so a
 * later Final does not trigger a second retransmission.
 */
3237 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3239 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3240 	u8 tx_seq = __get_reqseq(rx_control);
3242 	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3244 	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3246 	pi->expected_ack_seq = tx_seq;
3247 	l2cap_drop_acked_frames(sk);
3249 	if (rx_control & L2CAP_CTRL_FINAL) {
	/* A Final REJ only retransmits if REJ wasn't already acted on. */
3250 		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3251 			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3253 			l2cap_retransmit_frames(sk);
3255 		l2cap_retransmit_frames(sk);
3257 		if (pi->conn_state & L2CAP_CONN_WAIT_F)
3258 			pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Select-Reject (SREJ) S-frame: the peer asks for exactly one
 * frame (ReqSeq) to be retransmitted.  P-bit additionally acks up to
 * ReqSeq and demands an F-bit reply; F-bit matches against the saved
 * ReqSeq to avoid retransmitting a frame the poll exchange already sent.
 */
3261 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3263 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3264 	u8 tx_seq = __get_reqseq(rx_control);
3266 	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3268 	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3270 	if (rx_control & L2CAP_CTRL_POLL) {
3271 		pi->expected_ack_seq = tx_seq;
3272 		l2cap_drop_acked_frames(sk);
3274 		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3275 		l2cap_retransmit_one_frame(sk, tx_seq);
3277 		l2cap_ertm_send(sk);
	/* Remember which seq the F-bit reply will correspond to. */
3279 		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3280 			pi->srej_save_reqseq = tx_seq;
3281 			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3283 	} else if (rx_control & L2CAP_CTRL_FINAL) {
	/* F-bit: skip retransmit if this seq was already handled above. */
3284 		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3285 				pi->srej_save_reqseq == tx_seq)
3286 			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3288 			l2cap_retransmit_one_frame(sk, tx_seq);
3290 		l2cap_retransmit_one_frame(sk, tx_seq);
3291 		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3292 			pi->srej_save_reqseq = tx_seq;
3293 			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver-Not-Ready (RNR) S-frame: mark the peer busy, ack
 * frames up to ReqSeq, and stop retransmissions.  A P-bit requires an
 * F-bit reply (RR/RNR, or the SREJ tail if SREJ recovery is active).
 */
3298 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3300 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3301 	u8 tx_seq = __get_reqseq(rx_control);
3303 	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3305 	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3306 	pi->expected_ack_seq = tx_seq;
3307 	l2cap_drop_acked_frames(sk);
3309 	if (rx_control & L2CAP_CTRL_POLL)
3310 		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3312 	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
	/* Peer is busy: no point running the retransmission timer. */
3313 		del_timer(&pi->retrans_timer);
3314 		if (rx_control & L2CAP_CTRL_POLL)
3315 			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
3319 	if (rx_control & L2CAP_CTRL_POLL)
3320 		l2cap_send_srejtail(sk);
3322 		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler based on
 * the supervisory bits.  Shared F-bit handling first: stop the monitor
 * timer, restart retransmission if needed, and leave WAIT_F.
 */
3325 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3327 	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3329 	if (L2CAP_CTRL_FINAL & rx_control &&
3330 			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3331 		del_timer(&l2cap_pi(sk)->monitor_timer);
3332 		if (l2cap_pi(sk)->unacked_frames > 0)
3333 			__mod_retrans_timer();
3334 		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3337 	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3338 	case L2CAP_SUPER_RCV_READY:
3339 		l2cap_data_channel_rrframe(sk, rx_control);
3342 	case L2CAP_SUPER_REJECT:
3343 		l2cap_data_channel_rejframe(sk, rx_control);
3346 	case L2CAP_SUPER_SELECT_REJECT:
3347 		l2cap_data_channel_srejframe(sk, rx_control);
3350 	case L2CAP_SUPER_RCV_NOT_READY:
3351 		l2cap_data_channel_rnrframe(sk, rx_control);
/* Entry point for an ERTM PDU: verify FCS, strip header/trailer sizes,
 * sanity-check payload length against MPS and ReqSeq against the send
 * window, then route the frame to the I-frame or S-frame handler.
 * Invalid ReqSeq or oversized payload tears the channel down.
 */
3359 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3361 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3364 	int len, next_tx_seq_offset, req_seq_offset;
3366 	control = get_unaligned_le16(skb->data);
3371 	 * We can just drop the corrupted I-frame here.
3372 	 * Receiver will miss it and start proper recovery
3373 	 * procedures and ask retransmission.
3375 	if (l2cap_check_fcs(pi, skb))
	/* SAR-start I-frames carry an extra 2-byte SDU length field. */
3378 	if (__is_sar_start(control) && __is_iframe(control))
3381 	if (pi->fcs == L2CAP_FCS_CRC16)
3384 	if (len > pi->mps) {
3385 		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3389 	req_seq = __get_reqseq(control);
	/* Modulo-64 distance of the ack from our last acked frame... */
3390 	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
3391 	if (req_seq_offset < 0)
3392 		req_seq_offset += 64;
	/* ...must not exceed the distance to our next unsent frame. */
3394 	next_tx_seq_offset =
3395 		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
3396 	if (next_tx_seq_offset < 0)
3397 		next_tx_seq_offset += 64;
3399 	/* check for invalid req-seq */
3400 	if (req_seq_offset > next_tx_seq_offset) {
3401 		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3405 	if (__is_iframe(control)) {
3407 			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3411 		l2cap_data_channel_iframe(sk, control, skb);
3415 			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3419 		l2cap_data_channel_sframe(sk, control, skb);
/* Deliver an incoming data frame to the channel identified by cid.
 * Basic mode queues directly to the socket (dropping on MTU overflow);
 * ERTM defers to l2cap_ertm_data_rcv (via backlog if the socket is owned
 * by a user context); Streaming mode checks FCS/MPS and resynchronizes
 * expected_tx_seq to the received tx_seq before reassembly.
 */
3429 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3432 	struct l2cap_pinfo *pi;
3437 	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3439 		BT_DBG("unknown cid 0x%4.4x", cid);
3445 	BT_DBG("sk %p, len %d", sk, skb->len);
3447 	if (sk->sk_state != BT_CONNECTED)
3451 	case L2CAP_MODE_BASIC:
3452 		/* If socket recv buffers overflows we drop data here
3453 		 * which is *bad* because L2CAP has to be reliable.
3454 		 * But we don't have any other choice. L2CAP doesn't
3455 		 * provide flow control mechanism. */
3457 		if (pi->imtu < skb->len)
3460 		if (!sock_queue_rcv_skb(sk, skb))
3464 	case L2CAP_MODE_ERTM:
	/* Socket locked by userspace: defer to backlog processing. */
3465 		if (!sock_owned_by_user(sk)) {
3466 			l2cap_ertm_data_rcv(sk, skb);
3468 			if (sk_add_backlog(sk, skb))
3474 	case L2CAP_MODE_STREAMING:
3475 		control = get_unaligned_le16(skb->data);
3479 		if (l2cap_check_fcs(pi, skb))
3482 		if (__is_sar_start(control))
3485 		if (pi->fcs == L2CAP_FCS_CRC16)
3488 		if (len > pi->mps || len < 0 || __is_sframe(control))
3491 		tx_seq = __get_txseq(control);
	/* Streaming mode never retransmits: just track the sequence. */
3493 		if (pi->expected_tx_seq == tx_seq)
3494 			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3496 			pi->expected_tx_seq = (tx_seq + 1) % 64;
3498 		l2cap_streaming_reassembly_sdu(sk, skb, control);
3503 		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (G-frame) payload to a socket bound to the
 * given PSM on this connection's source address.  Dropped if no suitable
 * socket exists, the socket state is wrong, or the payload exceeds imtu.
 */
3517 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3521 	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3527 	BT_DBG("sk %p, len %d", sk, skb->len);
3529 	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3532 	if (l2cap_pi(sk)->imtu < skb->len)
3535 	if (!sock_queue_rcv_skb(sk, skb))
/* Parse the Basic L2CAP header of a complete frame and demultiplex by
 * CID: signalling channel, connectionless channel (PSM-prefixed payload),
 * or an ordinary data channel.  Frames whose header length disagrees
 * with the actual skb length are rejected (handling elided here).
 */
3547 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3549 	struct l2cap_hdr *lh = (void *) skb->data;
3553 	skb_pull(skb, L2CAP_HDR_SIZE);
3554 	cid = __le16_to_cpu(lh->cid);
3555 	len = __le16_to_cpu(lh->len);
3557 	if (len != skb->len) {
3562 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
3565 	case L2CAP_CID_SIGNALING:
3566 		l2cap_sig_channel(conn, skb);
3569 	case L2CAP_CID_CONN_LESS:
	/* Connectionless frames carry the PSM in the first 2 bytes. */
3570 		psm = get_unaligned_le16(skb->data);
3572 		l2cap_conless_channel(conn, psm, skb);
3576 		l2cap_data_channel(conn, cid, skb);
3581 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from bdaddr?
 * Scans listening L2CAP sockets; an exact local-address match (lm1) takes
 * precedence over wildcard BDADDR_ANY listeners (lm2).  Returns the
 * HCI_LM_* link-mode flags to apply, or 0 to reject / for non-ACL links.
 */
3583 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3585 	int exact = 0, lm1 = 0, lm2 = 0;
3586 	register struct sock *sk;
3587 	struct hlist_node *node;
3589 	if (type != ACL_LINK)
3592 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3594 	/* Find listening sockets and check their link_mode */
3595 	read_lock(&l2cap_sk_list.lock);
3596 	sk_for_each(sk, node, &l2cap_sk_list.head) {
3597 		if (sk->sk_state != BT_LISTEN)
	/* Exact match on this adapter's own address wins over wildcard. */
3600 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3601 			lm1 |= HCI_LM_ACCEPT;
3602 			if (l2cap_pi(sk)->role_switch)
3603 				lm1 |= HCI_LM_MASTER;
3605 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3606 			lm2 |= HCI_LM_ACCEPT;
3607 			if (l2cap_pi(sk)->role_switch)
3608 				lm2 |= HCI_LM_MASTER;
3611 	read_unlock(&l2cap_sk_list.lock);
3613 	return exact ? lm1 : lm2;
/* HCI callback: an outgoing/incoming ACL connection completed.  On
 * success attach an l2cap_conn and mark it ready; on failure tear down
 * any connection state with the translated error code.
 */
3616 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3618 	struct l2cap_conn *conn;
3620 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3622 	if (hcon->type != ACL_LINK)
3626 		conn = l2cap_conn_add(hcon, status);
3628 			l2cap_conn_ready(conn);
3630 		l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the HCI disconnect reason this L2CAP connection
 * wants used when the ACL link goes down.
 */
3635 static int l2cap_disconn_ind(struct hci_conn *hcon)
3637 	struct l2cap_conn *conn = hcon->l2cap_data;
3639 	BT_DBG("hcon %p", hcon);
3641 	if (hcon->type != ACL_LINK || !conn)
3644 	return conn->disc_reason;
/* HCI callback: the ACL link was disconnected -- tear down the L2CAP
 * connection with the translated error code.
 */
3647 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3649 	BT_DBG("hcon %p reason %d", hcon, reason);
3651 	if (hcon->type != ACL_LINK)
3654 	l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel.  Encryption
 * dropped: MEDIUM-security channels get a 5 s grace timer (re-encryption
 * may follow), HIGH-security channels are closed immediately.
 * Encryption (re)established: cancel the grace timer for MEDIUM.
 * Only SEQPACKET/STREAM (connection-oriented) sockets are affected.
 */
3659 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3661 	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3664 	if (encrypt == 0x00) {
3665 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3666 			l2cap_sock_clear_timer(sk);
3667 			l2cap_sock_set_timer(sk, HZ * 5);
3668 		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3669 			__l2cap_sock_close(sk, ECONNREFUSED);
3671 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3672 			l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption request completed with
 * 'status' (0 = success) and current 'encrypt' state.  Walk every channel
 * on the connection: established channels get l2cap_check_encryption();
 * channels in BT_CONNECT now send their deferred Connection Request;
 * channels in BT_CONNECT2 answer the peer with success or a
 * security-block response depending on status.
 */
3676 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3678 	struct l2cap_chan_list *l;
3679 	struct l2cap_conn *conn = hcon->l2cap_data;
3685 	l = &conn->chan_list;
3687 	BT_DBG("conn %p", conn);
3689 	read_lock(&l->lock);
3691 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
	/* Skip channels whose connect is still pending elsewhere. */
3694 		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3699 		if (!status && (sk->sk_state == BT_CONNECTED ||
3700 						sk->sk_state == BT_CONFIG)) {
3701 			l2cap_check_encryption(sk, encrypt);
3706 		if (sk->sk_state == BT_CONNECT) {
	/* Security now in place: issue the deferred Connection Request. */
3708 				struct l2cap_conn_req req;
3709 				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3710 				req.psm  = l2cap_pi(sk)->psm;
3712 				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3713 				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3715 				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3716 					L2CAP_CONN_REQ, sizeof(req), &req);
3718 				l2cap_sock_clear_timer(sk);
3719 				l2cap_sock_set_timer(sk, HZ / 10);
3721 		} else if (sk->sk_state == BT_CONNECT2) {
3722 			struct l2cap_conn_rsp rsp;
3726 				sk->sk_state = BT_CONFIG;
3727 				result = L2CAP_CR_SUCCESS;
	/* Security failed: reject the pending incoming connection. */
3729 				sk->sk_state = BT_DISCONN;
3730 				l2cap_sock_set_timer(sk, HZ / 10);
3731 				result = L2CAP_CR_SEC_BLOCK;
3734 			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
3735 			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
3736 			rsp.result = cpu_to_le16(result);
3737 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3738 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3739 					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3745 	read_unlock(&l->lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * A start fragment (no ACL_CONT flag) is parsed for the Basic header; a
 * complete frame is dispatched immediately, otherwise an rx_skb is
 * allocated to collect continuation fragments until rx_len reaches zero.
 * Length mismatches at any stage mark the connection unreliable (ECOMM).
 */
3750 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3752 	struct l2cap_conn *conn = hcon->l2cap_data;
3755 		conn = l2cap_conn_add(hcon, 0);
3760 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3762 	if (!(flags & ACL_CONT)) {
3763 		struct l2cap_hdr *hdr;
	/* A start while a previous frame is incomplete: drop the old one. */
3769 			BT_ERR("Unexpected start frame (len %d)", skb->len);
3770 			kfree_skb(conn->rx_skb);
3771 			conn->rx_skb = NULL;
3773 			l2cap_conn_unreliable(conn, ECOMM);
3776 		/* Start fragment always begin with Basic L2CAP header */
3777 		if (skb->len < L2CAP_HDR_SIZE) {
3778 			BT_ERR("Frame is too short (len %d)", skb->len);
3779 			l2cap_conn_unreliable(conn, ECOMM);
3783 		hdr = (struct l2cap_hdr *) skb->data;
3784 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3785 		cid = __le16_to_cpu(hdr->cid);
3787 		if (len == skb->len) {
3788 			/* Complete frame received */
3789 			l2cap_recv_frame(conn, skb);
3793 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3795 		if (skb->len > len) {
3796 			BT_ERR("Frame is too long (len %d, expected len %d)",
3798 			l2cap_conn_unreliable(conn, ECOMM);
	/* Pre-check the target channel's MTU before buffering fragments. */
3802 		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3804 		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
3805 			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
3806 					len, l2cap_pi(sk)->imtu);
3808 			l2cap_conn_unreliable(conn, ECOMM);
3815 		/* Allocate skb for the complete frame (with header) */
3816 		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3820 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
	/* Bytes still expected from continuation fragments. */
3822 		conn->rx_len = len - skb->len;
3824 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
3826 		if (!conn->rx_len) {
3827 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3828 			l2cap_conn_unreliable(conn, ECOMM);
3832 		if (skb->len > conn->rx_len) {
3833 			BT_ERR("Fragment is too long (len %d, expected %d)",
3834 					skb->len, conn->rx_len);
3835 			kfree_skb(conn->rx_skb);
3836 			conn->rx_skb = NULL;
3838 			l2cap_conn_unreliable(conn, ECOMM);
3842 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3844 		conn->rx_len -= skb->len;
3846 		if (!conn->rx_len) {
3847 			/* Complete frame received */
3848 			l2cap_recv_frame(conn, conn->rx_skb);
3849 			conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dump one
 * line per L2CAP socket (addresses, state, PSM, MTUs, security level)
 * under the socket-list read lock.
 */
3858 static int l2cap_debugfs_show(struct seq_file *f, void *p)
3861 	struct hlist_node *node;
3863 	read_lock_bh(&l2cap_sk_list.lock);
3865 	sk_for_each(sk, node, &l2cap_sk_list.head) {
3866 		struct l2cap_pinfo *pi = l2cap_pi(sk);
3868 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3869 					batostr(&bt_sk(sk)->src),
3870 					batostr(&bt_sk(sk)->dst),
3871 					sk->sk_state, __le16_to_cpu(pi->psm),
3873 					pi->imtu, pi->omtu, pi->sec_level);
3876 	read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file single-show handler. */
3881 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3883 	return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
3886 static const struct file_operations l2cap_debugfs_fops = {
3887 	.open		= l2cap_debugfs_open,
3889 	.llseek		= seq_lseek,
3890 	.release	= single_release,
3893 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: connection
 * indications/confirmations, disconnects, security events and ACL data.
 */
3895 static struct hci_proto l2cap_hci_proto = {
3897 	.id		= HCI_PROTO_L2CAP,
3898 	.connect_ind	= l2cap_connect_ind,
3899 	.connect_cfm	= l2cap_connect_cfm,
3900 	.disconn_ind	= l2cap_disconn_ind,
3901 	.disconn_cfm	= l2cap_disconn_cfm,
3902 	.security_cfm	= l2cap_security_cfm,
3903 	.recv_acldata	= l2cap_recv_acldata
/* Module init: register the socket layer, create the busy-work queue,
 * register with the HCI core, and (best-effort) create the debugfs entry.
 * Unwinds in reverse order on failure (error paths partially elided).
 */
3906 static int __init l2cap_init(void)
3910 	err = l2cap_init_sockets();
3914 	_busy_wq = create_singlethread_workqueue("l2cap");
3920 	err = hci_register_proto(&l2cap_hci_proto);
3922 		BT_ERR("L2CAP protocol registration failed");
3923 		bt_sock_unregister(BTPROTO_L2CAP);
	/* debugfs failure is non-fatal: log and continue. */
3928 	l2cap_debugfs = debugfs_create_file("l2cap", 0444,
3929 				bt_debugfs, NULL, &l2cap_debugfs_fops);
3931 		BT_ERR("Failed to create L2CAP debug file");
3934 	BT_INFO("L2CAP ver %s", VERSION);
3935 	BT_INFO("L2CAP socket layer initialized");
3940 	destroy_workqueue(_busy_wq);
3941 	l2cap_cleanup_sockets();
/* Module exit: remove debugfs entry, drain and destroy the work queue,
 * unregister from HCI, and tear down the socket layer.
 */
3945 static void __exit l2cap_exit(void)
3947 	debugfs_remove(l2cap_debugfs);
3949 	flush_workqueue(_busy_wq);
3950 	destroy_workqueue(_busy_wq);
3952 	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3953 		BT_ERR("L2CAP protocol unregistration failed");
3955 	l2cap_cleanup_sockets();
/* Exported no-op: referencing this symbol lets dependent modules pull in
 * the L2CAP module automatically without using any other export.
 */
3958 void l2cap_load(void)
3960 	/* Dummy function to trigger automatic L2CAP module loading by
3961 	 * other modules that use L2CAP sockets but don't use any other
3962 	 * symbols from it. */
3964 EXPORT_SYMBOL(l2cap_load);
3966 module_init(l2cap_init);
3967 module_exit(l2cap_exit);
3969 module_param(disable_ertm, bool, 0644);
3970 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
3972 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
3973 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
3974 MODULE_VERSION(VERSION);
3975 MODULE_LICENSE("GPL");
3976 MODULE_ALIAS("bt-proto-0");