2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth L2CAP core and sockets. */
28 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/capability.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/interrupt.h>
40 #include <linux/socket.h>
41 #include <linux/skbuff.h>
42 #include <linux/list.h>
43 #include <linux/device.h>
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #include <linux/uaccess.h>
47 #include <linux/crc16.h>
50 #include <asm/system.h>
51 #include <asm/unaligned.h>
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
55 #include <net/bluetooth/l2cap.h>
/* ---- Module constants, globals and forward declarations ----
 * NOTE(review): this chunk is extraction residue -- the leading integer on
 * each line is the original file's line number, and many lines (braces,
 * blanks, else/return) were dropped by the extractor. Comments added here
 * annotate intent only; tokens are untouched. */
57 #define VERSION "2.14"
/* ERTM (Enhanced Retransmission Mode) disabled by default. */
59 static int enable_ertm = 0;
/* Local feature mask and fixed-channel map (how they are advertised is not
 * visible in this chunk -- presumably via L2CAP information responses). */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static const struct proto_ops l2cap_sock_ops;
/* Workqueue used by the ERTM local-busy handling (l2cap_busy_work). */
66 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
68 static struct bt_sock_list l2cap_sk_list = {
69 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for symbols used before their definitions. */
72 static void l2cap_busy_work(struct work_struct *work);
74 static void __l2cap_sock_close(struct sock *sk, int reason);
75 static void l2cap_sock_close(struct sock *sk);
76 static void l2cap_sock_kill(struct sock *sk);
78 static int l2cap_build_conf_req(struct sock *sk, void *data);
79 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
80 u8 code, u8 ident, u16 dlen, void *data);
82 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
84 /* ---- L2CAP timers ---- */
/* Socket timer expiry handler (timer/softirq context): close the socket
 * with an error code reflecting the state it was stuck in. The dropped
 * `else` branch (plain ETIMEDOUT default) is restored here -- without it
 * `reason` could be used uninitialized. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
110 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
112 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
113 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
116 static void l2cap_sock_clear_timer(struct sock *sk)
118 BT_DBG("sock %p state %d", sk, sk->sk_state);
119 sk_stop_timer(sk, &sk->sk_timer);
122 /* ---- L2CAP channels ---- */
123 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->dcid == cid)
133 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
137 if (l2cap_pi(s)->scid == cid)
143 /* Find channel with given SCID.
144 * Returns locked socket */
145 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
149 s = __l2cap_get_chan_by_scid(l, cid);
152 read_unlock(&l->lock);
156 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
160 if (l2cap_pi(s)->ident == ident)
166 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
170 s = __l2cap_get_chan_by_ident(l, ident);
173 read_unlock(&l->lock);
177 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
179 u16 cid = L2CAP_CID_DYN_START;
181 for (; cid < L2CAP_CID_DYN_END; cid++) {
182 if (!__l2cap_get_chan_by_scid(l, cid))
189 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
194 l2cap_pi(l->head)->prev_c = sk;
196 l2cap_pi(sk)->next_c = l->head;
197 l2cap_pi(sk)->prev_c = NULL;
201 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
203 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
205 write_lock_bh(&l->lock);
210 l2cap_pi(next)->prev_c = prev;
212 l2cap_pi(prev)->next_c = next;
213 write_unlock_bh(&l->lock);
/* Bind a channel (socket) to a connection: record the backpointer, assign
 * source/destination CIDs by socket type, link into the channel list and,
 * when a listening parent is given, enqueue on its accept queue.
 * NOTE(review): extraction dropped some lines here (final else keyword for
 * the raw-socket branch, braces, the `if (parent)` guard -- confirm against
 * the original file). */
218 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
220 struct l2cap_chan_list *l = &conn->chan_list;
222 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
223 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = remote user terminated connection (default disconnect reason). */
225 conn->disc_reason = 0x13;
227 l2cap_pi(sk)->conn = conn;
229 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
230 /* Alloc CID for connection-oriented socket */
231 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
232 } else if (sk->sk_type == SOCK_DGRAM) {
233 /* Connectionless socket */
234 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
235 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
236 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
238 /* Raw socket can send/recv signalling messages only */
239 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
240 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
241 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
244 __l2cap_chan_link(l, sk);
247 bt_accept_enqueue(parent, sk);
/* Delete channel.
 * Detaches the channel from its connection, marks the socket closed and
 * zapped, notifies the accept()ing parent (if any), and for ERTM channels
 * stops all three timers and purges the srej/busy queues and SREJ list. */
251 * Must be called on the locked socket. */
252 static void l2cap_chan_del(struct sock *sk, int err)
254 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
255 struct sock *parent = bt_sk(sk)->parent;
257 l2cap_sock_clear_timer(sk);
259 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
262 /* Unlink from channel list */
263 l2cap_chan_unlink(&conn->chan_list, sk);
264 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL link reference held by this channel. */
265 hci_conn_put(conn->hcon);
268 sk->sk_state = BT_CLOSED;
269 sock_set_flag(sk, SOCK_ZAPPED);
/* If a listening parent exists: unlink from its accept queue and wake it. */
275 bt_accept_unlink(sk);
276 parent->sk_data_ready(parent, 0);
278 sk->sk_state_change(sk);
280 skb_queue_purge(TX_QUEUE(sk));
282 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
283 struct srej_list *l, *tmp;
285 del_timer(&l2cap_pi(sk)->retrans_timer);
286 del_timer(&l2cap_pi(sk)->monitor_timer);
287 del_timer(&l2cap_pi(sk)->ack_timer);
289 skb_queue_purge(SREJ_QUEUE(sk));
290 skb_queue_purge(BUSY_QUEUE(sk));
/* Free every pending SREJ entry (body truncated by extraction). */
292 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Map the channel's security level to an HCI authentication requirement
 * and ask the HCI layer to enforce it on the underlying ACL link.
 * PSM 0x0001 (SDP) never bonds; BT_SECURITY_LOW is downgraded to SDP level
 * for it. Returns the result of hci_conn_security() (nonzero = satisfied). */
299 /* Service level security */
300 static inline int l2cap_check_security(struct sock *sk)
302 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
305 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
306 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
307 auth_type = HCI_AT_NO_BONDING_MITM;
309 auth_type = HCI_AT_NO_BONDING;
311 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
312 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
/* Non-SDP PSMs: general bonding, MITM-protected for BT_SECURITY_HIGH.
 * NOTE(review): case `break;`s and the default label were dropped by
 * extraction. */
314 switch (l2cap_pi(sk)->sec_level) {
315 case BT_SECURITY_HIGH:
316 auth_type = HCI_AT_GENERAL_BONDING_MITM;
318 case BT_SECURITY_MEDIUM:
319 auth_type = HCI_AT_GENERAL_BONDING;
322 auth_type = HCI_AT_NO_BONDING;
327 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
331 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
335 /* Get next available identificator.
336 * 1 - 128 are used by kernel.
337 * 129 - 199 are reserved.
338 * 200 - 254 are used by utilities like l2ping, etc.
341 spin_lock_bh(&conn->lock);
343 if (++conn->tx_ident > 128)
348 spin_unlock_bh(&conn->lock);
353 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
355 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
357 BT_DBG("code 0x%2.2x", code);
362 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit one ERTM S-frame carrying `control`: stamps the
 * frame-type bit, pending F/P bits, the L2CAP header, and an optional
 * CRC16 FCS over the whole frame. No-op unless the channel is connected.
 * NOTE(review): the skb declaration, `hlen += 2` for FCS, early returns
 * and allocation-failure check were dropped by extraction. */
365 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
368 struct l2cap_hdr *lh;
369 struct l2cap_conn *conn = pi->conn;
370 struct sock *sk = (struct sock *)pi;
/* Header + 2 bytes of control field; FCS adds 2 more (line dropped). */
371 int count, hlen = L2CAP_HDR_SIZE + 2;
373 if (sk->sk_state != BT_CONNECTED)
376 if (pi->fcs == L2CAP_FCS_CRC16)
379 BT_DBG("pi %p, control 0x%2.2x", pi, control);
381 count = min_t(unsigned int, conn->mtu, hlen);
382 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume a pending Final bit, if one is owed to the peer. */
384 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
385 control |= L2CAP_CTRL_FINAL;
386 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise for a pending Poll bit. */
389 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
390 control |= L2CAP_CTRL_POLL;
391 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
394 skb = bt_skb_alloc(count, GFP_ATOMIC);
398 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
399 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
400 lh->cid = cpu_to_le16(pi->dcid);
401 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control (everything before the FCS field itself). */
403 if (pi->fcs == L2CAP_FCS_CRC16) {
404 u16 fcs = crc16(0, (u8 *)lh, count - 2);
405 put_unaligned_le16(fcs, skb_put(skb, 2));
408 hci_send_acl(pi->conn->hcon, skb, 0);
411 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
413 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
414 control |= L2CAP_SUPER_RCV_NOT_READY;
415 pi->conn_state |= L2CAP_CONN_RNR_SENT;
417 control |= L2CAP_SUPER_RCV_READY;
419 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
421 l2cap_send_sframe(pi, control);
424 static inline int __l2cap_no_conn_pending(struct sock *sk)
426 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off establishment of one channel: once the remote feature mask is
 * known (or its request already sent) issue a Connect Request when
 * security allows and none is pending; otherwise first send an
 * Information Request (feature mask) and arm the info timeout.
 * NOTE(review): the else keyword between the two halves and closing
 * braces were dropped by extraction. */
429 static void l2cap_do_start(struct sock *sk)
431 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
433 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: wait for its completion. */
434 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
437 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
438 struct l2cap_conn_req req;
439 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
440 req.psm = l2cap_pi(sk)->psm;
442 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
443 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
445 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
446 L2CAP_CONN_REQ, sizeof(req), &req);
/* Feature mask never requested: ask for it before connecting. */
449 struct l2cap_info_req req;
450 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
452 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
453 conn->info_ident = l2cap_get_ident(conn);
455 mod_timer(&conn->info_timer, jiffies +
456 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
458 l2cap_send_cmd(conn, conn->info_ident,
459 L2CAP_INFO_REQ, sizeof(req), &req);
463 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
465 u32 local_feat_mask = l2cap_feat_mask;
467 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
470 case L2CAP_MODE_ERTM:
471 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
472 case L2CAP_MODE_STREAMING:
473 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
479 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
481 struct l2cap_disconn_req req;
486 skb_queue_purge(TX_QUEUE(sk));
488 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
489 del_timer(&l2cap_pi(sk)->retrans_timer);
490 del_timer(&l2cap_pi(sk)->monitor_timer);
491 del_timer(&l2cap_pi(sk)->ack_timer);
494 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
495 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
496 l2cap_send_cmd(conn, l2cap_get_ident(conn),
497 L2CAP_DISCONN_REQ, sizeof(req), &req);
499 sk->sk_state = BT_DISCONN;
503 /* ---- L2CAP connections ---- */
/* Advance the state machine of every channel on this connection:
 *  - BT_CONNECT channels that pass security and have no pending request
 *    get a Connect Request; channels whose mode neither side supports are
 *    collected on a local del-list and closed after the lock is dropped.
 *  - BT_CONNECT2 channels are answered with a Connect Response (pending
 *    for defer_setup / authentication, success otherwise) followed by the
 *    first Configure Request.
 * NOTE(review): many lines (lock acquisition, continue/goto statements,
 * braces) were dropped by extraction. */
504 static void l2cap_conn_start(struct l2cap_conn *conn)
506 struct l2cap_chan_list *l = &conn->chan_list;
507 struct sock_del_list del, *tmp1, *tmp2;
510 BT_DBG("conn %p", conn);
512 INIT_LIST_HEAD(&del.list);
516 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in setup. */
519 if (sk->sk_type != SOCK_SEQPACKET &&
520 sk->sk_type != SOCK_STREAM) {
525 if (sk->sk_state == BT_CONNECT) {
526 struct l2cap_conn_req req;
528 if (!l2cap_check_security(sk) ||
529 !__l2cap_no_conn_pending(sk)) {
/* Mode unsupported on a state2 device: schedule this channel for
 * closure outside the read lock. */
534 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
536 && l2cap_pi(sk)->conf_state &
537 L2CAP_CONF_STATE2_DEVICE) {
538 tmp1 = kzalloc(sizeof(struct sock_del_list),
541 list_add_tail(&tmp1->list, &del.list);
546 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
547 req.psm = l2cap_pi(sk)->psm;
549 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
550 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
552 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
553 L2CAP_CONN_REQ, sizeof(req), &req);
555 } else if (sk->sk_state == BT_CONNECT2) {
556 struct l2cap_conn_rsp rsp;
558 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
559 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
561 if (l2cap_check_security(sk)) {
562 if (bt_sk(sk)->defer_setup) {
563 struct sock *parent = bt_sk(sk)->parent;
564 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
565 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
566 parent->sk_data_ready(parent, 0);
569 sk->sk_state = BT_CONFIG;
570 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
571 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: report authentication pending. */
574 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
575 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
578 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
579 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Skip config if already requested or the response wasn't a success. */
581 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
582 rsp.result != L2CAP_CR_SUCCESS) {
587 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
588 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
589 l2cap_build_conf_req(sk, buf), buf);
590 l2cap_pi(sk)->num_conf_req++;
596 read_unlock(&l->lock);
/* Close the channels collected above, now that the list lock is gone. */
598 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
599 bh_lock_sock(tmp1->sk);
600 __l2cap_sock_close(tmp1->sk, ECONNRESET);
601 bh_unlock_sock(tmp1->sk);
602 list_del(&tmp1->list);
/* ACL link is up: mark raw/connectionless sockets connected immediately
 * and start channel setup (presumably l2cap_do_start -- line dropped by
 * extraction) for connection-oriented sockets in BT_CONNECT. */
607 static void l2cap_conn_ready(struct l2cap_conn *conn)
609 struct l2cap_chan_list *l = &conn->chan_list;
612 BT_DBG("conn %p", conn);
616 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
619 if (sk->sk_type != SOCK_SEQPACKET &&
620 sk->sk_type != SOCK_STREAM) {
621 l2cap_sock_clear_timer(sk);
622 sk->sk_state = BT_CONNECTED;
623 sk->sk_state_change(sk);
624 } else if (sk->sk_state == BT_CONNECT)
630 read_unlock(&l->lock);
633 /* Notify sockets that we cannot guaranty reliability anymore */
634 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
636 struct l2cap_chan_list *l = &conn->chan_list;
639 BT_DBG("conn %p", conn);
643 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
644 if (l2cap_pi(sk)->force_reliable)
648 read_unlock(&l->lock);
651 static void l2cap_info_timeout(unsigned long arg)
653 struct l2cap_conn *conn = (void *) arg;
655 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
656 conn->info_ident = 0;
658 l2cap_conn_start(conn);
/* Allocate and initialise the per-ACL-link L2CAP connection object and
 * attach it to hcon; returns the existing object when one is already
 * attached (early-return lines dropped by extraction), or NULL when the
 * atomic allocation fails. */
661 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
663 struct l2cap_conn *conn = hcon->l2cap_data;
668 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
672 hcon->l2cap_data = conn;
675 BT_DBG("hcon %p conn %p", hcon, conn);
/* Inherit the controller's ACL MTU and the link's addresses. */
677 conn->mtu = hcon->hdev->acl_mtu;
678 conn->src = &hcon->hdev->bdaddr;
679 conn->dst = &hcon->dst;
683 spin_lock_init(&conn->lock);
684 rwlock_init(&conn->chan_list.lock);
686 setup_timer(&conn->info_timer, l2cap_info_timeout,
687 (unsigned long) conn);
/* 0x13 = remote user terminated connection (default disconnect reason). */
689 conn->disc_reason = 0x13;
/* Tear the L2CAP connection down: free any partial reassembly skb, close
 * every remaining channel with `err`, stop the info timer if it was ever
 * armed, and detach from the hci_conn (final kfree dropped by extraction). */
694 static void l2cap_conn_del(struct hci_conn *hcon, int err)
696 struct l2cap_conn *conn = hcon->l2cap_data;
702 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
704 kfree_skb(conn->rx_skb);
/* Close every channel still linked to this connection. */
707 while ((sk = conn->chan_list.head)) {
709 l2cap_chan_del(sk, err);
714 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
715 del_timer_sync(&conn->info_timer);
717 hcon->l2cap_data = NULL;
721 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
723 struct l2cap_chan_list *l = &conn->chan_list;
724 write_lock_bh(&l->lock);
725 __l2cap_chan_add(conn, sk, parent);
726 write_unlock_bh(&l->lock);
729 /* ---- Socket interface ---- */
730 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
733 struct hlist_node *node;
734 sk_for_each(sk, node, &l2cap_sk_list.head)
735 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
742 /* Find socket with psm and source bdaddr.
743 * Returns closest match.
745 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
747 struct sock *sk = NULL, *sk1 = NULL;
748 struct hlist_node *node;
750 sk_for_each(sk, node, &l2cap_sk_list.head) {
751 if (state && sk->sk_state != state)
754 if (l2cap_pi(sk)->psm == psm) {
756 if (!bacmp(&bt_sk(sk)->src, src))
760 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
764 return node ? sk : sk1;
767 /* Find socket with given address (psm, src).
768 * Returns locked socket */
769 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
772 read_lock(&l2cap_sk_list.lock);
773 s = __l2cap_get_sock_by_psm(state, psm, src);
776 read_unlock(&l2cap_sk_list.lock);
780 static void l2cap_sock_destruct(struct sock *sk)
784 skb_queue_purge(&sk->sk_receive_queue);
785 skb_queue_purge(&sk->sk_write_queue);
788 static void l2cap_sock_cleanup_listen(struct sock *parent)
792 BT_DBG("parent %p", parent);
794 /* Close not yet accepted channels */
795 while ((sk = bt_accept_dequeue(parent, NULL)))
796 l2cap_sock_close(sk);
798 parent->sk_state = BT_CLOSED;
799 sock_set_flag(parent, SOCK_ZAPPED);
802 /* Kill socket (only if zapped and orphan)
803 * Must be called on unlocked socket.
805 static void l2cap_sock_kill(struct sock *sk)
807 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
810 BT_DBG("sk %p state %d", sk, sk->sk_state);
812 /* Kill poor orphan */
813 bt_sock_unlink(&l2cap_sk_list, sk);
814 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close, caller holds the socket lock:
 *  - listening sockets sweep their accept queue;
 *  - connected/config channels send a Disconnect Request (with a close
 *    timeout) or are deleted outright for non-stream types;
 *  - BT_CONNECT2 channels answer the pending Connect Request negatively
 *    before deletion; everything else is just zapped.
 * NOTE(review): the switch's case labels and several braces were dropped
 * by extraction. */
818 static void __l2cap_sock_close(struct sock *sk, int reason)
820 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
822 switch (sk->sk_state) {
824 l2cap_sock_cleanup_listen(sk);
829 if (sk->sk_type == SOCK_SEQPACKET ||
830 sk->sk_type == SOCK_STREAM) {
831 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
833 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
834 l2cap_send_disconn_req(conn, sk, reason);
836 l2cap_chan_del(sk, reason);
840 if (sk->sk_type == SOCK_SEQPACKET ||
841 sk->sk_type == SOCK_STREAM) {
842 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
843 struct l2cap_conn_rsp rsp;
/* Security block when setup was deferred, bad PSM otherwise. */
846 if (bt_sk(sk)->defer_setup)
847 result = L2CAP_CR_SEC_BLOCK;
849 result = L2CAP_CR_BAD_PSM;
851 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
852 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
853 rsp.result = cpu_to_le16(result);
854 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
855 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
856 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
858 l2cap_chan_del(sk, reason);
863 l2cap_chan_del(sk, reason);
/* Default: no connection-level teardown needed, just zap. */
867 sock_set_flag(sk, SOCK_ZAPPED);
872 /* Must be called on unlocked socket. */
873 static void l2cap_sock_close(struct sock *sk)
875 l2cap_sock_clear_timer(sk);
877 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise a freshly allocated L2CAP socket: inherit every channel
 * parameter from `parent` when this socket comes off an accept queue,
 * otherwise apply defaults (ERTM for SOCK_STREAM when enable_ertm is set,
 * basic mode otherwise).
 * NOTE(review): the if/else structure around the two halves and several
 * default assignments were dropped by extraction. */
882 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
884 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* --- Accepted child: copy the listening parent's configuration. --- */
889 sk->sk_type = parent->sk_type;
890 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
892 pi->imtu = l2cap_pi(parent)->imtu;
893 pi->omtu = l2cap_pi(parent)->omtu;
894 pi->conf_state = l2cap_pi(parent)->conf_state;
895 pi->mode = l2cap_pi(parent)->mode;
896 pi->fcs = l2cap_pi(parent)->fcs;
897 pi->max_tx = l2cap_pi(parent)->max_tx;
898 pi->tx_win = l2cap_pi(parent)->tx_win;
899 pi->sec_level = l2cap_pi(parent)->sec_level;
900 pi->role_switch = l2cap_pi(parent)->role_switch;
901 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* --- No parent: stand-alone socket defaults. --- */
903 pi->imtu = L2CAP_DEFAULT_MTU;
905 if (enable_ertm && sk->sk_type == SOCK_STREAM) {
906 pi->mode = L2CAP_MODE_ERTM;
907 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
909 pi->mode = L2CAP_MODE_BASIC;
911 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
912 pi->fcs = L2CAP_FCS_CRC16;
913 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
914 pi->sec_level = BT_SECURITY_LOW;
916 pi->force_reliable = 0;
919 /* Default config options */
921 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* ERTM bookkeeping queues/lists start out empty. */
922 skb_queue_head_init(TX_QUEUE(sk));
923 skb_queue_head_init(SREJ_QUEUE(sk));
924 skb_queue_head_init(BUSY_QUEUE(sk));
925 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor registered with the BT socket layer; obj_size
 * makes sk_alloc() reserve room for the L2CAP-specific pinfo.
 * NOTE(review): the .name member line was dropped by extraction. */
928 static struct proto l2cap_proto = {
930 .owner = THIS_MODULE,
931 .obj_size = sizeof(struct l2cap_pinfo)
934 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
938 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
942 sock_init_data(sock, sk);
943 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
945 sk->sk_destruct = l2cap_sock_destruct;
946 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
948 sock_reset_flag(sk, SOCK_ZAPPED);
950 sk->sk_protocol = proto;
951 sk->sk_state = BT_OPEN;
953 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
955 bt_sock_link(&l2cap_sk_list, sk);
959 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
964 BT_DBG("sock %p", sock);
966 sock->state = SS_UNCONNECTED;
968 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
969 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
970 return -ESOCKTNOSUPPORT;
972 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
975 sock->ops = &l2cap_sock_ops;
977 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
981 l2cap_sock_init(sk, NULL);
/* bind(2): validate the sockaddr, require CAP_NET_BIND_SERVICE for
 * privileged PSMs (< 0x1001), reject duplicate (psm, bdaddr) bindings,
 * record the source address/PSM and move the socket to BT_BOUND. The
 * well-known SDP (0x0001) and RFCOMM (0x0003) PSMs default to SDP-level
 * security. (lock_sock/err plumbing dropped by extraction.) */
985 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
987 struct sock *sk = sock->sk;
988 struct sockaddr_l2 la;
993 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy at most sizeof(la) bytes of the (possibly short) user sockaddr. */
996 memset(&la, 0, sizeof(la));
997 len = min_t(unsigned int, sizeof(la), alen);
998 memcpy(&la, addr, len);
1005 if (sk->sk_state != BT_OPEN) {
1010 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
1011 !capable(CAP_NET_BIND_SERVICE)) {
1016 write_lock_bh(&l2cap_sk_list.lock);
1018 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1021 /* Save source address */
1022 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1023 l2cap_pi(sk)->psm = la.l2_psm;
1024 l2cap_pi(sk)->sport = la.l2_psm;
1025 sk->sk_state = BT_BOUND;
1027 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1028 __le16_to_cpu(la.l2_psm) == 0x0003)
1029 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1032 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve the HCI route to dst, choose an authentication requirement
 * from the socket type / security level, create (or reuse) the ACL link,
 * attach this channel to the resulting l2cap_conn and arm the connect
 * timer. If the link is already up, raw/connectionless sockets complete
 * immediately. Returns 0 or a negative errno (error paths and the final
 * l2cap_do_start call were dropped by extraction). */
1039 static int l2cap_do_connect(struct sock *sk)
1041 bdaddr_t *src = &bt_sk(sk)->src;
1042 bdaddr_t *dst = &bt_sk(sk)->dst;
1043 struct l2cap_conn *conn;
1044 struct hci_conn *hcon;
1045 struct hci_dev *hdev;
1049 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1052 hdev = hci_get_route(dst, src);
1054 return -EHOSTUNREACH;
1056 hci_dev_lock_bh(hdev);
/* Raw sockets: dedicated bonding per security level.
 * NOTE(review): case break statements dropped by extraction. */
1060 if (sk->sk_type == SOCK_RAW) {
1061 switch (l2cap_pi(sk)->sec_level) {
1062 case BT_SECURITY_HIGH:
1063 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1065 case BT_SECURITY_MEDIUM:
1066 auth_type = HCI_AT_DEDICATED_BONDING;
1069 auth_type = HCI_AT_NO_BONDING;
/* SDP PSM never bonds; LOW is downgraded to SDP-level security. */
1072 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1073 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1074 auth_type = HCI_AT_NO_BONDING_MITM;
1076 auth_type = HCI_AT_NO_BONDING;
1078 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1079 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
/* Everything else: general bonding. */
1081 switch (l2cap_pi(sk)->sec_level) {
1082 case BT_SECURITY_HIGH:
1083 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1085 case BT_SECURITY_MEDIUM:
1086 auth_type = HCI_AT_GENERAL_BONDING;
1089 auth_type = HCI_AT_NO_BONDING;
1094 hcon = hci_connect(hdev, ACL_LINK, dst,
1095 l2cap_pi(sk)->sec_level, auth_type);
1099 conn = l2cap_conn_add(hcon, 0);
1107 /* Update source addr of the socket */
1108 bacpy(src, conn->src);
1110 l2cap_chan_add(conn, sk, NULL);
1112 sk->sk_state = BT_CONNECT;
1113 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1115 if (hcon->state == BT_CONNECTED) {
1116 if (sk->sk_type != SOCK_SEQPACKET &&
1117 sk->sk_type != SOCK_STREAM) {
1118 l2cap_sock_clear_timer(sk);
1119 sk->sk_state = BT_CONNECTED;
1125 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address and channel mode (ERTM/streaming only
 * when supported), reject connects without a PSM on seqpacket/stream
 * sockets, store the destination, start l2cap_do_connect() and wait for
 * BT_CONNECTED honouring O_NONBLOCK. (lock/err plumbing and the state
 * switch bodies were dropped by extraction.) */
1130 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1132 struct sock *sk = sock->sk;
1133 struct sockaddr_l2 la;
1136 BT_DBG("sk %p", sk);
1138 if (!addr || alen < sizeof(addr->sa_family) ||
1139 addr->sa_family != AF_BLUETOOTH)
1142 memset(&la, 0, sizeof(la));
1143 len = min_t(unsigned int, sizeof(la), alen);
1144 memcpy(&la, addr, len);
/* Connection-oriented sockets must name a PSM. */
1151 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
/* Only basic mode is always allowed; ERTM/streaming require support. */
1157 switch (l2cap_pi(sk)->mode) {
1158 case L2CAP_MODE_BASIC:
1160 case L2CAP_MODE_ERTM:
1161 case L2CAP_MODE_STREAMING:
1170 switch (sk->sk_state) {
1174 /* Already connecting */
1178 /* Already connected */
1192 /* Set destination address and psm */
1193 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1194 l2cap_pi(sk)->psm = la.l2_psm;
1196 err = l2cap_do_connect(sk);
1201 err = bt_sock_wait_state(sk, BT_CONNECTED,
1202 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only valid for bound seqpacket/stream sockets; the channel
 * mode must be basic (or ERTM/streaming where allowed -- the guard lines
 * were dropped by extraction). When no PSM was bound, autobind the first
 * free odd PSM in the dynamic range 0x1001..0x10ff. */
1208 static int l2cap_sock_listen(struct socket *sock, int backlog)
1210 struct sock *sk = sock->sk;
1213 BT_DBG("sk %p backlog %d", sk, backlog);
1217 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1218 || sk->sk_state != BT_BOUND) {
1223 switch (l2cap_pi(sk)->mode) {
1224 case L2CAP_MODE_BASIC:
1226 case L2CAP_MODE_ERTM:
1227 case L2CAP_MODE_STREAMING:
/* Autobind: PSMs are odd, hence the += 2 stride. */
1236 if (!l2cap_pi(sk)->psm) {
1237 bdaddr_t *src = &bt_sk(sk)->src;
1242 write_lock_bh(&l2cap_sk_list.lock);
1244 for (psm = 0x1001; psm < 0x1100; psm += 2)
1245 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1246 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1247 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1252 write_unlock_bh(&l2cap_sk_list.lock);
/* Enter the listening state with an empty backlog. */
1258 sk->sk_max_ack_backlog = backlog;
1259 sk->sk_ack_backlog = 0;
1260 sk->sk_state = BT_LISTEN;
/* accept(2): wait (wake-one, interruptible) for a child socket on the
 * accept queue, honouring the receive timeout / O_NONBLOCK, and attach
 * it to newsock. Re-checks BT_LISTEN after every wakeup since the parent
 * may be closed concurrently. (EAGAIN/EBADFD assignments and release_sock
 * were dropped by extraction.) */
1267 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1269 DECLARE_WAITQUEUE(wait, current);
1270 struct sock *sk = sock->sk, *nsk;
1274 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1276 if (sk->sk_state != BT_LISTEN) {
1281 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1283 BT_DBG("sk %p timeo %ld", sk, timeo);
1285 /* Wait for an incoming connection. (wake-one). */
1286 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1287 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1288 set_current_state(TASK_INTERRUPTIBLE);
/* Sleep with the socket unlocked, then re-take the lock. */
1295 timeo = schedule_timeout(timeo);
1296 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1298 if (sk->sk_state != BT_LISTEN) {
1303 if (signal_pending(current)) {
1304 err = sock_intr_errno(timeo);
1308 set_current_state(TASK_RUNNING);
1309 remove_wait_queue(sk_sleep(sk), &wait);
1314 newsock->state = SS_CONNECTED;
1316 BT_DBG("new socket %p", nsk);
/* getname(2): fill in (psm, bdaddr, cid) -- the peer's address and dcid
 * when peer is nonzero, otherwise the local bound port, address and scid.
 * (The if (peer)/else keywords were dropped by extraction.) */
1323 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1325 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1326 struct sock *sk = sock->sk;
1328 BT_DBG("sock %p, sk %p", sock, sk);
1330 addr->sa_family = AF_BLUETOOTH;
1331 *len = sizeof(struct sockaddr_l2);
/* peer branch: remote PSM/address/destination CID. */
1334 la->l2_psm = l2cap_pi(sk)->psm;
1335 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1336 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* local branch: bound sport/source address/source CID. */
1338 la->l2_psm = l2cap_pi(sk)->sport;
1339 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1340 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the channel loses its connection; returns 0, a pending
 * socket error, or a signal/timeout errno. (timeo initialisation and the
 * sleep/lock dance lines were dropped by extraction.) */
1346 static int __l2cap_wait_ack(struct sock *sk)
1348 DECLARE_WAITQUEUE(wait, current);
1352 add_wait_queue(sk_sleep(sk), &wait);
1353 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1354 set_current_state(TASK_INTERRUPTIBLE);
1359 if (signal_pending(current)) {
1360 err = sock_intr_errno(timeo);
1365 timeo = schedule_timeout(timeo);
1368 err = sock_error(sk);
1372 set_current_state(TASK_RUNNING);
1373 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer (timer context): give up and disconnect once
 * retry_count reaches the peer's max transmit, otherwise rearm the
 * monitor and poll the peer with an RR/RNR carrying the P bit.
 * (bh_lock/unlock and return lines dropped by extraction.) */
1377 static void l2cap_monitor_timeout(unsigned long arg)
1379 struct sock *sk = (void *) arg;
1381 BT_DBG("sk %p", sk);
1384 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1385 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1390 l2cap_pi(sk)->retry_count++;
1391 __mod_monitor_timer();
1393 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer (timer context): start the monitor cycle at
 * retry 1, enter the wait-for-Final state and poll the peer (P bit).
 * (bh_lock/unlock lines dropped by extraction.) */
1397 static void l2cap_retrans_timeout(unsigned long arg)
1399 struct sock *sk = (void *) arg;
1401 BT_DBG("sk %p", sk);
1404 l2cap_pi(sk)->retry_count = 1;
1405 __mod_monitor_timer();
1407 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1409 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Free frames at the head of the tx queue that the peer has already
 * acknowledged (everything before expected_ack_seq); once nothing is
 * left unacked, stop the retransmission timer. (The break on reaching
 * expected_ack_seq and the kfree_skb were dropped by extraction.) */
1413 static void l2cap_drop_acked_frames(struct sock *sk)
1415 struct sk_buff *skb;
1417 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1418 l2cap_pi(sk)->unacked_frames) {
1419 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1422 skb = skb_dequeue(TX_QUEUE(sk));
1425 l2cap_pi(sk)->unacked_frames--;
1428 if (!l2cap_pi(sk)->unacked_frames)
1429 del_timer(&l2cap_pi(sk)->retrans_timer);
1432 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1434 struct l2cap_pinfo *pi = l2cap_pi(sk);
1436 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1438 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode: transmit every queued frame immediately with no
 * retransmission -- stamps tx_seq into a clone of each skb, appends the
 * CRC16 FCS when enabled, sends, then frees the original. tx_seq wraps
 * modulo 64. */
1441 static void l2cap_streaming_send(struct sock *sk)
1443 struct sk_buff *skb, *tx_skb;
1444 struct l2cap_pinfo *pi = l2cap_pi(sk);
1447 while ((skb = sk->sk_send_head)) {
1448 tx_skb = skb_clone(skb, GFP_ATOMIC);
1450 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1451 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1452 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1454 if (pi->fcs == L2CAP_FCS_CRC16) {
1455 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1456 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1459 l2cap_do_send(sk, tx_skb);
1461 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/* Advance sk_send_head before discarding the sent original. */
1463 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1464 sk->sk_send_head = NULL;
1466 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1468 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single I-frame whose tx_seq matches: locate it in the
 * tx queue, refresh its ReqSeq / pending F bit and FCS in a clone, and
 * send it; disconnects instead when the frame already reached the peer's
 * max-transmit count. (The not-found return was dropped by extraction.) */
1473 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1475 struct l2cap_pinfo *pi = l2cap_pi(sk);
1476 struct sk_buff *skb, *tx_skb;
1479 skb = skb_peek(TX_QUEUE(sk));
/* Linear scan of the tx queue for the requested sequence number. */
1484 if (bt_cb(skb)->tx_seq == tx_seq)
1487 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1490 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1492 if (pi->remote_max_tx &&
1493 bt_cb(skb)->retries == pi->remote_max_tx) {
1494 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1498 tx_skb = skb_clone(skb, GFP_ATOMIC);
1499 bt_cb(skb)->retries++;
1500 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Consume a pending Final bit, if owed. */
1502 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1503 control |= L2CAP_CTRL_FINAL;
1504 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1507 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1508 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1510 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1512 if (pi->fcs == L2CAP_FCS_CRC16) {
1513 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1514 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1517 l2cap_do_send(sk, tx_skb);
1520 static int l2cap_ertm_send(struct sock *sk)
1522 struct sk_buff *skb, *tx_skb;
1523 struct l2cap_pinfo *pi = l2cap_pi(sk);
1527 if (sk->sk_state != BT_CONNECTED)
1530 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1532 if (pi->remote_max_tx &&
1533 bt_cb(skb)->retries == pi->remote_max_tx) {
1534 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1538 tx_skb = skb_clone(skb, GFP_ATOMIC);
1540 bt_cb(skb)->retries++;
1542 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1543 control &= L2CAP_CTRL_SAR;
1545 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1546 control |= L2CAP_CTRL_FINAL;
1547 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1549 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1550 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1551 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1554 if (pi->fcs == L2CAP_FCS_CRC16) {
1555 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1556 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1559 l2cap_do_send(sk, tx_skb);
1561 __mod_retrans_timer();
1563 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1564 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1566 pi->unacked_frames++;
1569 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1570 sk->sk_send_head = NULL;
1572 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/*
 * Rewind transmission: point sk_send_head back at the head of the TX
 * queue, reset next_tx_seq to the last acknowledged sequence number and
 * resend everything via l2cap_ertm_send().
 * NOTE(review): this chunk appears line-sampled; braces/locals elided.
 */
1580 static int l2cap_retransmit_frames(struct sock *sk)
1582 struct l2cap_pinfo *pi = l2cap_pi(sk);
1585 if (!skb_queue_empty(TX_QUEUE(sk)))
1586 sk->sk_send_head = TX_QUEUE(sk)->next;
1588 pi->next_tx_seq = pi->expected_ack_seq;
1589 ret = l2cap_ertm_send(sk);
/*
 * Acknowledge received I-frames: send RNR while locally busy, otherwise
 * try to piggyback the ack on pending I-frames (l2cap_ertm_send) and
 * fall back to an explicit RR S-frame.
 */
1593 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1595 struct sock *sk = (struct sock *)pi;
1598 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1600 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1601 control |= L2CAP_SUPER_RCV_NOT_READY;
1602 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1603 l2cap_send_sframe(pi, control);
1607 if (l2cap_ertm_send(sk) > 0)
1610 control |= L2CAP_SUPER_RCV_READY;
1611 l2cap_send_sframe(pi, control);
/*
 * Send a SREJ S-frame with the F-bit set, requesting the sequence number
 * stored in the last entry of the SREJ list.
 */
1614 static void l2cap_send_srejtail(struct sock *sk)
1616 struct srej_list *tail;
1619 control = L2CAP_SUPER_SELECT_REJECT;
1620 control |= L2CAP_CTRL_FINAL;
1622 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1623 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1625 l2cap_send_sframe(l2cap_pi(sk), control);
/*
 * Copy user iovec data into @skb; anything beyond the first @count bytes
 * goes into MTU-sized continuation fragments chained on frag_list.
 * @len is the total payload, @count the room left in the head skb.
 */
1628 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1630 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1631 struct sk_buff **frag;
1634 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1640 /* Continuation fragments (no L2CAP header) */
1641 frag = &skb_shinfo(skb)->frag_list;
1643 count = min_t(unsigned int, conn->mtu, len);
1645 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1648 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1654 frag = &(*frag)->next;
/*
 * Build a connectionless (SOCK_DGRAM) PDU: L2CAP header + 2-byte PSM,
 * then the user payload via l2cap_skbuff_fromiovec().
 * Returns the skb or an ERR_PTR.
 */
1660 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1662 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1663 struct sk_buff *skb;
1664 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1665 struct l2cap_hdr *lh;
1667 BT_DBG("sk %p len %d", sk, (int)len);
1669 count = min_t(unsigned int, (conn->mtu - hlen), len);
1670 skb = bt_skb_send_alloc(sk, count + hlen,
1671 msg->msg_flags & MSG_DONTWAIT, &err);
1673 return ERR_PTR(-ENOMEM);
1675 /* Create L2CAP header */
1676 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1677 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1678 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1679 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1681 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1682 if (unlikely(err < 0)) {
1684 return ERR_PTR(err);
/*
 * Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload.  Returns the skb or an ERR_PTR.
 */
1689 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1691 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1692 struct sk_buff *skb;
1693 int err, count, hlen = L2CAP_HDR_SIZE;
1694 struct l2cap_hdr *lh;
1696 BT_DBG("sk %p len %d", sk, (int)len);
1698 count = min_t(unsigned int, (conn->mtu - hlen), len);
1699 skb = bt_skb_send_alloc(sk, count + hlen,
1700 msg->msg_flags & MSG_DONTWAIT, &err);
1702 return ERR_PTR(-ENOMEM);
1704 /* Create L2CAP header */
1705 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1706 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1707 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1709 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1710 if (unlikely(err < 0)) {
1712 return ERR_PTR(err);
/*
 * Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 2-byte SDU length (start-of-SDU segments), payload,
 * and room for a CRC16 FCS when enabled.  The FCS bytes are written as
 * zero here; the real checksum is filled in at transmit time.
 * Returns the skb or an ERR_PTR.
 */
1717 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1719 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1720 struct sk_buff *skb;
1721 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1722 struct l2cap_hdr *lh;
1724 BT_DBG("sk %p len %d", sk, (int)len);
1727 return ERR_PTR(-ENOTCONN);
1732 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1735 count = min_t(unsigned int, (conn->mtu - hlen), len);
1736 skb = bt_skb_send_alloc(sk, count + hlen,
1737 msg->msg_flags & MSG_DONTWAIT, &err);
1739 return ERR_PTR(-ENOMEM);
1741 /* Create L2CAP header */
1742 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1743 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1744 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1745 put_unaligned_le16(control, skb_put(skb, 2));
1747 put_unaligned_le16(sdulen, skb_put(skb, 2));
1749 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1750 if (unlikely(err < 0)) {
1752 return ERR_PTR(err);
1755 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1756 put_unaligned_le16(0, skb_put(skb, 2));
1758 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a START frame (carrying
 * the total SDU length), zero or more CONTINUE frames, and an END frame.
 * Segments are built on a local queue first so a mid-stream allocation
 * failure can purge everything; on success they are spliced onto the
 * socket TX queue.
 */
1762 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1764 struct l2cap_pinfo *pi = l2cap_pi(sk);
1765 struct sk_buff *skb;
1766 struct sk_buff_head sar_queue;
1770 skb_queue_head_init(&sar_queue);
1771 control = L2CAP_SDU_START;
1772 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1774 return PTR_ERR(skb);
1776 __skb_queue_tail(&sar_queue, skb);
1777 len -= pi->remote_mps;
1778 size += pi->remote_mps;
1783 if (len > pi->remote_mps) {
1784 control = L2CAP_SDU_CONTINUE;
1785 buflen = pi->remote_mps;
1787 control = L2CAP_SDU_END;
1791 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1793 skb_queue_purge(&sar_queue);
1794 return PTR_ERR(skb);
1797 __skb_queue_tail(&sar_queue, skb);
1801 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1802 if (sk->sk_send_head == NULL)
1803 sk->sk_send_head = sar_queue.next;
1808 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1810 struct sock *sk = sock->sk;
1811 struct l2cap_pinfo *pi = l2cap_pi(sk);
1812 struct sk_buff *skb;
1816 BT_DBG("sock %p, sk %p", sock, sk);
1818 err = sock_error(sk);
1822 if (msg->msg_flags & MSG_OOB)
1827 if (sk->sk_state != BT_CONNECTED) {
1832 /* Connectionless channel */
1833 if (sk->sk_type == SOCK_DGRAM) {
1834 skb = l2cap_create_connless_pdu(sk, msg, len);
1838 l2cap_do_send(sk, skb);
1845 case L2CAP_MODE_BASIC:
1846 /* Check outgoing MTU */
1847 if (len > pi->omtu) {
1852 /* Create a basic PDU */
1853 skb = l2cap_create_basic_pdu(sk, msg, len);
1859 l2cap_do_send(sk, skb);
1863 case L2CAP_MODE_ERTM:
1864 case L2CAP_MODE_STREAMING:
1865 /* Entire SDU fits into one PDU */
1866 if (len <= pi->remote_mps) {
1867 control = L2CAP_SDU_UNSEGMENTED;
1868 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1873 __skb_queue_tail(TX_QUEUE(sk), skb);
1875 if (sk->sk_send_head == NULL)
1876 sk->sk_send_head = skb;
1879 /* Segment SDU into multiples PDUs */
1880 err = l2cap_sar_segment_sdu(sk, msg, len);
1885 if (pi->mode == L2CAP_MODE_STREAMING) {
1886 l2cap_streaming_send(sk);
1888 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1889 pi->conn_state && L2CAP_CONN_WAIT_F) {
1893 err = l2cap_ertm_send(sk);
1901 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg() entry point.  When a deferred incoming connection is read
 * for the first time (BT_CONNECT2 + defer_setup), finish the connect
 * handshake: send the pending connect response and, if not already
 * done, the first configure request.  Then fall through to the common
 * bt_sock_recvmsg().
 */
1910 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1912 struct sock *sk = sock->sk;
1916 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1917 struct l2cap_conn_rsp rsp;
1918 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1921 sk->sk_state = BT_CONFIG;
1923 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1924 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1925 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1926 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1927 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1928 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1930 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1935 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1936 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1937 l2cap_build_conf_req(sk, buf), buf);
1938 l2cap_pi(sk)->num_conf_req++;
1946 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (mtu/mode/fcs/
 * tx window) and L2CAP_LM link-mode flags mapped onto sec_level,
 * role_switch and force_reliable.
 */
1949 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1951 struct sock *sk = sock->sk;
1952 struct l2cap_options opts;
1956 BT_DBG("sk %p", sk);
/* Current values pre-fill opts so a short copy_from_user keeps defaults */
1962 opts.imtu = l2cap_pi(sk)->imtu;
1963 opts.omtu = l2cap_pi(sk)->omtu;
1964 opts.flush_to = l2cap_pi(sk)->flush_to;
1965 opts.mode = l2cap_pi(sk)->mode;
1966 opts.fcs = l2cap_pi(sk)->fcs;
1967 opts.max_tx = l2cap_pi(sk)->max_tx;
1968 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1970 len = min_t(unsigned int, sizeof(opts), optlen);
1971 if (copy_from_user((char *) &opts, optval, len)) {
1976 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1981 l2cap_pi(sk)->mode = opts.mode;
1982 switch (l2cap_pi(sk)->mode) {
1983 case L2CAP_MODE_BASIC:
1984 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1986 case L2CAP_MODE_ERTM:
1987 case L2CAP_MODE_STREAMING:
1996 l2cap_pi(sk)->imtu = opts.imtu;
1997 l2cap_pi(sk)->omtu = opts.omtu;
1998 l2cap_pi(sk)->fcs = opts.fcs;
1999 l2cap_pi(sk)->max_tx = opts.max_tx;
2000 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
2004 if (get_user(opt, (u32 __user *) optval)) {
/* Highest requested link-mode flag wins for the security level */
2009 if (opt & L2CAP_LM_AUTH)
2010 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2011 if (opt & L2CAP_LM_ENCRYPT)
2012 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2013 if (opt & L2CAP_LM_SECURE)
2014 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2016 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2017 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * SOL_BLUETOOTH setsockopt: BT_SECURITY (validated level range) and
 * BT_DEFER_SETUP (only while bound/listening).  SOL_L2CAP options are
 * delegated to the legacy handler above.
 */
2029 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2031 struct sock *sk = sock->sk;
2032 struct bt_security sec;
2036 BT_DBG("sk %p", sk);
2038 if (level == SOL_L2CAP)
2039 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2041 if (level != SOL_BLUETOOTH)
2042 return -ENOPROTOOPT;
2048 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2049 && sk->sk_type != SOCK_RAW) {
2054 sec.level = BT_SECURITY_LOW;
2056 len = min_t(unsigned int, sizeof(sec), optlen);
2057 if (copy_from_user((char *) &sec, optval, len)) {
2062 if (sec.level < BT_SECURITY_LOW ||
2063 sec.level > BT_SECURITY_HIGH) {
2068 l2cap_pi(sk)->sec_level = sec.level;
2071 case BT_DEFER_SETUP:
2072 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2077 if (get_user(opt, (u32 __user *) optval)) {
2082 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS, L2CAP_LM (derived
 * from sec_level and the role/reliability flags) and L2CAP_CONNINFO.
 */
2094 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2096 struct sock *sk = sock->sk;
2097 struct l2cap_options opts;
2098 struct l2cap_conninfo cinfo;
2102 BT_DBG("sk %p", sk);
2104 if (get_user(len, optlen))
2111 opts.imtu = l2cap_pi(sk)->imtu;
2112 opts.omtu = l2cap_pi(sk)->omtu;
2113 opts.flush_to = l2cap_pi(sk)->flush_to;
2114 opts.mode = l2cap_pi(sk)->mode;
2115 opts.fcs = l2cap_pi(sk)->fcs;
2116 opts.max_tx = l2cap_pi(sk)->max_tx;
2117 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2119 len = min_t(unsigned int, len, sizeof(opts));
2120 if (copy_to_user(optval, (char *) &opts, len))
/* Map internal security level back to the legacy LM bit-mask */
2126 switch (l2cap_pi(sk)->sec_level) {
2127 case BT_SECURITY_LOW:
2128 opt = L2CAP_LM_AUTH;
2130 case BT_SECURITY_MEDIUM:
2131 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2133 case BT_SECURITY_HIGH:
2134 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2142 if (l2cap_pi(sk)->role_switch)
2143 opt |= L2CAP_LM_MASTER;
2145 if (l2cap_pi(sk)->force_reliable)
2146 opt |= L2CAP_LM_RELIABLE;
2148 if (put_user(opt, (u32 __user *) optval))
2152 case L2CAP_CONNINFO:
/* Connection info is only valid once connected (or deferred accept) */
2153 if (sk->sk_state != BT_CONNECTED &&
2154 !(sk->sk_state == BT_CONNECT2 &&
2155 bt_sk(sk)->defer_setup)) {
2160 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2161 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2163 len = min_t(unsigned int, len, sizeof(cinfo));
2164 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * SOL_BLUETOOTH getsockopt: BT_SECURITY and BT_DEFER_SETUP; SOL_L2CAP
 * options are delegated to the legacy handler above.
 */
2178 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2180 struct sock *sk = sock->sk;
2181 struct bt_security sec;
2184 BT_DBG("sk %p", sk);
2186 if (level == SOL_L2CAP)
2187 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2189 if (level != SOL_BLUETOOTH)
2190 return -ENOPROTOOPT;
2192 if (get_user(len, optlen))
2199 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2200 && sk->sk_type != SOCK_RAW) {
2205 sec.level = l2cap_pi(sk)->sec_level;
2207 len = min_t(unsigned int, len, sizeof(sec));
2208 if (copy_to_user(optval, (char *) &sec, len))
2213 case BT_DEFER_SETUP:
2214 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2219 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown(): for ERTM wait for outstanding acks first, then close the
 * channel; with SO_LINGER set, wait for BT_CLOSED within lingertime.
 */
2233 static int l2cap_sock_shutdown(struct socket *sock, int how)
2235 struct sock *sk = sock->sk;
2238 BT_DBG("sock %p, sk %p", sock, sk);
2244 if (!sk->sk_shutdown) {
2245 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2246 err = __l2cap_wait_ack(sk);
2248 sk->sk_shutdown = SHUTDOWN_MASK;
2249 l2cap_sock_clear_timer(sk);
2250 __l2cap_sock_close(sk, 0);
2252 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2253 err = bt_sock_wait_state(sk, BT_CLOSED,
2257 if (!err && sk->sk_err)
/*
 * release(): full shutdown (both directions) then kill the socket.
 */
2264 static int l2cap_sock_release(struct socket *sock)
2266 struct sock *sk = sock->sk;
2269 BT_DBG("sock %p, sk %p", sock, sk);
2274 err = l2cap_sock_shutdown(sock, 2);
2277 l2cap_sock_kill(sk);
/*
 * Channel configuration finished: clear config state and the setup
 * timer, then wake whoever is waiting — the connecting thread for an
 * outgoing channel, or the listening parent for an incoming one.
 */
2281 static void l2cap_chan_ready(struct sock *sk)
2283 struct sock *parent = bt_sk(sk)->parent;
2285 BT_DBG("sk %p, parent %p", sk, parent);
2287 l2cap_pi(sk)->conf_state = 0;
2288 l2cap_sock_clear_timer(sk);
2291 /* Outgoing channel.
2292 * Wake up socket sleeping on connect.
2294 sk->sk_state = BT_CONNECTED;
2295 sk->sk_state_change(sk);
2297 /* Incoming channel.
2298 * Wake up socket sleeping on accept.
2300 parent->sk_data_ready(parent, 0);
2304 /* Copy frame to all raw sockets on that connection */
2305 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2307 struct l2cap_chan_list *l = &conn->chan_list;
2308 struct sk_buff *nskb;
2311 BT_DBG("conn %p", conn);
/* Walk the per-connection channel list under its read lock */
2313 read_lock(&l->lock);
2314 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2315 if (sk->sk_type != SOCK_RAW)
2318 /* Don't send frame to the socket it came from */
/* Clone per recipient; GFP_ATOMIC failure silently skips the socket */
2321 nskb = skb_clone(skb, GFP_ATOMIC);
2325 if (sock_queue_rcv_skb(sk, nskb))
2328 read_unlock(&l->lock);
2331 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-channel command skb: L2CAP header (CID 0x0001),
 * command header, then @dlen bytes of @data, fragmented into MTU-sized
 * continuation skbs on frag_list when needed.
 */
2332 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2333 u8 code, u8 ident, u16 dlen, void *data)
2335 struct sk_buff *skb, **frag;
2336 struct l2cap_cmd_hdr *cmd;
2337 struct l2cap_hdr *lh;
2340 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2341 conn, code, ident, dlen);
2343 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2344 count = min_t(unsigned int, conn->mtu, len);
2346 skb = bt_skb_alloc(count, GFP_ATOMIC);
2350 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2351 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2352 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2354 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2357 cmd->len = cpu_to_le16(dlen);
2360 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2361 memcpy(skb_put(skb, count), data, count);
2367 /* Continuation fragments (no L2CAP header) */
2368 frag = &skb_shinfo(skb)->frag_list;
2370 count = min_t(unsigned int, conn->mtu, len);
2372 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2376 memcpy(skb_put(*frag, count), data, count);
2381 frag = &(*frag)->next;
/*
 * Parse one configuration option at *ptr: return its type/length and the
 * value decoded by size (1/2/4 bytes little-endian, otherwise a pointer
 * to the raw bytes).  Returns the number of bytes consumed.
 */
2391 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2393 struct l2cap_conf_opt *opt = *ptr;
2396 len = L2CAP_CONF_OPT_SIZE + opt->len;
2404 *val = *((u8 *) opt->val);
2408 *val = __le16_to_cpu(*((__le16 *) opt->val));
2412 *val = __le32_to_cpu(*((__le32 *) opt->val));
2416 *val = (unsigned long) opt->val;
2420 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option at *ptr, encoding the value by size
 * (1/2/4 bytes little-endian, otherwise raw memcpy from the pointer in
 * @val), and advance *ptr past it.
 */
2424 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2426 struct l2cap_conf_opt *opt = *ptr;
2428 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2435 *((u8 *) opt->val) = val;
2439 *((__le16 *) opt->val) = cpu_to_le16(val);
2443 *((__le32 *) opt->val) = cpu_to_le32(val);
2447 memcpy(opt->val, (void *) val, len);
2451 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM ack timer expired: send the pending acknowledgement now.
 */
2454 static void l2cap_ack_timeout(unsigned long arg)
2456 struct sock *sk = (void *) arg;
2459 l2cap_send_ack(l2cap_pi(sk));
/*
 * Initialise per-channel ERTM state: sequence counters, the retrans/
 * monitor/ack timers, the SREJ and busy queues, the busy workqueue
 * item, and the ERTM backlog receive handler.
 */
2463 static inline void l2cap_ertm_init(struct sock *sk)
2465 l2cap_pi(sk)->expected_ack_seq = 0;
2466 l2cap_pi(sk)->unacked_frames = 0;
2467 l2cap_pi(sk)->buffer_seq = 0;
2468 l2cap_pi(sk)->num_acked = 0;
2469 l2cap_pi(sk)->frames_sent = 0;
2471 setup_timer(&l2cap_pi(sk)->retrans_timer,
2472 l2cap_retrans_timeout, (unsigned long) sk);
2473 setup_timer(&l2cap_pi(sk)->monitor_timer,
2474 l2cap_monitor_timeout, (unsigned long) sk);
2475 setup_timer(&l2cap_pi(sk)->ack_timer,
2476 l2cap_ack_timeout, (unsigned long) sk);
2478 __skb_queue_head_init(SREJ_QUEUE(sk));
2479 __skb_queue_head_init(BUSY_QUEUE(sk));
2481 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2483 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode: keep ERTM/streaming only if the remote
 * advertised support for it, otherwise fall back to basic mode.
 */
2486 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2489 case L2CAP_MODE_STREAMING:
2490 case L2CAP_MODE_ERTM:
2491 if (l2cap_mode_supported(mode, remote_feat_mask))
2495 return L2CAP_MODE_BASIC;
/*
 * Build our outgoing configure request into @data: MTU option when it
 * differs from the default, plus an RFC option describing the selected
 * mode (and an FCS=none option when both sides allow dropping the CRC).
 * Returns the request length.
 */
2499 static int l2cap_build_conf_req(struct sock *sk, void *data)
2501 struct l2cap_pinfo *pi = l2cap_pi(sk);
2502 struct l2cap_conf_req *req = data;
2503 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2504 void *ptr = req->data;
2506 BT_DBG("sk %p", sk);
/* Only renegotiate the mode on the very first request/response */
2508 if (pi->num_conf_req || pi->num_conf_rsp)
2512 case L2CAP_MODE_STREAMING:
2513 case L2CAP_MODE_ERTM:
2514 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2519 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2525 case L2CAP_MODE_BASIC:
2526 if (pi->imtu != L2CAP_DEFAULT_MTU)
2527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2529 rfc.mode = L2CAP_MODE_BASIC;
2531 rfc.max_transmit = 0;
2532 rfc.retrans_timeout = 0;
2533 rfc.monitor_timeout = 0;
2534 rfc.max_pdu_size = 0;
2538 case L2CAP_MODE_ERTM:
2539 rfc.mode = L2CAP_MODE_ERTM;
2540 rfc.txwin_size = pi->tx_win;
2541 rfc.max_transmit = pi->max_tx;
2542 rfc.retrans_timeout = 0;
2543 rfc.monitor_timeout = 0;
2544 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so a full PDU (plus headers/FCS) fits in the HCI MTU */
2545 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2546 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2548 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2551 if (pi->fcs == L2CAP_FCS_NONE ||
2552 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2553 pi->fcs = L2CAP_FCS_NONE;
2554 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2558 case L2CAP_MODE_STREAMING:
2559 rfc.mode = L2CAP_MODE_STREAMING;
2561 rfc.max_transmit = 0;
2562 rfc.retrans_timeout = 0;
2563 rfc.monitor_timeout = 0;
2564 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2565 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2566 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2568 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2571 if (pi->fcs == L2CAP_FCS_NONE ||
2572 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2573 pi->fcs = L2CAP_FCS_NONE;
2574 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2579 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2580 (unsigned long) &rfc);
2582 /* FIXME: Need actual value of the flush timeout */
2583 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2584 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2586 req->dcid = cpu_to_le16(pi->dcid);
2587 req->flags = cpu_to_le16(0);
2592 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2594 struct l2cap_pinfo *pi = l2cap_pi(sk);
2595 struct l2cap_conf_rsp *rsp = data;
2596 void *ptr = rsp->data;
2597 void *req = pi->conf_req;
2598 int len = pi->conf_len;
2599 int type, hint, olen;
2601 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2602 u16 mtu = L2CAP_DEFAULT_MTU;
2603 u16 result = L2CAP_CONF_SUCCESS;
2605 BT_DBG("sk %p", sk);
2607 while (len >= L2CAP_CONF_OPT_SIZE) {
2608 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2610 hint = type & L2CAP_CONF_HINT;
2611 type &= L2CAP_CONF_MASK;
2614 case L2CAP_CONF_MTU:
2618 case L2CAP_CONF_FLUSH_TO:
2622 case L2CAP_CONF_QOS:
2625 case L2CAP_CONF_RFC:
2626 if (olen == sizeof(rfc))
2627 memcpy(&rfc, (void *) val, olen);
2630 case L2CAP_CONF_FCS:
2631 if (val == L2CAP_FCS_NONE)
2632 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2640 result = L2CAP_CONF_UNKNOWN;
2641 *((u8 *) ptr++) = type;
2646 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2650 case L2CAP_MODE_STREAMING:
2651 case L2CAP_MODE_ERTM:
2652 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2653 pi->mode = l2cap_select_mode(rfc.mode,
2654 pi->conn->feat_mask);
2658 if (pi->mode != rfc.mode)
2659 return -ECONNREFUSED;
2665 if (pi->mode != rfc.mode) {
2666 result = L2CAP_CONF_UNACCEPT;
2667 rfc.mode = pi->mode;
2669 if (pi->num_conf_rsp == 1)
2670 return -ECONNREFUSED;
2672 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2673 sizeof(rfc), (unsigned long) &rfc);
2677 if (result == L2CAP_CONF_SUCCESS) {
2678 /* Configure output options and let the other side know
2679 * which ones we don't like. */
2681 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2682 result = L2CAP_CONF_UNACCEPT;
2685 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2687 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2690 case L2CAP_MODE_BASIC:
2691 pi->fcs = L2CAP_FCS_NONE;
2692 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2695 case L2CAP_MODE_ERTM:
2696 pi->remote_tx_win = rfc.txwin_size;
2697 pi->remote_max_tx = rfc.max_transmit;
2698 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2699 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2701 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2703 rfc.retrans_timeout =
2704 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2705 rfc.monitor_timeout =
2706 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2708 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2710 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2711 sizeof(rfc), (unsigned long) &rfc);
2715 case L2CAP_MODE_STREAMING:
2716 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2717 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2719 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2721 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2723 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2724 sizeof(rfc), (unsigned long) &rfc);
2729 result = L2CAP_CONF_UNACCEPT;
2731 memset(&rfc, 0, sizeof(rfc));
2732 rfc.mode = pi->mode;
2735 if (result == L2CAP_CONF_SUCCESS)
2736 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2738 rsp->scid = cpu_to_le16(pi->dcid);
2739 rsp->result = cpu_to_le16(result);
2740 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's configure response and build an adjusted configure
 * request into @data; on success record the negotiated ERTM/streaming
 * parameters (tx window, timeouts, MPS).
 */
2745 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2747 struct l2cap_pinfo *pi = l2cap_pi(sk);
2748 struct l2cap_conf_req *req = data;
2749 void *ptr = req->data;
2752 struct l2cap_conf_rfc rfc;
2754 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2756 while (len >= L2CAP_CONF_OPT_SIZE) {
2757 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2760 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: counter with the minimum */
2761 if (val < L2CAP_DEFAULT_MIN_MTU) {
2762 *result = L2CAP_CONF_UNACCEPT;
2763 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2766 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2769 case L2CAP_CONF_FLUSH_TO:
2771 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2775 case L2CAP_CONF_RFC:
2776 if (olen == sizeof(rfc))
2777 memcpy(&rfc, (void *)val, olen);
2779 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2780 rfc.mode != pi->mode)
2781 return -ECONNREFUSED;
2785 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2786 sizeof(rfc), (unsigned long) &rfc);
2791 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2792 return -ECONNREFUSED;
2794 pi->mode = rfc.mode;
2796 if (*result == L2CAP_CONF_SUCCESS) {
2798 case L2CAP_MODE_ERTM:
2799 pi->remote_tx_win = rfc.txwin_size;
2800 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2801 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2802 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2804 case L2CAP_MODE_STREAMING:
2805 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2809 req->dcid = cpu_to_le16(pi->dcid);
2810 req->flags = cpu_to_le16(0x0000);
/*
 * Build a minimal configure response (scid/result/flags only) into
 * @data; returns its length.
 */
2815 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2817 struct l2cap_conf_rsp *rsp = data;
2818 void *ptr = rsp->data;
2820 BT_DBG("sk %p", sk);
2822 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2823 rsp->result = cpu_to_le16(result);
2824 rsp->flags = cpu_to_le16(flags);
/*
 * Extract the RFC option from a successful configure response and store
 * the negotiated ERTM/streaming parameters.  No-op for basic mode.
 */
2829 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2831 struct l2cap_pinfo *pi = l2cap_pi(sk);
2834 struct l2cap_conf_rfc rfc;
2836 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2838 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2841 while (len >= L2CAP_CONF_OPT_SIZE) {
2842 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2845 case L2CAP_CONF_RFC:
2846 if (olen == sizeof(rfc))
2847 memcpy(&rfc, (void *)val, olen);
2854 case L2CAP_MODE_ERTM:
2855 pi->remote_tx_win = rfc.txwin_size;
2856 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2857 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2858 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2860 case L2CAP_MODE_STREAMING:
2861 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject: if it answers our outstanding
 * information request, stop the info timer and proceed with connection
 * setup anyway (the peer simply doesn't support the request).
 */
2865 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2867 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2869 if (rej->reason != 0x0000)
2872 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2873 cmd->ident == conn->info_ident) {
2874 del_timer(&conn->info_timer);
2876 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2877 conn->info_ident = 0;
2879 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listener for the PSM,
 * enforce link security (except for SDP), allocate and register a child
 * socket, then reply with success/pending/refusal.  A pending result
 * with no info triggers a feature-mask information exchange first.
 */
2885 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2887 struct l2cap_chan_list *list = &conn->chan_list;
2888 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2889 struct l2cap_conn_rsp rsp;
2890 struct sock *parent, *uninitialized_var(sk);
2891 int result, status = L2CAP_CS_NO_INFO;
2893 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2894 __le16 psm = req->psm;
2896 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2898 /* Check if we have socket listening on psm */
2899 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2901 result = L2CAP_CR_BAD_PSM;
2905 /* Check if the ACL is secure enough (if not SDP) */
2906 if (psm != cpu_to_le16(0x0001) &&
2907 !hci_conn_check_link_mode(conn->hcon)) {
2908 conn->disc_reason = 0x05;
2909 result = L2CAP_CR_SEC_BLOCK;
2913 result = L2CAP_CR_NO_MEM;
2915 /* Check for backlog size */
2916 if (sk_acceptq_is_full(parent)) {
2917 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2921 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2925 write_lock_bh(&list->lock);
2927 /* Check if we already have channel with that dcid */
2928 if (__l2cap_get_chan_by_dcid(list, scid)) {
2929 write_unlock_bh(&list->lock);
2930 sock_set_flag(sk, SOCK_ZAPPED);
2931 l2cap_sock_kill(sk);
2935 hci_conn_hold(conn->hcon);
2937 l2cap_sock_init(sk, parent);
2938 bacpy(&bt_sk(sk)->src, conn->src);
2939 bacpy(&bt_sk(sk)->dst, conn->dst);
2940 l2cap_pi(sk)->psm = psm;
2941 l2cap_pi(sk)->dcid = scid;
2943 __l2cap_chan_add(conn, sk, parent);
2944 dcid = l2cap_pi(sk)->scid;
2946 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2948 l2cap_pi(sk)->ident = cmd->ident;
2950 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2951 if (l2cap_check_security(sk)) {
/* Security ok: either hand off to userspace (defer) or accept now */
2952 if (bt_sk(sk)->defer_setup) {
2953 sk->sk_state = BT_CONNECT2;
2954 result = L2CAP_CR_PEND;
2955 status = L2CAP_CS_AUTHOR_PEND;
2956 parent->sk_data_ready(parent, 0);
2958 sk->sk_state = BT_CONFIG;
2959 result = L2CAP_CR_SUCCESS;
2960 status = L2CAP_CS_NO_INFO;
2963 sk->sk_state = BT_CONNECT2;
2964 result = L2CAP_CR_PEND;
2965 status = L2CAP_CS_AUTHEN_PEND;
2968 sk->sk_state = BT_CONNECT2;
2969 result = L2CAP_CR_PEND;
2970 status = L2CAP_CS_NO_INFO;
2973 write_unlock_bh(&list->lock);
2976 bh_unlock_sock(parent);
2979 rsp.scid = cpu_to_le16(scid);
2980 rsp.dcid = cpu_to_le16(dcid);
2981 rsp.result = cpu_to_le16(result);
2982 rsp.status = cpu_to_le16(status);
2983 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2985 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2986 struct l2cap_info_req info;
2987 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2989 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2990 conn->info_ident = l2cap_get_ident(conn);
2992 mod_timer(&conn->info_timer, jiffies +
2993 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2995 l2cap_send_cmd(conn, conn->info_ident,
2996 L2CAP_INFO_REQ, sizeof(info), &info);
2999 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3000 result == L2CAP_CR_SUCCESS) {
3002 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3003 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3004 l2cap_build_conf_req(sk, buf), buf);
3005 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle an incoming Connection Response: locate the channel by scid
 * (or by ident while still pending), then on success move to BT_CONFIG
 * and send our configure request; on refusal tear the channel down.
 */
3011 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3013 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3014 u16 scid, dcid, result, status;
3018 scid = __le16_to_cpu(rsp->scid);
3019 dcid = __le16_to_cpu(rsp->dcid);
3020 result = __le16_to_cpu(rsp->result);
3021 status = __le16_to_cpu(rsp->status);
3023 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
3026 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3030 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
3036 case L2CAP_CR_SUCCESS:
3037 sk->sk_state = BT_CONFIG;
3038 l2cap_pi(sk)->ident = 0;
3039 l2cap_pi(sk)->dcid = dcid;
3040 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
3042 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3045 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3047 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3048 l2cap_build_conf_req(sk, req), req);
3049 l2cap_pi(sk)->num_conf_req++;
3053 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3057 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle an incoming Configure Request: buffer option fragments until
 * the final one arrives (flags bit 0 clear), parse them, answer with
 * our response, and — once both directions are configured — bring the
 * channel up (with ERTM init when negotiated).
 */
3065 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3067 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3073 dcid = __le16_to_cpu(req->dcid);
3074 flags = __le16_to_cpu(req->flags);
3076 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3078 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3082 if (sk->sk_state != BT_CONFIG) {
3083 struct l2cap_cmd_rej rej;
3085 rej.reason = cpu_to_le16(0x0002);
3086 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3091 /* Reject if config buffer is too small. */
3092 len = cmd_len - sizeof(*req);
3093 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3094 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3095 l2cap_build_conf_rsp(sk, rsp,
3096 L2CAP_CONF_REJECT, flags), rsp);
3101 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3102 l2cap_pi(sk)->conf_len += len;
3104 if (flags & 0x0001) {
3105 /* Incomplete config. Send empty response. */
3106 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3107 l2cap_build_conf_rsp(sk, rsp,
3108 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3112 /* Complete config. */
3113 len = l2cap_parse_conf_req(sk, rsp);
3115 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3119 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3120 l2cap_pi(sk)->num_conf_rsp++;
3122 /* Reset config buffer. */
3123 l2cap_pi(sk)->conf_len = 0;
3125 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3128 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* FCS stays on unless both sides explicitly dropped it */
3129 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3130 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3131 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3133 sk->sk_state = BT_CONNECTED;
3135 l2cap_pi(sk)->next_tx_seq = 0;
3136 l2cap_pi(sk)->expected_tx_seq = 0;
3137 __skb_queue_head_init(TX_QUEUE(sk));
3138 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3139 l2cap_ertm_init(sk);
3141 l2cap_chan_ready(sk);
3145 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3147 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3148 l2cap_build_conf_req(sk, buf), buf);
3149 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming L2CAP Configure Response.
 * On UNACCEPT, re-negotiates (bounded by L2CAP_CONF_MAX_CONF_RSP attempts);
 * on any other failure result, schedules disconnect. When both directions
 * are done, finalises FCS and brings the channel to BT_CONNECTED. */
3157 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3159 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3160 u16 scid, flags, result;
3162 int len = cmd->len - sizeof(*rsp);
3164 scid = __le16_to_cpu(rsp->scid);
3165 flags = __le16_to_cpu(rsp->flags);
3166 result = __le16_to_cpu(rsp->result);
3168 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3169 scid, flags, result);
3171 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3176 case L2CAP_CONF_SUCCESS:
3177 l2cap_conf_rfc_get(sk, rsp->data, len);
3180 case L2CAP_CONF_UNACCEPT:
/* Peer rejected our options: retry with adjusted options, but
 * give up after L2CAP_CONF_MAX_CONF_RSP rounds. */
3181 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3184 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3185 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3189 /* throw out any old stored conf requests */
3190 result = L2CAP_CONF_SUCCESS;
3191 len = l2cap_parse_conf_rsp(sk, rsp->data,
3194 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3198 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3199 L2CAP_CONF_REQ, len, req);
3200 l2cap_pi(sk)->num_conf_req++;
3201 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: fatal — flag the error and disconnect,
 * with a 5 s timer as a safety net for the response. */
3207 sk->sk_err = ECONNRESET;
3208 l2cap_sock_set_timer(sk, HZ * 5);
3209 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3216 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Mirror of the completion path in l2cap_config_req(). */
3218 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3219 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3220 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3221 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3223 sk->sk_state = BT_CONNECTED;
3224 l2cap_pi(sk)->next_tx_seq = 0;
3225 l2cap_pi(sk)->expected_tx_seq = 0;
3226 __skb_queue_head_init(TX_QUEUE(sk));
3227 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3228 l2cap_ertm_init(sk);
3230 l2cap_chan_ready(sk);
/* Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response, shut the socket down and remove the channel. */
3238 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3240 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3241 struct l2cap_disconn_rsp rsp;
3245 scid = __le16_to_cpu(req->scid);
3246 dcid = __le16_to_cpu(req->dcid);
3248 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* req->dcid is OUR source CID — look the channel up by it. */
3250 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Echo the pair of CIDs back, from our channel's point of view. */
3254 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3255 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3256 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3258 sk->sk_shutdown = SHUTDOWN_MASK;
3260 l2cap_chan_del(sk, ECONNRESET);
3263 l2cap_sock_kill(sk);
/* Handle an incoming Disconnect Response: the peer confirmed our
 * disconnect, so remove the channel (err 0 — clean close) and kill
 * the socket if it is no longer referenced. */
3267 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3269 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3273 scid = __le16_to_cpu(rsp->scid);
3274 dcid = __le16_to_cpu(rsp->dcid);
3276 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3278 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3282 l2cap_chan_del(sk, 0);
3285 l2cap_sock_kill(sk);
/* Handle an incoming Information Request. Answers FEAT_MASK (optionally
 * advertising ERTM/streaming when enable_ertm is set) and FIXED_CHAN;
 * any other type gets an IR_NOTSUPP response. */
3289 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3291 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3294 type = __le16_to_cpu(req->type);
3296 BT_DBG("type 0x%4.4x", type);
3298 if (type == L2CAP_IT_FEAT_MASK) {
3300 u32 feat_mask = l2cap_feat_mask;
3301 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3302 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3303 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3305 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Feature mask is a 32-bit LE value placed in the variable-length
 * data area of the response. */
3307 put_unaligned_le32(feat_mask, rsp->data);
3308 l2cap_send_cmd(conn, cmd->ident,
3309 L2CAP_INFO_RSP, sizeof(buf), buf);
3310 } else if (type == L2CAP_IT_FIXED_CHAN) {
3312 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3313 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3314 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channels bitmap, copied after the 4-byte header. */
3315 memcpy(buf + 4, l2cap_fixed_chan, 8);
3316 l2cap_send_cmd(conn, cmd->ident,
3317 L2CAP_INFO_RSP, sizeof(buf), buf);
3319 struct l2cap_info_rsp rsp;
3320 rsp.type = cpu_to_le16(type);
3321 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3322 l2cap_send_cmd(conn, cmd->ident,
3323 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response during connection setup.
 * After FEAT_MASK, optionally chains a FIXED_CHAN query; when the info
 * exchange is complete, marks it done and starts pending channels. */
3329 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3331 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3334 type = __le16_to_cpu(rsp->type);
3335 result = __le16_to_cpu(rsp->result);
3337 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* A response arrived; stop the info-request timeout timer. */
3339 del_timer(&conn->info_timer);
3341 if (type == L2CAP_IT_FEAT_MASK) {
3342 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels — ask which ones before
 * declaring the info exchange complete. */
3344 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3345 struct l2cap_info_req req;
3346 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3348 conn->info_ident = l2cap_get_ident(conn);
3350 l2cap_send_cmd(conn, conn->info_ident,
3351 L2CAP_INFO_REQ, sizeof(req), &req);
3353 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3354 conn->info_ident = 0;
3356 l2cap_conn_start(conn);
3358 } else if (type == L2CAP_IT_FIXED_CHAN) {
3359 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3360 conn->info_ident = 0;
3362 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel (CID 0x0001): walk the
 * concatenated command headers in the skb and dispatch each to its
 * handler. A handler error triggers a Command Reject back to the peer. */
3368 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3370 u8 *data = skb->data;
3372 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic. */
3375 l2cap_raw_recv(conn, skb);
3377 while (len >= L2CAP_CMD_HDR_SIZE) {
3379 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3380 data += L2CAP_CMD_HDR_SIZE;
3381 len -= L2CAP_CMD_HDR_SIZE;
3383 cmd_len = le16_to_cpu(cmd.len);
3385 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command longer than the remaining payload, or an ident of 0
 * (reserved), means the frame is corrupt — stop parsing. */
3387 if (cmd_len > len || !cmd.ident) {
3388 BT_DBG("corrupted command");
3393 case L2CAP_COMMAND_REJ:
3394 l2cap_command_rej(conn, &cmd, data);
3397 case L2CAP_CONN_REQ:
3398 err = l2cap_connect_req(conn, &cmd, data);
3401 case L2CAP_CONN_RSP:
3402 err = l2cap_connect_rsp(conn, &cmd, data);
3405 case L2CAP_CONF_REQ:
3406 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3409 case L2CAP_CONF_RSP:
3410 err = l2cap_config_rsp(conn, &cmd, data);
3413 case L2CAP_DISCONN_REQ:
3414 err = l2cap_disconnect_req(conn, &cmd, data);
3417 case L2CAP_DISCONN_RSP:
3418 err = l2cap_disconnect_rsp(conn, &cmd, data);
3421 case L2CAP_ECHO_REQ:
/* Echo simply mirrors the request payload back. */
3422 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3425 case L2CAP_ECHO_RSP:
3428 case L2CAP_INFO_REQ:
3429 err = l2cap_information_req(conn, &cmd, data);
3432 case L2CAP_INFO_RSP:
3433 err = l2cap_information_rsp(conn, &cmd, data);
3437 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3443 struct l2cap_cmd_rej rej;
3444 BT_DBG("error %d", err);
3446 /* FIXME: Map err to a valid reason */
3447 rej.reason = cpu_to_le16(0);
3448 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * The CRC covers the L2CAP header (hdr_size bytes before skb->data)
 * plus the payload. */
3458 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3460 u16 our_fcs, rcv_fcs;
3461 int hdr_size = L2CAP_HDR_SIZE + 2;
3463 if (pi->fcs == L2CAP_FCS_CRC16) {
/* Trim the 2 FCS bytes off the payload first; they are still
 * physically present at skb->data + skb->len and are read from
 * there on the next line. */
3464 skb_trim(skb, skb->len - 2);
3465 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3466 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3468 if (our_fcs != rcv_fcs)
/* Respond to a poll (P-bit) from the peer: send RNR if we are locally
 * busy, otherwise flush pending I-frames (retransmitting first if the
 * peer was busy), and fall back to a plain RR if nothing was sent. */
3474 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3476 struct l2cap_pinfo *pi = l2cap_pi(sk);
3479 pi->frames_sent = 0;
3481 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3483 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3484 control |= L2CAP_SUPER_RCV_NOT_READY;
3485 l2cap_send_sframe(pi, control);
3486 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3489 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3490 l2cap_retransmit_frames(sk);
3492 l2cap_ertm_send(sk);
/* Nothing went out and we're not busy: peer still needs an ack,
 * so send an explicit Receiver Ready. */
3494 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3495 pi->frames_sent == 0) {
3496 control |= L2CAP_SUPER_RCV_READY;
3497 l2cap_send_sframe(pi, control);
/* Insert an out-of-order I-frame into the SREJ queue, kept sorted by
 * tx_seq distance from buffer_seq (modulo-64 sequence space).
 * Duplicate tx_seq values are detected so the caller can drop them. */
3501 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3503 struct sk_buff *next_skb;
3504 struct l2cap_pinfo *pi = l2cap_pi(sk);
3505 int tx_seq_offset, next_tx_seq_offset;
3507 bt_cb(skb)->tx_seq = tx_seq;
3508 bt_cb(skb)->sar = sar;
3510 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: trivially append. */
3512 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Offsets are distances from buffer_seq in the 0..63 ring. */
3516 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3517 if (tx_seq_offset < 0)
3518 tx_seq_offset += 64;
3521 if (bt_cb(next_skb)->tx_seq == tx_seq)
3524 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3525 pi->buffer_seq) % 64;
3526 if (next_tx_seq_offset < 0)
3527 next_tx_seq_offset += 64;
/* Found the first queued frame that sorts after us: insert before it. */
3529 if (next_tx_seq_offset > tx_seq_offset) {
3530 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3534 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3537 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Sorts after everything currently queued. */
3539 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an SDU from ERTM I-frames according to the SAR bits in the
 * control field: UNSEGMENTED frames are delivered directly; START
 * allocates pi->sdu, CONTINUE appends, END completes and queues the SDU
 * to the socket. SAR protocol violations disconnect the channel. */
3544 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3546 struct l2cap_pinfo *pi = l2cap_pi(sk);
3547 struct sk_buff *_skb;
3550 switch (control & L2CAP_CTRL_SAR) {
3551 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while a segmented SDU is in progress
 * is a protocol violation. */
3552 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3555 err = sock_queue_rcv_skb(sk, skb);
3561 case L2CAP_SDU_START:
3562 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START frame carry the total SDU
 * length. */
3565 pi->sdu_len = get_unaligned_le16(skb->data);
3567 if (pi->sdu_len > pi->imtu)
3570 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3574 /* pull sdu_len bytes only after alloc, because of Local Busy
3575 * condition we have to be sure that this will be executed
3576 * only once, i.e., when alloc does not fail */
3579 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3581 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3582 pi->partial_sdu_len = skb->len;
3585 case L2CAP_SDU_CONTINUE:
3586 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3592 pi->partial_sdu_len += skb->len;
3593 if (pi->partial_sdu_len > pi->sdu_len)
3596 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* (END case — label elided in this view.) */
3601 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* SAR_RETRY is set when a previous delivery attempt of this
 * completed SDU failed (e.g. receive buffer full) so the
 * accounting below must not run twice. */
3607 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3608 pi->partial_sdu_len += skb->len;
3610 if (pi->partial_sdu_len > pi->imtu)
3613 if (pi->partial_sdu_len != pi->sdu_len)
3616 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3619 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3621 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3625 err = sock_queue_rcv_skb(sk, _skb);
3628 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3632 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3633 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* (Disconnect path for SAR violations / length overruns.) */
3647 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* Drain the local-busy queue into the reassembly path. If a frame still
 * cannot be delivered it is requeued and the busy condition persists.
 * On full drain, exits local busy: either polls the peer with RR+P (if
 * an RNR was sent) or simply clears the busy flags. */
3652 static int l2cap_try_push_rx_skb(struct sock *sk)
3654 struct l2cap_pinfo *pi = l2cap_pi(sk);
3655 struct sk_buff *skb;
3659 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3660 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3661 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still can't deliver — put it back at the head and stay busy. */
3663 skb_queue_head(BUSY_QUEUE(sk), skb);
3667 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3670 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We told the peer "not ready" earlier; poll it with RR+P-bit
 * and start the monitor timer while waiting for the F-bit. */
3673 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3674 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3675 l2cap_send_sframe(pi, control);
3676 l2cap_pi(sk)->retry_count = 1;
3678 del_timer(&pi->retrans_timer);
3679 __mod_monitor_timer();
3681 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3684 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3685 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3687 BT_DBG("sk %p, Exit local busy", sk);
/* Workqueue handler for the local-busy condition: periodically retries
 * pushing queued frames to the socket, sleeping HZ/5 between attempts.
 * Gives up (and disconnects) after L2CAP_LOCAL_BUSY_TRIES, on a pending
 * signal, or on a socket error. */
3692 static void l2cap_busy_work(struct work_struct *work)
3694 DECLARE_WAITQUEUE(wait, current);
3695 struct l2cap_pinfo *pi =
3696 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast is
 * how the bluetooth core recovers the socket. */
3697 struct sock *sk = (struct sock *)pi;
3698 int n_tries = 0, timeo = HZ/5, err;
3699 struct sk_buff *skb;
3703 add_wait_queue(sk_sleep(sk), &wait);
3704 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3705 set_current_state(TASK_INTERRUPTIBLE);
3707 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3709 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3716 if (signal_pending(current)) {
3717 err = sock_intr_errno(timeo);
3722 timeo = schedule_timeout(timeo);
3725 err = sock_error(sk);
/* Queue drained successfully — local busy is over. */
3729 if (l2cap_try_push_rx_skb(sk) == 0)
3733 set_current_state(TASK_RUNNING);
3734 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver an in-sequence I-frame to reassembly, entering the local-busy
 * state (queue the frame, send RNR, kick the busy workqueue) if the
 * receive path cannot take it right now. */
3739 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3741 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Already busy: just append and try to drain. */
3744 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3745 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3746 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3747 return l2cap_try_push_rx_skb(sk);
3752 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3754 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3758 /* Busy Condition */
3759 BT_DBG("sk %p, Enter local busy", sk);
3761 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3762 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3763 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending: RNR with current buffer_seq. */
3765 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3766 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3767 l2cap_send_sframe(pi, sctrl);
3769 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3771 del_timer(&pi->ack_timer);
3773 queue_work(_busy_wq, &pi->busy_work);
/* Reassemble SDUs in streaming mode. Unlike ERTM, streaming mode has no
 * retransmission, so errors here just drop/reset the partial SDU rather
 * than disconnecting. */
3778 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3780 struct l2cap_pinfo *pi = l2cap_pi(sk);
3781 struct sk_buff *_skb;
3785 * TODO: We have to notify the userland if some data is lost with the
3789 switch (control & L2CAP_CTRL_SAR) {
3790 case L2CAP_SDU_UNSEGMENTED:
3791 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3796 err = sock_queue_rcv_skb(sk, skb);
3802 case L2CAP_SDU_START:
3803 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes hold the total SDU length. */
3808 pi->sdu_len = get_unaligned_le16(skb->data);
3811 if (pi->sdu_len > pi->imtu) {
3816 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3822 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3824 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3825 pi->partial_sdu_len = skb->len;
3829 case L2CAP_SDU_CONTINUE:
3830 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3833 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3835 pi->partial_sdu_len += skb->len;
3836 if (pi->partial_sdu_len > pi->sdu_len)
/* (END case — label elided in this view.) */
3844 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3847 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3849 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3850 pi->partial_sdu_len += skb->len;
3852 if (pi->partial_sdu_len > pi->imtu)
/* Only deliver if the reassembled length matches the advertised
 * SDU length; otherwise the SDU is silently discarded. */
3855 if (pi->partial_sdu_len == pi->sdu_len) {
3856 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3857 err = sock_queue_rcv_skb(sk, _skb);
/* After an SREJ'd frame arrives, deliver the run of now-contiguous frames
 * buffered at the head of the SREJ queue, advancing buffer_seq_srej and
 * tx_seq (modulo 64) for each one, until the next gap. */
3872 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3874 struct sk_buff *skb;
3877 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Head of queue is not the next expected frame — still a gap. */
3878 if (bt_cb(skb)->tx_seq != tx_seq)
3881 skb = skb_dequeue(SREJ_QUEUE(sk));
3882 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3883 l2cap_ertm_reassembly_sdu(sk, skb, control);
3884 l2cap_pi(sk)->buffer_seq_srej =
3885 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3886 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for every outstanding missing sequence number
 * up to (and excluding) tx_seq, which has now been received. Entries are
 * removed up to tx_seq; the rest are re-queued at the tail. */
3890 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3892 struct l2cap_pinfo *pi = l2cap_pi(sk);
3893 struct srej_list *l, *tmp;
3896 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Reached the frame that just arrived — stop resending. */
3897 if (l->tx_seq == tx_seq) {
3902 control = L2CAP_SUPER_SELECT_REJECT;
3903 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3904 l2cap_send_sframe(pi, control);
/* Keep the entry pending by moving it to the list tail. */
3906 list_add_tail(&l->list, SREJ_LIST(sk));
/* A gap was detected: send one SREJ per missing sequence number between
 * expected_tx_seq and the received tx_seq, recording each in SREJ_LIST
 * so the retransmissions can be tracked.
 * NOTE(review): the kzalloc() result is used without a NULL check on the
 * next visible line — under GFP_ATOMIC this can fail; upstream later
 * fixed this path. Worth confirming against the full file. */
3910 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3912 struct l2cap_pinfo *pi = l2cap_pi(sk);
3913 struct srej_list *new;
3916 while (tx_seq != pi->expected_tx_seq) {
3917 control = L2CAP_SUPER_SELECT_REJECT;
3918 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3919 l2cap_send_sframe(pi, control);
3921 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3922 new->tx_seq = pi->expected_tx_seq;
3923 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3924 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame that actually arrived. */
3926 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Core ERTM I-frame receive state machine: validates tx_seq against the
 * transmit window, handles in-sequence delivery, out-of-sequence frames
 * (entering/servicing SREJ recovery), duplicate detection, F-bit
 * processing, and periodic acking (one ack every tx_win/6 + 1 frames). */
3929 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3931 struct l2cap_pinfo *pi = l2cap_pi(sk);
3932 u8 tx_seq = __get_txseq(rx_control);
3933 u8 req_seq = __get_reqseq(rx_control);
3934 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3935 int tx_seq_offset, expected_tx_seq_offset;
3936 int num_to_ack = (pi->tx_win/6) + 1;
3939 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* F-bit answers our earlier poll: stop the monitor timer and, if
 * frames are still unacked, restart the retransmission timer. */
3942 if (L2CAP_CTRL_FINAL & rx_control &&
3943 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3944 del_timer(&pi->monitor_timer);
3945 if (pi->unacked_frames > 0)
3946 __mod_retrans_timer();
3947 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* The piggy-backed ReqSeq acknowledges our transmitted frames. */
3950 pi->expected_ack_seq = req_seq;
3951 l2cap_drop_acked_frames(sk);
3953 if (tx_seq == pi->expected_tx_seq)
3956 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3957 if (tx_seq_offset < 0)
3958 tx_seq_offset += 64;
3960 /* invalid tx_seq */
3961 if (tx_seq_offset >= pi->tx_win) {
3962 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): '==' compares the whole conn_state word to a single
 * flag instead of testing the bit ('&'); upstream later changed
 * this — confirm against the full file before relying on it. */
3966 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3969 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3970 struct srej_list *first;
3972 first = list_first_entry(SREJ_LIST(sk),
3973 struct srej_list, list);
/* This frame fills the oldest outstanding SREJ gap. */
3974 if (tx_seq == first->tx_seq) {
3975 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3976 l2cap_check_srej_gap(sk, tx_seq);
3978 list_del(&first->list);
/* All gaps filled — leave SREJ recovery. */
3981 if (list_empty(SREJ_LIST(sk))) {
3982 pi->buffer_seq = pi->buffer_seq_srej;
3983 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3985 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3988 struct srej_list *l;
3990 /* duplicated tx_seq */
3991 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
/* Frame matches a pending SREJ entry (but not the oldest):
 * resend the remaining SREJs. */
3994 list_for_each_entry(l, SREJ_LIST(sk), list) {
3995 if (l->tx_seq == tx_seq) {
3996 l2cap_resend_srejframe(sk, tx_seq);
4000 l2cap_send_srejframe(sk, tx_seq);
4003 expected_tx_seq_offset =
4004 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4005 if (expected_tx_seq_offset < 0)
4006 expected_tx_seq_offset += 64;
4008 /* duplicated tx_seq */
4009 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
4012 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4014 BT_DBG("sk %p, Enter SREJ", sk);
4016 INIT_LIST_HEAD(SREJ_LIST(sk));
4017 pi->buffer_seq_srej = pi->buffer_seq;
4019 __skb_queue_head_init(SREJ_QUEUE(sk));
4020 __skb_queue_head_init(BUSY_QUEUE(sk));
4021 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4023 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4025 l2cap_send_srejframe(sk, tx_seq);
4027 del_timer(&pi->ack_timer);
/* (Expected-sequence path — label elided in this view.) */
4032 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4034 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4035 bt_cb(skb)->tx_seq = tx_seq;
4036 bt_cb(skb)->sar = sar;
4037 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4041 err = l2cap_push_rx_skb(sk, skb, rx_control);
4045 if (rx_control & L2CAP_CTRL_FINAL) {
4046 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4047 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4049 l2cap_retransmit_frames(sk);
/* Ack every num_to_ack-th received frame to keep the peer's
 * window moving without acking each frame individually. */
4054 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4055 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready (RR) S-frame: acknowledge outstanding frames,
 * then react to the P/F bits — answer a poll, or resume transmission
 * after the peer's final response. */
4065 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4067 struct l2cap_pinfo *pi = l2cap_pi(sk);
4069 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4072 pi->expected_ack_seq = __get_reqseq(rx_control);
4073 l2cap_drop_acked_frames(sk);
4075 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polls us; we must answer with the F-bit set. */
4076 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4077 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4078 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4079 (pi->unacked_frames > 0))
4080 __mod_retrans_timer();
4082 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4083 l2cap_send_srejtail(sk);
4085 l2cap_send_i_or_rr_or_rnr(sk);
4088 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Final response to our poll: resume, retransmitting if a REJ
 * was outstanding. */
4089 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4091 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4092 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4094 l2cap_retransmit_frames(sk);
4097 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4098 (pi->unacked_frames > 0))
4099 __mod_retrans_timer();
4101 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4102 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4105 l2cap_ertm_send(sk);
/* Handle a Reject (REJ) S-frame: ack up to the rejected sequence number
 * and retransmit from there. REJ_ACT tracks a retransmission already in
 * flight while we wait for an F-bit, to avoid doing it twice. */
4110 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4112 struct l2cap_pinfo *pi = l2cap_pi(sk);
4113 u8 tx_seq = __get_reqseq(rx_control);
4115 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4117 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4119 pi->expected_ack_seq = tx_seq;
4120 l2cap_drop_acked_frames(sk);
4122 if (rx_control & L2CAP_CTRL_FINAL) {
4123 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4124 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4126 l2cap_retransmit_frames(sk);
4128 l2cap_retransmit_frames(sk);
/* Remember we already acted on this REJ while a poll is pending. */
4130 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4131 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the one
 * requested frame. SREJ_ACT/srej_save_reqseq mirror the REJ_ACT logic
 * to suppress a duplicate retransmission around a poll/final exchange. */
4134 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4136 struct l2cap_pinfo *pi = l2cap_pi(sk);
4137 u8 tx_seq = __get_reqseq(rx_control);
4139 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4141 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4143 if (rx_control & L2CAP_CTRL_POLL) {
/* SREJ with P-bit also acks everything before tx_seq. */
4144 pi->expected_ack_seq = tx_seq;
4145 l2cap_drop_acked_frames(sk);
4147 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4148 l2cap_retransmit_one_frame(sk, tx_seq);
4150 l2cap_ertm_send(sk);
4152 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4153 pi->srej_save_reqseq = tx_seq;
4154 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4156 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Only skip the retransmit if this final matches the SREJ we
 * already acted on. */
4157 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4158 pi->srej_save_reqseq == tx_seq)
4159 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4161 l2cap_retransmit_one_frame(sk, tx_seq);
4163 l2cap_retransmit_one_frame(sk, tx_seq);
4164 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4165 pi->srej_save_reqseq = tx_seq;
4166 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack its
 * ReqSeq, stop retransmitting, and answer a poll with final RR/RNR (or
 * flush pending SREJs if we are in SREJ recovery). */
4171 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4173 struct l2cap_pinfo *pi = l2cap_pi(sk);
4174 u8 tx_seq = __get_reqseq(rx_control);
4176 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4178 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4179 pi->expected_ack_seq = tx_seq;
4180 l2cap_drop_acked_frames(sk);
4182 if (rx_control & L2CAP_CTRL_POLL)
4183 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4185 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer is busy — no point running the retransmission timer. */
4186 del_timer(&pi->retrans_timer);
4187 if (rx_control & L2CAP_CTRL_POLL)
4188 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4192 if (rx_control & L2CAP_CTRL_POLL)
4193 l2cap_send_srejtail(sk);
4195 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame (supervisory frame) to the RR/REJ/SREJ/RNR
 * handler selected by the SUPERVISE bits, after common F-bit handling. */
4198 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4200 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* Same F-bit handling as the I-frame path: our poll was answered. */
4202 if (L2CAP_CTRL_FINAL & rx_control &&
4203 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4204 del_timer(&l2cap_pi(sk)->monitor_timer);
4205 if (l2cap_pi(sk)->unacked_frames > 0)
4206 __mod_retrans_timer();
4207 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4210 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4211 case L2CAP_SUPER_RCV_READY:
4212 l2cap_data_channel_rrframe(sk, rx_control);
4215 case L2CAP_SUPER_REJECT:
4216 l2cap_data_channel_rejframe(sk, rx_control);
4219 case L2CAP_SUPER_SELECT_REJECT:
4220 l2cap_data_channel_srejframe(sk, rx_control);
4223 case L2CAP_SUPER_RCV_NOT_READY:
4224 l2cap_data_channel_rnrframe(sk, rx_control);
/* Entry point for a received ERTM frame: verify FCS, validate the frame
 * length against MPS and the ReqSeq against the transmit window, then
 * dispatch to the I-frame or S-frame handler. */
4232 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4234 struct l2cap_pinfo *pi = l2cap_pi(sk);
4237 int len, next_tx_seq_offset, req_seq_offset;
4239 control = get_unaligned_le16(skb->data);
4244 * We can just drop the corrupted I-frame here.
4245 * Receiver will miss it and start proper recovery
4246 * procedures and ask retransmission.
4248 if (l2cap_check_fcs(pi, skb))
/* SAR-START I-frames carry a 2-byte SDU-length field in len. */
4251 if (__is_sar_start(control) && __is_iframe(control))
4254 if (pi->fcs == L2CAP_FCS_CRC16)
4257 if (len > pi->mps) {
4258 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* ReqSeq must fall between expected_ack_seq and next_tx_seq in the
 * modulo-64 sequence space, otherwise the peer acked frames we
 * never sent. */
4262 req_seq = __get_reqseq(control);
4263 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4264 if (req_seq_offset < 0)
4265 req_seq_offset += 64;
4267 next_tx_seq_offset =
4268 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4269 if (next_tx_seq_offset < 0)
4270 next_tx_seq_offset += 64;
4272 /* check for invalid req-seq */
4273 if (req_seq_offset > next_tx_seq_offset) {
4274 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4278 if (__is_iframe(control)) {
4280 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4284 l2cap_data_channel_iframe(sk, control, skb);
4288 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4292 l2cap_data_channel_sframe(sk, control, skb);
/* Deliver a data frame to the channel identified by cid, according to
 * the channel mode: basic (direct queue), ERTM (full state machine, via
 * the socket backlog when owned by a user), or streaming (sequence
 * tracked but never retransmitted). */
4302 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4305 struct l2cap_pinfo *pi;
4310 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4312 BT_DBG("unknown cid 0x%4.4x", cid);
4318 BT_DBG("sk %p, len %d", sk, skb->len);
4320 if (sk->sk_state != BT_CONNECTED)
4324 case L2CAP_MODE_BASIC:
4325 /* If socket recv buffers overflows we drop data here
4326 * which is *bad* because L2CAP has to be reliable.
4327 * But we don't have any other choice. L2CAP doesn't
4328 * provide flow control mechanism. */
4330 if (pi->imtu < skb->len)
4333 if (!sock_queue_rcv_skb(sk, skb))
4337 case L2CAP_MODE_ERTM:
/* If a user holds the socket lock, defer to the backlog so the
 * state machine runs with the lock released. */
4338 if (!sock_owned_by_user(sk)) {
4339 l2cap_ertm_data_rcv(sk, skb);
4341 if (sk_add_backlog(sk, skb))
4347 case L2CAP_MODE_STREAMING:
4348 control = get_unaligned_le16(skb->data);
4352 if (l2cap_check_fcs(pi, skb))
4355 if (__is_sar_start(control))
4358 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode carries no S-frames; drop anything oversized
 * or supervisory. */
4361 if (len > pi->mps || len < 0 || __is_sframe(control))
4364 tx_seq = __get_txseq(control);
/* Lost frames are simply skipped over in streaming mode. */
4366 if (pi->expected_tx_seq == tx_seq)
4367 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4369 pi->expected_tx_seq = (tx_seq + 1) % 64;
4371 l2cap_streaming_reassembly_sdu(sk, skb, control);
4376 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) frame to a socket bound to the
 * given PSM on the source address; drops frames exceeding the IMTU. */
4390 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4394 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4398 BT_DBG("sk %p, len %d", sk, skb->len);
4400 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4403 if (l2cap_pi(sk)->imtu < skb->len)
4406 if (!sock_queue_rcv_skb(sk, skb))
/* Route a complete reassembled L2CAP frame by CID: signalling channel,
 * connectionless channel (PSM-prefixed payload), or a data channel. */
4418 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4420 struct l2cap_hdr *lh = (void *) skb->data;
4424 skb_pull(skb, L2CAP_HDR_SIZE);
4425 cid = __le16_to_cpu(lh->cid);
4426 len = __le16_to_cpu(lh->len);
/* Header length must exactly match the remaining payload. */
4428 if (len != skb->len) {
4433 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4436 case L2CAP_CID_SIGNALING:
4437 l2cap_sig_channel(conn, skb);
4440 case L2CAP_CID_CONN_LESS:
/* Connectionless frames start with a 2-byte PSM. */
4441 psm = get_unaligned_le16(skb->data);
4443 l2cap_conless_channel(conn, psm, skb);
4447 l2cap_data_channel(conn, cid, skb);
4452 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening sockets; an exact local-address match (lm1) takes
 * precedence over wildcard BDADDR_ANY listeners (lm2). Returns the
 * accept/role-switch link-mode flags, or 0 to refuse. */
4454 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4456 int exact = 0, lm1 = 0, lm2 = 0;
4457 register struct sock *sk;
4458 struct hlist_node *node;
4460 if (type != ACL_LINK)
4463 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4465 /* Find listening sockets and check their link_mode */
4466 read_lock(&l2cap_sk_list.lock);
4467 sk_for_each(sk, node, &l2cap_sk_list.head) {
4468 if (sk->sk_state != BT_LISTEN)
4471 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4472 lm1 |= HCI_LM_ACCEPT;
4473 if (l2cap_pi(sk)->role_switch)
4474 lm1 |= HCI_LM_MASTER;
4476 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4477 lm2 |= HCI_LM_ACCEPT;
4478 if (l2cap_pi(sk)->role_switch)
4479 lm2 |= HCI_LM_MASTER;
4482 read_unlock(&l2cap_sk_list.lock);
4484 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed. On success, create
 * the L2CAP connection object and start pending channels; on failure,
 * tear down with the mapped errno. */
4487 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4489 struct l2cap_conn *conn;
4491 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4493 if (hcon->type != ACL_LINK)
4497 conn = l2cap_conn_add(hcon, status);
4499 l2cap_conn_ready(conn);
4501 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the HCI reason code to use when the ACL link is
 * being disconnected (stored earlier as conn->disc_reason). */
4506 static int l2cap_disconn_ind(struct hci_conn *hcon)
4508 struct l2cap_conn *conn = hcon->l2cap_data;
4510 BT_DBG("hcon %p", hcon);
4512 if (hcon->type != ACL_LINK || !conn)
4515 return conn->disc_reason;
/* HCI callback: the ACL link went down — destroy the L2CAP connection
 * and error out all its channels with the mapped errno. */
4518 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4520 BT_DBG("hcon %p reason %d", hcon, reason);
4522 if (hcon->type != ACL_LINK)
4525 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel. Losing
 * encryption on a MEDIUM-security channel arms a 5 s grace timer;
 * on a HIGH-security channel the socket is closed immediately. */
4530 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4532 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4535 if (encrypt == 0x00) {
4536 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4537 l2cap_sock_clear_timer(sk);
4538 l2cap_sock_set_timer(sk, HZ * 5);
4539 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4540 __l2cap_sock_close(sk, ECONNREFUSED);
/* Encryption (re)established: cancel the grace timer. */
4542 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4543 l2cap_sock_clear_timer(sk);
/* HCI callback: a security (authentication/encryption) procedure
 * finished. Walks every channel on the connection: connected channels
 * get an encryption check; channels blocked in BT_CONNECT now send
 * their Connection Request; channels in BT_CONNECT2 answer the pending
 * incoming request with success or SEC_BLOCK. */
4547 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4549 struct l2cap_chan_list *l;
4550 struct l2cap_conn *conn = hcon->l2cap_data;
4556 l = &conn->chan_list;
4558 BT_DBG("conn %p", conn);
4560 read_lock(&l->lock);
4562 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* This channel is waiting on a different pending connect. */
4565 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4570 if (!status && (sk->sk_state == BT_CONNECTED ||
4571 sk->sk_state == BT_CONFIG)) {
4572 l2cap_check_encryption(sk, encrypt);
4577 if (sk->sk_state == BT_CONNECT) {
/* Security cleared — send the deferred Connection Request. */
4579 struct l2cap_conn_req req;
4580 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4581 req.psm = l2cap_pi(sk)->psm;
4583 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4584 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4586 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4587 L2CAP_CONN_REQ, sizeof(req), &req);
4589 l2cap_sock_clear_timer(sk);
4590 l2cap_sock_set_timer(sk, HZ / 10);
4592 } else if (sk->sk_state == BT_CONNECT2) {
4593 struct l2cap_conn_rsp rsp;
4597 sk->sk_state = BT_CONFIG;
4598 result = L2CAP_CR_SUCCESS;
/* Security failed: refuse the incoming connection. */
4600 sk->sk_state = BT_DISCONN;
4601 l2cap_sock_set_timer(sk, HZ / 10);
4602 result = L2CAP_CR_SEC_BLOCK;
4605 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4606 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4607 rsp.result = cpu_to_le16(result);
4608 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4609 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4610 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4616 read_unlock(&l->lock);
/* HCI callback: reassemble L2CAP frames from ACL fragments. ACL_START
 * packets begin a frame (possibly complete in one fragment); continuation
 * packets are appended to conn->rx_skb until rx_len reaches zero, then
 * the whole frame is handed to l2cap_recv_frame(). Any length
 * inconsistency marks the connection unreliable (ECOMM). */
4621 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4623 struct l2cap_conn *conn = hcon->l2cap_data;
4625 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4628 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4630 if (flags & ACL_START) {
4631 struct l2cap_hdr *hdr;
/* A start fragment while a previous frame is still pending:
 * drop the stale partial frame. */
4635 BT_ERR("Unexpected start frame (len %d)", skb->len);
4636 kfree_skb(conn->rx_skb);
4637 conn->rx_skb = NULL;
4639 l2cap_conn_unreliable(conn, ECOMM);
4643 BT_ERR("Frame is too short (len %d)", skb->len);
4644 l2cap_conn_unreliable(conn, ECOMM);
4648 hdr = (struct l2cap_hdr *) skb->data;
4649 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4651 if (len == skb->len) {
4652 /* Complete frame received */
4653 l2cap_recv_frame(conn, skb);
4657 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4659 if (skb->len > len) {
4660 BT_ERR("Frame is too long (len %d, expected len %d)",
4662 l2cap_conn_unreliable(conn, ECOMM);
4666 /* Allocate skb for the complete frame (with header) */
4667 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4671 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len counts the bytes still missing. */
4673 conn->rx_len = len - skb->len;
4675 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4677 if (!conn->rx_len) {
4678 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4679 l2cap_conn_unreliable(conn, ECOMM);
4683 if (skb->len > conn->rx_len) {
4684 BT_ERR("Fragment is too long (len %d, expected %d)",
4685 skb->len, conn->rx_len);
4686 kfree_skb(conn->rx_skb);
4687 conn->rx_skb = NULL;
4689 l2cap_conn_unreliable(conn, ECOMM);
4693 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4695 conn->rx_len -= skb->len;
4697 if (!conn->rx_len) {
4698 /* Complete frame received */
4699 l2cap_recv_frame(conn, conn->rx_skb);
4700 conn->rx_skb = NULL;
/* debugfs seq_file show handler: print one line per L2CAP socket with
 * addresses, state, PSM, CIDs, MTUs and security level. */
4709 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4712 struct hlist_node *node;
4714 read_lock_bh(&l2cap_sk_list.lock);
4716 sk_for_each(sk, node, &l2cap_sk_list.head) {
4717 struct l2cap_pinfo *pi = l2cap_pi(sk);
4719 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4720 batostr(&bt_sk(sk)->src),
4721 batostr(&bt_sk(sk)->dst),
4722 sk->sk_state, __le16_to_cpu(pi->psm),
4724 pi->imtu, pi->omtu, pi->sec_level);
4727 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: wire the file to the single-record seq_file show
 * routine above. */
4732 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4734 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the read-only "l2cap" debugfs file; standard
 * seq_file single_open/seq_lseek/single_release trio. */
4737 static const struct file_operations l2cap_debugfs_fops = {
4738 .open = l2cap_debugfs_open,
4740 .llseek = seq_lseek,
4741 .release = single_release,
/* Dentry of the debugfs file; created in l2cap_init(), removed in
 * l2cap_exit(). */
4744 static struct dentry *l2cap_debugfs;
/* proto_ops for PF_BLUETOOTH/BTPROTO_L2CAP sockets: L2CAP-specific
 * handlers plus generic Bluetooth (bt_sock_*) and sock_no_* stubs
 * for operations L2CAP does not support (mmap, socketpair). */
4746 static const struct proto_ops l2cap_sock_ops = {
4747 .family = PF_BLUETOOTH,
4748 .owner = THIS_MODULE,
4749 .release = l2cap_sock_release,
4750 .bind = l2cap_sock_bind,
4751 .connect = l2cap_sock_connect,
4752 .listen = l2cap_sock_listen,
4753 .accept = l2cap_sock_accept,
4754 .getname = l2cap_sock_getname,
4755 .sendmsg = l2cap_sock_sendmsg,
4756 .recvmsg = l2cap_sock_recvmsg,
4757 .poll = bt_sock_poll,
4758 .ioctl = bt_sock_ioctl,
4759 .mmap = sock_no_mmap,
4760 .socketpair = sock_no_socketpair,
4761 .shutdown = l2cap_sock_shutdown,
4762 .setsockopt = l2cap_sock_setsockopt,
4763 .getsockopt = l2cap_sock_getsockopt
/* socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP) creation hook, registered
 * with the Bluetooth socket layer in l2cap_init(). */
4766 static const struct net_proto_family l2cap_sock_family_ops = {
4767 .family = PF_BLUETOOTH,
4768 .owner = THIS_MODULE,
4769 .create = l2cap_sock_create,
/* HCI protocol descriptor: callbacks the HCI core invokes for
 * connection events, security changes and inbound ACL data on the
 * L2CAP protocol. */
4772 static struct hci_proto l2cap_hci_proto = {
4774 .id = HCI_PROTO_L2CAP,
4775 .connect_ind = l2cap_connect_ind,
4776 .connect_cfm = l2cap_connect_cfm,
4777 .disconn_ind = l2cap_disconn_ind,
4778 .disconn_cfm = l2cap_disconn_cfm,
4779 .security_cfm = l2cap_security_cfm,
4780 .recv_acldata = l2cap_recv_acldata
/* Module init: register the L2CAP proto, create the helper
 * workqueue, register the socket family and the HCI protocol
 * handler, then expose the debugfs file. Failures unwind the
 * registrations already made, in reverse order. */
4783 static int __init l2cap_init(void)
4787 err = proto_register(&l2cap_proto, 0);
/* Single-threaded workqueue; NOTE(review): presumably serves the
 * ERTM busy-state handling (see enable_ertm) — confirm against the
 * elided callers of _busy_wq. */
4791 _busy_wq = create_singlethread_workqueue("l2cap");
4795 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4797 BT_ERR("L2CAP socket registration failed");
4801 err = hci_register_proto(&l2cap_hci_proto);
4803 BT_ERR("L2CAP protocol registration failed");
/* Undo the socket-family registration before bailing out. */
4804 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal: only log it. */
4809 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4810 bt_debugfs, NULL, &l2cap_debugfs_fops);
4812 BT_ERR("Failed to create L2CAP debug file");
4815 BT_INFO("L2CAP ver %s", VERSION);
4816 BT_INFO("L2CAP socket layer initialized");
/* Common error path: drop the proto registration. */
4821 proto_unregister(&l2cap_proto);
/* Module exit: tear everything down in reverse order of
 * l2cap_init(). */
4825 static void __exit l2cap_exit(void)
4827 debugfs_remove(l2cap_debugfs);
/* Drain any queued work before destroying the workqueue. */
4829 flush_workqueue(_busy_wq);
4830 destroy_workqueue(_busy_wq);
4832 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4833 BT_ERR("L2CAP socket unregistration failed");
4835 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4836 BT_ERR("L2CAP protocol unregistration failed");
4838 proto_unregister(&l2cap_proto);
/* Exported no-op: other modules reference this symbol purely to make
 * the module loader pull in the L2CAP module as a dependency. */
4841 void l2cap_load(void)
4843 /* Dummy function to trigger automatic L2CAP module loading by
4844 * other modules that use L2CAP sockets but don't use any other
4845 * symbols from it. */
4847 EXPORT_SYMBOL(l2cap_load);
4849 module_init(l2cap_init);
4850 module_exit(l2cap_exit);
/* Runtime-visible switch (mode 0644) for Enhanced Retransmission
 * Mode; backed by the file-scope enable_ertm flag. */
4852 module_param(enable_ertm, bool, 0644);
4853 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4855 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4856 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4857 MODULE_VERSION(VERSION);
4858 MODULE_LICENSE("GPL");
/* Matches the "bt-proto-0" alias requested via request_module() by
 * the Bluetooth core (BTPROTO_L2CAP == 0). */
4859 MODULE_ALIAS("bt-proto-0");