2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
/* Enhanced Retransmission Mode support; disabled by default (module param). */
58 static int enable_ertm = 0;
/* Extended feature mask and fixed-channel bitmap reported to peers. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
/* Workqueue backing l2cap_busy_work() (ERTM local-busy processing). */
65 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, protected by its own rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines defined later in this file. */
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* sk_timer expiry callback: choose an errno from the socket state
 * (ECONNREFUSED while a connection is being set up, except for the
 * SDP security level during BT_CONNECT) and force-close the channel.
 * NOTE(review): the default "reason" assignment is on a line missing
 * from this listing — confirm against the full source. */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
98 __l2cap_sock_close(sk, reason);
/* (Re)arm sk_timer to fire "timeout" jiffies from now. */
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer, if any. */
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
/* Scan the per-connection channel list for a matching destination CID.
 * Caller must hold the channel-list lock. */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
/* Same as above, matching on the source CID instead. */
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 s = __l2cap_get_chan_by_scid(l, cid)
148 read_unlock(&l->lock);
/* Match on the signalling-command identifier awaiting a response.
 * Caller must hold the channel-list lock. */
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 s = __l2cap_get_chan_by_ident(l, ident);
169 read_unlock(&l->lock);
/* Allocate the first unused CID in the dynamic range
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the channel list (doubly linked via the
 * l2cap_pinfo next_c/prev_c pointers).  Caller holds the write lock. */
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
/* Unlink sk from the channel list; takes the write lock itself. */
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
206 l2cap_pi(next)->prev_c = prev;
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
/* Attach a socket to an L2CAP connection: assign CIDs according to the
 * socket type, link it onto the connection's channel list and, for an
 * incoming channel, enqueue it on the listening parent. */
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = HCI "remote user terminated connection" default reason. */
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
243 bt_accept_enqueue(parent, sk);
/* Delete channel: detach from the connection, mark the socket closed
 * (ZAPPED), wake any accepting parent and tear down all ERTM state. */
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
274 sk->sk_state_change(sk);
276 skb_queue_purge(TX_QUEUE(sk));
/* ERTM teardown: stop retransmission machinery and drop queued frames. */
278 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
279 struct srej_list *l, *tmp;
281 del_timer(&l2cap_pi(sk)->retrans_timer);
282 del_timer(&l2cap_pi(sk)->monitor_timer);
283 del_timer(&l2cap_pi(sk)->ack_timer);
285 skb_queue_purge(SREJ_QUEUE(sk));
286 skb_queue_purge(BUSY_QUEUE(sk));
288 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
295 /* Service level security */
/* Map the channel's PSM/security level to an HCI authentication
 * requirement and ask the HCI layer to enforce it.  PSM 0x0001 (SDP)
 * never requires bonding and is downgraded to BT_SECURITY_SDP. */
296 static inline int l2cap_check_security(struct sock *sk)
298 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
301 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
303 auth_type = HCI_AT_NO_BONDING_MITM;
305 auth_type = HCI_AT_NO_BONDING;
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
308 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
310 switch (l2cap_pi(sk)->sec_level) {
311 case BT_SECURITY_HIGH:
312 auth_type = HCI_AT_GENERAL_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 auth_type = HCI_AT_GENERAL_BONDING;
318 auth_type = HCI_AT_NO_BONDING;
323 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate a signalling-command identifier, cycling in 1..128 under
 * the connection spinlock (0 is reserved and never returned). */
327 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 129 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn->lock);
339 if (++conn->tx_ident > 128)
344 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and push it out over the ACL link. */
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
353 BT_DBG("code 0x%2.2x", code);
358 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame (supervisory frame) carrying the
 * given control field.  Appends F-bit/P-bit if pending, and a CRC16
 * FCS when the channel negotiated L2CAP_FCS_CRC16. */
361 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
364 struct l2cap_hdr *lh;
365 struct l2cap_conn *conn = pi->conn;
366 struct sock *sk = (struct sock *)pi;
367 int count, hlen = L2CAP_HDR_SIZE + 2;
369 if (sk->sk_state != BT_CONNECTED)
372 if (pi->fcs == L2CAP_FCS_CRC16)
375 BT_DBG("pi %p, control 0x%2.2x", pi, control);
377 count = min_t(unsigned int, conn->mtu, hlen);
378 control |= L2CAP_CTRL_FRAME_TYPE;
380 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
381 control |= L2CAP_CTRL_FINAL;
382 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
385 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
386 control |= L2CAP_CTRL_POLL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
390 skb = bt_skb_alloc(count, GFP_ATOMIC);
394 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
395 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
396 lh->cid = cpu_to_le16(pi->dcid);
397 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers the header and control field, excluding the FCS itself. */
399 if (pi->fcs == L2CAP_FCS_CRC16) {
400 u16 fcs = crc16(0, (u8 *)lh, count - 2);
401 put_unaligned_le16(fcs, skb_put(skb, 2));
404 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RNR when locally busy (and remember we did), otherwise RR;
 * ReqSeq acknowledges everything up to buffer_seq. */
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
409 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
410 control |= L2CAP_SUPER_RCV_NOT_READY;
411 pi->conn_state |= L2CAP_CONN_RNR_SENT;
413 control |= L2CAP_SUPER_RCV_READY;
415 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
417 l2cap_send_sframe(pi, control);
/* True while no Connect Request from us is outstanding. */
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment.  If the peer's feature mask is
 * already known (or being fetched), send a Connect Request once
 * security clears; otherwise first issue an Information Request for
 * the feature mask, guarded by the info timer. */
425 static void l2cap_do_start(struct sock *sk)
427 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
429 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
430 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
433 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
434 struct l2cap_conn_req req;
435 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
436 req.psm = l2cap_pi(sk)->psm;
438 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
439 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
441 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
442 L2CAP_CONN_REQ, sizeof(req), &req);
445 struct l2cap_info_req req;
446 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
448 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
449 conn->info_ident = l2cap_get_ident(conn);
451 mod_timer(&conn->info_timer, jiffies +
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
454 l2cap_send_cmd(conn, conn->info_ident,
455 L2CAP_INFO_REQ, sizeof(req), &req);
/* Initiate disconnection: flush pending TX, stop ERTM timers, send a
 * Disconnect Request and move the socket to BT_DISCONN. */
459 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
461 struct l2cap_disconn_req req;
466 skb_queue_purge(TX_QUEUE(sk));
468 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
469 del_timer(&l2cap_pi(sk)->retrans_timer);
470 del_timer(&l2cap_pi(sk)->monitor_timer);
471 del_timer(&l2cap_pi(sk)->ack_timer);
474 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
475 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
476 l2cap_send_cmd(conn, l2cap_get_ident(conn),
477 L2CAP_DISCONN_REQ, sizeof(req), &req);
479 sk->sk_state = BT_DISCONN;
482 /* ---- L2CAP connections ---- */
/* Walk all channels on the connection and advance their state
 * machines: BT_CONNECT channels emit a Connect Request once security
 * clears; BT_CONNECT2 channels answer the pending incoming Connect
 * Request (success, authorization-pending for deferred setup, or
 * authentication-pending while security is still being raised). */
483 static void l2cap_conn_start(struct l2cap_conn *conn)
485 struct l2cap_chan_list *l = &conn->chan_list;
488 BT_DBG("conn %p", conn);
492 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
495 if (sk->sk_type != SOCK_SEQPACKET &&
496 sk->sk_type != SOCK_STREAM) {
501 if (sk->sk_state == BT_CONNECT) {
502 if (l2cap_check_security(sk) &&
503 __l2cap_no_conn_pending(sk)) {
504 struct l2cap_conn_req req;
505 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
506 req.psm = l2cap_pi(sk)->psm;
508 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
509 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
511 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
512 L2CAP_CONN_REQ, sizeof(req), &req);
514 } else if (sk->sk_state == BT_CONNECT2) {
515 struct l2cap_conn_rsp rsp;
/* In a Connect Response our dcid/scid swap roles. */
516 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
517 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
519 if (l2cap_check_security(sk)) {
520 if (bt_sk(sk)->defer_setup) {
521 struct sock *parent = bt_sk(sk)->parent;
522 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
523 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
524 parent->sk_data_ready(parent, 0);
527 sk->sk_state = BT_CONFIG;
528 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
529 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
532 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
533 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
536 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
537 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
543 read_unlock(&l->lock);
/* ACL link is up: mark raw/dgram sockets connected immediately and
 * let connection-oriented sockets in BT_CONNECT proceed. */
546 static void l2cap_conn_ready(struct l2cap_conn *conn)
548 struct l2cap_chan_list *l = &conn->chan_list;
551 BT_DBG("conn %p", conn);
555 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
558 if (sk->sk_type != SOCK_SEQPACKET &&
559 sk->sk_type != SOCK_STREAM) {
560 l2cap_sock_clear_timer(sk);
561 sk->sk_state = BT_CONNECTED;
562 sk->sk_state_change(sk);
563 } else if (sk->sk_state == BT_CONNECT)
569 read_unlock(&l->lock);
572 /* Notify sockets that we cannot guaranty reliability anymore */
573 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
575 struct l2cap_chan_list *l = &conn->chan_list;
578 BT_DBG("conn %p", conn);
582 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only sockets that insisted on reliability get the error. */
583 if (l2cap_pi(sk)->force_reliable)
587 read_unlock(&l->lock);
/* Information Request timed out: treat the feature exchange as done
 * (with no data) and resume starting the queued channels. */
590 static void l2cap_info_timeout(unsigned long arg)
592 struct l2cap_conn *conn = (void *) arg;
594 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
595 conn->info_ident = 0;
597 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection: allocate it, cache MTU and addresses, init locks and
 * the info timer.  Returns NULL on allocation failure. */
600 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
602 struct l2cap_conn *conn = hcon->l2cap_data;
607 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
611 hcon->l2cap_data = conn;
614 BT_DBG("hcon %p conn %p", hcon, conn);
616 conn->mtu = hcon->hdev->acl_mtu;
617 conn->src = &hcon->hdev->bdaddr;
618 conn->dst = &hcon->dst;
622 spin_lock_init(&conn->lock);
623 rwlock_init(&conn->chan_list.lock);
625 setup_timer(&conn->info_timer, l2cap_info_timeout,
626 (unsigned long) conn);
/* 0x13 = HCI "remote user terminated connection" default reason. */
628 conn->disc_reason = 0x13;
/* Tear down the L2CAP connection: free any partial reassembly skb,
 * delete every remaining channel with "err", stop the info timer and
 * detach from the HCI connection. */
633 static void l2cap_conn_del(struct hci_conn *hcon, int err)
635 struct l2cap_conn *conn = hcon->l2cap_data;
641 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
643 kfree_skb(conn->rx_skb);
646 while ((sk = conn->chan_list.head)) {
648 l2cap_chan_del(sk, err);
653 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
654 del_timer_sync(&conn->info_timer);
656 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
660 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
662 struct l2cap_chan_list *l = &conn->chan_list;
663 write_lock_bh(&l->lock);
664 __l2cap_chan_add(conn, sk, parent);
665 write_unlock_bh(&l->lock);
668 /* ---- Socket interface ---- */
/* Exact-match lookup by bound source PSM and source address.  Caller
 * must hold l2cap_sk_list.lock. */
669 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
672 struct hlist_node *node;
673 sk_for_each(sk, node, &l2cap_sk_list.head)
674 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
681 /* Find socket with psm and source bdaddr.
682 * Returns closest match.
/* Prefers an exact source-address match; falls back to a socket bound
 * to BDADDR_ANY (remembered in sk1).  Caller holds the list lock. */
684 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
686 struct sock *sk = NULL, *sk1 = NULL;
687 struct hlist_node *node;
689 sk_for_each(sk, node, &l2cap_sk_list.head) {
690 if (state && sk->sk_state != state)
693 if (l2cap_pi(sk)->psm == psm) {
695 if (!bacmp(&bt_sk(sk)->src, src))
699 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke early on an exact match. */
703 return node ? sk : sk1;
706 /* Find socket with given address (psm, src).
707 * Returns locked socket */
708 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
711 read_lock(&l2cap_sk_list.lock);
712 s = __l2cap_get_sock_by_psm(state, psm, src);
715 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: release any skbs still queued for rx/tx. */
719 static void l2cap_sock_destruct(struct sock *sk)
723 skb_queue_purge(&sk->sk_receive_queue);
724 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
727 static void l2cap_sock_cleanup_listen(struct sock *parent)
731 BT_DBG("parent %p", parent);
733 /* Close not yet accepted channels */
734 while ((sk = bt_accept_dequeue(parent, NULL)))
735 l2cap_sock_close(sk);
737 parent->sk_state = BT_CLOSED;
738 sock_set_flag(parent, SOCK_ZAPPED);
741 /* Kill socket (only if zapped and orphan)
742 * Must be called on unlocked socket.
744 static void l2cap_sock_kill(struct sock *sk)
746 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
749 BT_DBG("sk %p state %d", sk, sk->sk_state);
751 /* Kill poor orphan */
752 bt_sock_unlink(&l2cap_sk_list, sk);
753 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listening sockets reap children; connected/
 * configuring connection-oriented sockets send a Disconnect Request
 * under a timer; BT_CONNECT2 rejects the pending Connect Request
 * (security block if setup was deferred, otherwise bad PSM). */
757 static void __l2cap_sock_close(struct sock *sk, int reason)
759 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
761 switch (sk->sk_state) {
763 l2cap_sock_cleanup_listen(sk);
768 if (sk->sk_type == SOCK_SEQPACKET ||
769 sk->sk_type == SOCK_STREAM) {
770 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
772 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
773 l2cap_send_disconn_req(conn, sk);
775 l2cap_chan_del(sk, reason);
779 if (sk->sk_type == SOCK_SEQPACKET ||
780 sk->sk_type == SOCK_STREAM) {
781 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
782 struct l2cap_conn_rsp rsp;
785 if (bt_sk(sk)->defer_setup)
786 result = L2CAP_CR_SEC_BLOCK;
788 result = L2CAP_CR_BAD_PSM;
790 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
791 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
792 rsp.result = cpu_to_le16(result);
793 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
794 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
795 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
797 l2cap_chan_del(sk, reason);
802 l2cap_chan_del(sk, reason);
806 sock_set_flag(sk, SOCK_ZAPPED);
811 /* Must be called on unlocked socket. */
812 static void l2cap_sock_close(struct sock *sk)
814 l2cap_sock_clear_timer(sk);
816 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a new L2CAP socket.  A child of a listening socket
 * inherits the parent's channel parameters; a fresh socket gets the
 * defaults (basic mode unless ERTM is enabled for SOCK_STREAM). */
821 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
823 struct l2cap_pinfo *pi = l2cap_pi(sk);
828 sk->sk_type = parent->sk_type;
829 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
831 pi->imtu = l2cap_pi(parent)->imtu;
832 pi->omtu = l2cap_pi(parent)->omtu;
833 pi->mode = l2cap_pi(parent)->mode;
834 pi->fcs = l2cap_pi(parent)->fcs;
835 pi->max_tx = l2cap_pi(parent)->max_tx;
836 pi->tx_win = l2cap_pi(parent)->tx_win;
837 pi->sec_level = l2cap_pi(parent)->sec_level;
838 pi->role_switch = l2cap_pi(parent)->role_switch;
839 pi->force_reliable = l2cap_pi(parent)->force_reliable;
841 pi->imtu = L2CAP_DEFAULT_MTU;
843 if (enable_ertm && sk->sk_type == SOCK_STREAM)
844 pi->mode = L2CAP_MODE_ERTM;
846 pi->mode = L2CAP_MODE_BASIC;
847 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
848 pi->fcs = L2CAP_FCS_CRC16;
849 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
850 pi->sec_level = BT_SECURITY_LOW;
852 pi->force_reliable = 0;
855 /* Default config options */
857 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
858 skb_queue_head_init(TX_QUEUE(sk));
859 skb_queue_head_init(SREJ_QUEUE(sk));
860 skb_queue_head_init(BUSY_QUEUE(sk));
861 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sk_alloc()'s per-socket allocation so it
 * covers the L2CAP-specific l2cap_pinfo. */
864 static struct proto l2cap_proto = {
866 .owner = THIS_MODULE,
867 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP socket, arm its timer
 * and link it onto the global socket list. */
870 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
874 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
878 sock_init_data(sock, sk);
879 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
881 sk->sk_destruct = l2cap_sock_destruct;
882 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
884 sock_reset_flag(sk, SOCK_ZAPPED);
886 sk->sk_protocol = proto;
887 sk->sk_state = BT_OPEN;
889 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
891 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW
 * for user-created raw sockets, then allocate and init the sock. */
895 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
900 BT_DBG("sock %p", sock);
902 sock->state = SS_UNCONNECTED;
904 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
905 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
906 return -ESOCKTNOSUPPORT;
908 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
911 sock->ops = &l2cap_sock_ops;
913 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
917 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, require
 * CAP_NET_BIND_SERVICE for reserved PSMs (< 0x1001), reject a PSM
 * already bound to the same source address, then record the binding.
 * SDP (0x0001) and RFCOMM (0x0003) PSMs get the SDP security level. */
921 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
923 struct sock *sk = sock->sk;
924 struct sockaddr_l2 la;
929 if (!addr || addr->sa_family != AF_BLUETOOTH)
932 memset(&la, 0, sizeof(la));
933 len = min_t(unsigned int, sizeof(la), alen);
934 memcpy(&la, addr, len);
941 if (sk->sk_state != BT_OPEN) {
946 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
947 !capable(CAP_NET_BIND_SERVICE)) {
952 write_lock_bh(&l2cap_sk_list.lock);
954 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
957 /* Save source address */
958 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
959 l2cap_pi(sk)->psm = la.l2_psm;
960 l2cap_pi(sk)->sport = la.l2_psm;
961 sk->sk_state = BT_BOUND;
963 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
964 __le16_to_cpu(la.l2_psm) == 0x0003)
965 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
968 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the ACL link and attach this channel to it.  Chooses the
 * HCI authentication requirement from socket type, PSM and security
 * level, then calls hci_connect() and hooks the resulting connection.
 * If the ACL is already up, raw/dgram sockets complete immediately. */
975 static int l2cap_do_connect(struct sock *sk)
977 bdaddr_t *src = &bt_sk(sk)->src;
978 bdaddr_t *dst = &bt_sk(sk)->dst;
979 struct l2cap_conn *conn;
980 struct hci_conn *hcon;
981 struct hci_dev *hdev;
985 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
988 hdev = hci_get_route(dst, src);
990 return -EHOSTUNREACH;
992 hci_dev_lock_bh(hdev);
/* Raw sockets (e.g. dedicated-bonding tools) map security levels to
 * dedicated bonding; PSM 0x0001 (SDP) never bonds; everything else
 * uses general bonding per the socket's sec_level. */
996 if (sk->sk_type == SOCK_RAW) {
997 switch (l2cap_pi(sk)->sec_level) {
998 case BT_SECURITY_HIGH:
999 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1001 case BT_SECURITY_MEDIUM:
1002 auth_type = HCI_AT_DEDICATED_BONDING;
1005 auth_type = HCI_AT_NO_BONDING;
1008 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1009 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1010 auth_type = HCI_AT_NO_BONDING_MITM;
1012 auth_type = HCI_AT_NO_BONDING;
1014 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1015 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1017 switch (l2cap_pi(sk)->sec_level) {
1018 case BT_SECURITY_HIGH:
1019 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1021 case BT_SECURITY_MEDIUM:
1022 auth_type = HCI_AT_GENERAL_BONDING;
1025 auth_type = HCI_AT_NO_BONDING;
1030 hcon = hci_connect(hdev, ACL_LINK, dst,
1031 l2cap_pi(sk)->sec_level, auth_type);
1035 conn = l2cap_conn_add(hcon, 0);
1043 /* Update source addr of the socket */
1044 bacpy(src, conn->src);
1046 l2cap_chan_add(conn, sk, NULL);
1048 sk->sk_state = BT_CONNECT;
1049 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1051 if (hcon->state == BT_CONNECTED) {
1052 if (sk->sk_type != SOCK_SEQPACKET &&
1053 sk->sk_type != SOCK_STREAM) {
1054 l2cap_sock_clear_timer(sk);
1055 sk->sk_state = BT_CONNECTED;
1061 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and channel mode, record destination
 * PSM/address, start the connection and optionally block until the
 * socket reaches BT_CONNECTED. */
1066 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1068 struct sock *sk = sock->sk;
1069 struct sockaddr_l2 la;
1072 BT_DBG("sk %p", sk);
1074 if (!addr || alen < sizeof(addr->sa_family) ||
1075 addr->sa_family != AF_BLUETOOTH)
1078 memset(&la, 0, sizeof(la));
1079 len = min_t(unsigned int, sizeof(la), alen);
1080 memcpy(&la, addr, len);
1087 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
/* ERTM/streaming are only usable when enable_ertm is set; mode check
 * body is on lines missing from this listing. */
1093 switch (l2cap_pi(sk)->mode) {
1094 case L2CAP_MODE_BASIC:
1096 case L2CAP_MODE_ERTM:
1097 case L2CAP_MODE_STREAMING:
1106 switch (sk->sk_state) {
1110 /* Already connecting */
1114 /* Already connected */
1127 /* Set destination address and psm */
1128 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1129 l2cap_pi(sk)->psm = la.l2_psm;
1131 err = l2cap_do_connect(sk);
1136 err = bt_sock_wait_state(sk, BT_CONNECTED,
1137 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound, connection-oriented sockets in a supported
 * mode may listen.  If no PSM was bound, autobind the first free one
 * in the dynamic range (odd values 0x1001..0x10ff, stepping by 2). */
1143 static int l2cap_sock_listen(struct socket *sock, int backlog)
1145 struct sock *sk = sock->sk;
1148 BT_DBG("sk %p backlog %d", sk, backlog);
1152 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1153 || sk->sk_state != BT_BOUND) {
1158 switch (l2cap_pi(sk)->mode) {
1159 case L2CAP_MODE_BASIC:
1161 case L2CAP_MODE_ERTM:
1162 case L2CAP_MODE_STREAMING:
1171 if (!l2cap_pi(sk)->psm) {
1172 bdaddr_t *src = &bt_sk(sk)->src;
1177 write_lock_bh(&l2cap_sk_list.lock);
1179 for (psm = 0x1001; psm < 0x1100; psm += 2)
1180 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1181 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1182 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1187 write_unlock_bh(&l2cap_sk_list.lock);
1193 sk->sk_max_ack_backlog = backlog;
1194 sk->sk_ack_backlog = 0;
1195 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) until a child connection
 * can be dequeued from the listening socket's accept queue, honoring
 * the receive timeout and O_NONBLOCK.  The listen socket lock is
 * dropped across the schedule and state is re-checked after relock. */
1202 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1204 DECLARE_WAITQUEUE(wait, current);
1205 struct sock *sk = sock->sk, *nsk;
1209 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1211 if (sk->sk_state != BT_LISTEN) {
1216 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1218 BT_DBG("sk %p timeo %ld", sk, timeo);
1220 /* Wait for an incoming connection. (wake-one). */
1221 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1222 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1223 set_current_state(TASK_INTERRUPTIBLE);
1230 timeo = schedule_timeout(timeo);
1231 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1233 if (sk->sk_state != BT_LISTEN) {
1238 if (signal_pending(current)) {
1239 err = sock_intr_errno(timeo);
1243 set_current_state(TASK_RUNNING);
1244 remove_wait_queue(sk_sleep(sk), &wait);
1249 newsock->state = SS_CONNECTED;
1251 BT_DBG("new socket %p", nsk);
/* getsockname(2)/getpeername(2): fill a sockaddr_l2 with either the
 * peer's (psm/dst/dcid) or the local (sport/src/scid) identity. */
1258 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1260 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1261 struct sock *sk = sock->sk;
1263 BT_DBG("sock %p, sk %p", sock, sk);
1265 addr->sa_family = AF_BLUETOOTH;
1266 *len = sizeof(struct sockaddr_l2);
1269 la->l2_psm = l2cap_pi(sk)->psm;
1270 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1271 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1273 la->l2_psm = l2cap_pi(sk)->sport;
1274 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1275 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block (interruptibly) until every transmitted ERTM I-frame has been
 * acknowledged or the channel goes away; used before close/shutdown. */
1281 static int __l2cap_wait_ack(struct sock *sk)
1283 DECLARE_WAITQUEUE(wait, current);
1287 add_wait_queue(sk_sleep(sk), &wait);
1288 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1289 set_current_state(TASK_INTERRUPTIBLE);
1294 if (signal_pending(current)) {
1295 err = sock_intr_errno(timeo);
1300 timeo = schedule_timeout(timeo);
1303 err = sock_error(sk);
1307 set_current_state(TASK_RUNNING);
1308 remove_wait_queue(sk_sleep(sk), &wait);
/* Monitor timer expired: poll the peer again (RR/RNR with P-bit) up
 * to remote_max_tx times, then give up and disconnect the channel. */
1312 static void l2cap_monitor_timeout(unsigned long arg)
1314 struct sock *sk = (void *) arg;
1317 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1318 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1323 l2cap_pi(sk)->retry_count++;
1324 __mod_monitor_timer();
1326 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Retransmission timer expired: enter the WAIT_F state, start the
 * monitor timer and poll the peer for its receive state. */
1330 static void l2cap_retrans_timeout(unsigned long arg)
1332 struct sock *sk = (void *) arg;
1335 l2cap_pi(sk)->retry_count = 1;
1336 __mod_monitor_timer();
1338 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1340 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Pop acknowledged I-frames (those before expected_ack_seq) off the
 * TX queue; stop the retransmission timer once nothing is unacked. */
1344 static void l2cap_drop_acked_frames(struct sock *sk)
1346 struct sk_buff *skb;
1348 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1349 l2cap_pi(sk)->unacked_frames) {
1350 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1353 skb = skb_dequeue(TX_QUEUE(sk));
1356 l2cap_pi(sk)->unacked_frames--;
1359 if (!l2cap_pi(sk)->unacked_frames)
1360 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built frame to the HCI layer for transmission. */
1363 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1365 struct l2cap_pinfo *pi = l2cap_pi(sk);
1367 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1369 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode TX: for each queued PDU, clone it, stamp the next
 * TxSeq into the control field, recompute the FCS, send the clone
 * and drop the original (streaming mode never retransmits). */
1372 static int l2cap_streaming_send(struct sock *sk)
1374 struct sk_buff *skb, *tx_skb;
1375 struct l2cap_pinfo *pi = l2cap_pi(sk);
1378 while ((skb = sk->sk_send_head)) {
1379 tx_skb = skb_clone(skb, GFP_ATOMIC);
1381 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1382 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1383 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1385 if (pi->fcs == L2CAP_FCS_CRC16) {
1386 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1387 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1390 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo-64 per the ERTM control field. */
1392 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1394 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1395 sk->sk_send_head = NULL;
1397 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1399 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single I-frame with the given TxSeq: locate it in
 * the TX queue, disconnect if the peer's retry limit is exhausted,
 * otherwise rebuild control (keeping SAR bits, refreshing ReqSeq)
 * and FCS on a clone and send it. */
1405 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1407 struct l2cap_pinfo *pi = l2cap_pi(sk);
1408 struct sk_buff *skb, *tx_skb;
1411 skb = skb_peek(TX_QUEUE(sk));
1416 if (bt_cb(skb)->tx_seq == tx_seq)
1419 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1422 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1424 if (pi->remote_max_tx &&
1425 bt_cb(skb)->retries == pi->remote_max_tx) {
1426 l2cap_send_disconn_req(pi->conn, sk);
1430 tx_skb = skb_clone(skb, GFP_ATOMIC);
1431 bt_cb(skb)->retries++;
1432 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1433 control &= L2CAP_CTRL_SAR;
1435 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1436 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1437 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1439 if (pi->fcs == L2CAP_FCS_CRC16) {
1440 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1441 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1444 l2cap_do_send(sk, tx_skb);
/* ERTM TX path: while the send window is open, clone the head PDU,
 * stamp ReqSeq/TxSeq (and F-bit if pending) into the control field,
 * recompute the FCS, send the clone and keep the original queued for
 * possible retransmission.  Disconnects when the peer's MaxTransmit
 * is exhausted for a frame.
 * Consistency fix: compute and write the FCS via tx_skb like
 * l2cap_streaming_send()/l2cap_retransmit_one_frame() do, instead of
 * mixing skb->data with tx_skb->len.  Behavior is unchanged because
 * skb_clone() shares the data buffer between skb and tx_skb. */
1447 static int l2cap_ertm_send(struct sock *sk)
1449 struct sk_buff *skb, *tx_skb;
1450 struct l2cap_pinfo *pi = l2cap_pi(sk);
1454 if (sk->sk_state != BT_CONNECTED)
1457 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1459 if (pi->remote_max_tx &&
1460 bt_cb(skb)->retries == pi->remote_max_tx) {
1461 l2cap_send_disconn_req(pi->conn, sk);
1465 tx_skb = skb_clone(skb, GFP_ATOMIC);
1467 bt_cb(skb)->retries++;
/* Preserve only the SAR bits; sequence fields are rewritten below. */
1469 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1470 control &= L2CAP_CTRL_SAR;
1472 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1473 control |= L2CAP_CTRL_FINAL;
1474 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1476 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1477 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1478 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything before the trailing 2-byte FCS field and
 * must be computed after the control field was rewritten above. */
1481 if (pi->fcs == L2CAP_FCS_CRC16) {
1482 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1483 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1486 l2cap_do_send(sk, tx_skb);
1488 __mod_retrans_timer();
1490 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1491 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1493 pi->unacked_frames++;
1496 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1497 sk->sk_send_head = NULL;
1499 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Rewind the send pointer to the head of the TX queue and resend
 * everything from expected_ack_seq, serialized by send_lock. */
1507 static int l2cap_retransmit_frames(struct sock *sk)
1509 struct l2cap_pinfo *pi = l2cap_pi(sk);
1512 spin_lock_bh(&pi->send_lock);
1514 if (!skb_queue_empty(TX_QUEUE(sk)))
1515 sk->sk_send_head = TX_QUEUE(sk)->next;
1517 pi->next_tx_seq = pi->expected_ack_seq;
1518 ret = l2cap_ertm_send(sk);
1520 spin_unlock_bh(&pi->send_lock);
/* Acknowledge received I-frames: send RNR when locally busy, try to
 * piggyback the ack on pending I-frames, and fall back to RR. */
1525 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1527 struct sock *sk = (struct sock *)pi;
1531 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1533 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1534 control |= L2CAP_SUPER_RCV_NOT_READY;
1535 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1536 l2cap_send_sframe(pi, control);
1540 spin_lock_bh(&pi->send_lock);
1541 nframes = l2cap_ertm_send(sk);
1542 spin_unlock_bh(&pi->send_lock);
1547 control |= L2CAP_SUPER_RCV_READY;
1548 l2cap_send_sframe(pi, control);
/* Send a final SREJ S-frame for the last entry on the SREJ list. */
1551 static void l2cap_send_srejtail(struct sock *sk)
1553 struct srej_list *tail;
1556 control = L2CAP_SUPER_SELECT_REJECT;
1557 control |= L2CAP_CTRL_FINAL;
1559 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1560 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1562 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy a user iovec into @skb: the first @count bytes go into the skb's
 * linear area, the remainder of @len is split into frag_list skbs of at
 * most conn->mtu bytes each, allocated with bt_skb_send_alloc(). */
1565 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1567 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1568 struct sk_buff **frag;
1571 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1577 /* Continuation fragments (no L2CAP header) */
1578 frag = &skb_shinfo(skb)->frag_list;
1580 count = min_t(unsigned int, conn->mtu, len);
1582 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1585 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1591 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header followed
 * by a 2-byte PSM (hence hlen = L2CAP_HDR_SIZE + 2), then the payload
 * copied from the user iovec.  Returns the skb or an ERR_PTR. */
1597 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1599 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1600 struct sk_buff *skb;
1601 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1602 struct l2cap_hdr *lh;
1604 BT_DBG("sk %p len %d", sk, (int)len);
1606 count = min_t(unsigned int, (conn->mtu - hlen), len);
1607 skb = bt_skb_send_alloc(sk, count + hlen,
1608 msg->msg_flags & MSG_DONTWAIT, &err);
1610 return ERR_PTR(-ENOMEM);
1612 /* Create L2CAP header */
1613 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1614 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1615 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1616 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1618 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1619 if (unlikely(err < 0)) {
1621 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header (no PSM, no control field)
 * followed by the payload copied from the user iovec.  Returns the skb
 * or an ERR_PTR on allocation/copy failure. */
1626 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1628 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1629 struct sk_buff *skb;
1630 int err, count, hlen = L2CAP_HDR_SIZE;
1631 struct l2cap_hdr *lh;
1633 BT_DBG("sk %p len %d", sk, (int)len);
1635 count = min_t(unsigned int, (conn->mtu - hlen), len);
1636 skb = bt_skb_send_alloc(sk, count + hlen,
1637 msg->msg_flags & MSG_DONTWAIT, &err);
1639 return ERR_PTR(-ENOMEM);
1641 /* Create L2CAP header */
1642 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1643 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1644 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1646 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1647 if (unlikely(err < 0)) {
1649 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: L2CAP header, 16-bit control field,
 * optional 2-byte SDU length (only when @sdulen is non-zero, i.e. for a
 * SAR start fragment), payload, and a 2-byte FCS placeholder when CRC16
 * is in use (the real FCS is filled in at transmit time).  retries is
 * reset to 0 for the retransmission logic. */
1654 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1656 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1657 struct sk_buff *skb;
1658 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1659 struct l2cap_hdr *lh;
1661 BT_DBG("sk %p len %d", sk, (int)len);
1664 return ERR_PTR(-ENOTCONN);
1669 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1672 count = min_t(unsigned int, (conn->mtu - hlen), len);
1673 skb = bt_skb_send_alloc(sk, count + hlen,
1674 msg->msg_flags & MSG_DONTWAIT, &err);
1676 return ERR_PTR(-ENOMEM);
1678 /* Create L2CAP header */
1679 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1680 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1681 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1682 put_unaligned_le16(control, skb_put(skb, 2));
1684 put_unaligned_le16(sdulen, skb_put(skb, 2));
1686 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1687 if (unlikely(err < 0)) {
1689 return ERR_PTR(err);
1692 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1693 put_unaligned_le16(0, skb_put(skb, 2));
1695 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the peer's MPS into a START fragment
 * (carrying the total SDU length), zero or more CONTINUE fragments of
 * remote_mps bytes, and a final END fragment.  Fragments are staged on
 * a local queue and only spliced onto the TX queue once all of them
 * were built, so a mid-SDU allocation failure leaves the TX queue
 * untouched (the staging queue is purged instead). */
1699 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1701 struct l2cap_pinfo *pi = l2cap_pi(sk);
1702 struct sk_buff *skb;
1703 struct sk_buff_head sar_queue;
1707 skb_queue_head_init(&sar_queue);
1708 control = L2CAP_SDU_START;
1709 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1711 return PTR_ERR(skb);
1713 __skb_queue_tail(&sar_queue, skb);
1714 len -= pi->remote_mps;
1715 size += pi->remote_mps;
1720 if (len > pi->remote_mps) {
1721 control = L2CAP_SDU_CONTINUE;
1722 buflen = pi->remote_mps;
1724 control = L2CAP_SDU_END;
1728 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1730 skb_queue_purge(&sar_queue);
1731 return PTR_ERR(skb);
1734 __skb_queue_tail(&sar_queue, skb);
1738 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1739 spin_lock_bh(&pi->send_lock);
1740 if (sk->sk_send_head == NULL)
1741 sk->sk_send_head = sar_queue.next;
1742 spin_unlock_bh(&pi->send_lock);
1747 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1749 struct sock *sk = sock->sk;
1750 struct l2cap_pinfo *pi = l2cap_pi(sk);
1751 struct sk_buff *skb;
1755 BT_DBG("sock %p, sk %p", sock, sk);
1757 err = sock_error(sk);
1761 if (msg->msg_flags & MSG_OOB)
1766 if (sk->sk_state != BT_CONNECTED) {
1771 /* Connectionless channel */
1772 if (sk->sk_type == SOCK_DGRAM) {
1773 skb = l2cap_create_connless_pdu(sk, msg, len);
1777 l2cap_do_send(sk, skb);
1784 case L2CAP_MODE_BASIC:
1785 /* Check outgoing MTU */
1786 if (len > pi->omtu) {
1791 /* Create a basic PDU */
1792 skb = l2cap_create_basic_pdu(sk, msg, len);
1798 l2cap_do_send(sk, skb);
1802 case L2CAP_MODE_ERTM:
1803 case L2CAP_MODE_STREAMING:
1804 /* Entire SDU fits into one PDU */
1805 if (len <= pi->remote_mps) {
1806 control = L2CAP_SDU_UNSEGMENTED;
1807 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1812 __skb_queue_tail(TX_QUEUE(sk), skb);
1814 if (pi->mode == L2CAP_MODE_ERTM)
1815 spin_lock_bh(&pi->send_lock);
1817 if (sk->sk_send_head == NULL)
1818 sk->sk_send_head = skb;
1820 if (pi->mode == L2CAP_MODE_ERTM)
1821 spin_unlock_bh(&pi->send_lock);
1823 /* Segment SDU into multiples PDUs */
1824 err = l2cap_sar_segment_sdu(sk, msg, len);
1829 if (pi->mode == L2CAP_MODE_STREAMING) {
1830 err = l2cap_streaming_send(sk);
1832 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1833 pi->conn_state && L2CAP_CONN_WAIT_F) {
1837 spin_lock_bh(&pi->send_lock);
1838 err = l2cap_ertm_send(sk);
1839 spin_unlock_bh(&pi->send_lock);
1847 BT_DBG("bad state %1.1x", pi->mode);
/* Receive path entry point.  For a deferred-setup channel still in
 * BT_CONNECT2, the first recvmsg() completes the connection: it moves
 * the socket to BT_CONFIG and sends the pending L2CAP connect response.
 * Everything else is delegated to the generic bt_sock_recvmsg(). */
1856 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1858 struct sock *sk = sock->sk;
1862 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1863 struct l2cap_conn_rsp rsp;
1865 sk->sk_state = BT_CONFIG;
1867 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1868 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1869 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1870 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1871 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1872 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1880 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* SOL_L2CAP (legacy) setsockopt handler.  L2CAP_OPTIONS: seed the opts
 * struct with the current channel values so a short copy_from_user only
 * overrides what the caller supplied, validate the requested mode, then
 * commit.  L2CAP_LM: map link-mode bits onto sec_level (highest
 * matching bit wins, as the assignments run in increasing order) and
 * the role_switch/force_reliable flags. */
1883 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1885 struct sock *sk = sock->sk;
1886 struct l2cap_options opts;
1890 BT_DBG("sk %p", sk);
1896 opts.imtu = l2cap_pi(sk)->imtu;
1897 opts.omtu = l2cap_pi(sk)->omtu;
1898 opts.flush_to = l2cap_pi(sk)->flush_to;
1899 opts.mode = l2cap_pi(sk)->mode;
1900 opts.fcs = l2cap_pi(sk)->fcs;
1901 opts.max_tx = l2cap_pi(sk)->max_tx;
1902 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1904 len = min_t(unsigned int, sizeof(opts), optlen);
1905 if (copy_from_user((char *) &opts, optval, len)) {
1910 l2cap_pi(sk)->mode = opts.mode;
1911 switch (l2cap_pi(sk)->mode) {
1912 case L2CAP_MODE_BASIC:
1914 case L2CAP_MODE_ERTM:
1915 case L2CAP_MODE_STREAMING:
1924 l2cap_pi(sk)->imtu = opts.imtu;
1925 l2cap_pi(sk)->omtu = opts.omtu;
1926 l2cap_pi(sk)->fcs = opts.fcs;
1927 l2cap_pi(sk)->max_tx = opts.max_tx;
1928 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1932 if (get_user(opt, (u32 __user *) optval)) {
1937 if (opt & L2CAP_LM_AUTH)
1938 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1939 if (opt & L2CAP_LM_ENCRYPT)
1940 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1941 if (opt & L2CAP_LM_SECURE)
1942 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1944 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1945 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* SOL_BLUETOOTH setsockopt handler.  SOL_L2CAP is routed to the legacy
 * handler; BT_SECURITY validates the level range and applies it to
 * SEQPACKET/STREAM/RAW sockets only; BT_DEFER_SETUP is accepted only
 * while the socket is bound or listening. */
1957 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1959 struct sock *sk = sock->sk;
1960 struct bt_security sec;
1964 BT_DBG("sk %p", sk);
1966 if (level == SOL_L2CAP)
1967 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1969 if (level != SOL_BLUETOOTH)
1970 return -ENOPROTOOPT;
1976 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1977 && sk->sk_type != SOCK_RAW) {
1982 sec.level = BT_SECURITY_LOW;
1984 len = min_t(unsigned int, sizeof(sec), optlen);
1985 if (copy_from_user((char *) &sec, optval, len)) {
1990 if (sec.level < BT_SECURITY_LOW ||
1991 sec.level > BT_SECURITY_HIGH) {
1996 l2cap_pi(sk)->sec_level = sec.level;
1999 case BT_DEFER_SETUP:
2000 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2005 if (get_user(opt, (u32 __user *) optval)) {
2010 bt_sk(sk)->defer_setup = opt;
/* SOL_L2CAP (legacy) getsockopt handler.  L2CAP_OPTIONS returns the
 * current channel parameters; L2CAP_LM reconstructs the link-mode bits
 * from sec_level plus the role_switch/force_reliable flags;
 * L2CAP_CONNINFO is only valid once connected (or in deferred-setup
 * BT_CONNECT2) and exposes the ACL handle and remote device class. */
2022 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2024 struct sock *sk = sock->sk;
2025 struct l2cap_options opts;
2026 struct l2cap_conninfo cinfo;
2030 BT_DBG("sk %p", sk);
2032 if (get_user(len, optlen))
2039 opts.imtu = l2cap_pi(sk)->imtu;
2040 opts.omtu = l2cap_pi(sk)->omtu;
2041 opts.flush_to = l2cap_pi(sk)->flush_to;
2042 opts.mode = l2cap_pi(sk)->mode;
2043 opts.fcs = l2cap_pi(sk)->fcs;
2044 opts.max_tx = l2cap_pi(sk)->max_tx;
2045 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2047 len = min_t(unsigned int, len, sizeof(opts));
2048 if (copy_to_user(optval, (char *) &opts, len))
2054 switch (l2cap_pi(sk)->sec_level) {
2055 case BT_SECURITY_LOW:
2056 opt = L2CAP_LM_AUTH;
2058 case BT_SECURITY_MEDIUM:
2059 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2061 case BT_SECURITY_HIGH:
2062 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2070 if (l2cap_pi(sk)->role_switch)
2071 opt |= L2CAP_LM_MASTER;
2073 if (l2cap_pi(sk)->force_reliable)
2074 opt |= L2CAP_LM_RELIABLE;
2076 if (put_user(opt, (u32 __user *) optval))
2080 case L2CAP_CONNINFO:
2081 if (sk->sk_state != BT_CONNECTED &&
2082 !(sk->sk_state == BT_CONNECT2 &&
2083 bt_sk(sk)->defer_setup)) {
2088 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2089 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2091 len = min_t(unsigned int, len, sizeof(cinfo));
2092 if (copy_to_user(optval, (char *) &cinfo, len))
/* SOL_BLUETOOTH getsockopt handler, mirror of the setsockopt path:
 * SOL_L2CAP goes to the legacy handler; BT_SECURITY returns sec_level
 * for SEQPACKET/STREAM/RAW sockets; BT_DEFER_SETUP is readable only
 * while bound or listening. */
2106 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2108 struct sock *sk = sock->sk;
2109 struct bt_security sec;
2112 BT_DBG("sk %p", sk);
2114 if (level == SOL_L2CAP)
2115 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2117 if (level != SOL_BLUETOOTH)
2118 return -ENOPROTOOPT;
2120 if (get_user(len, optlen))
2127 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2128 && sk->sk_type != SOCK_RAW) {
2133 sec.level = l2cap_pi(sk)->sec_level;
2135 len = min_t(unsigned int, len, sizeof(sec));
2136 if (copy_to_user(optval, (char *) &sec, len))
2141 case BT_DEFER_SETUP:
2142 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2147 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* Shut a socket down: for ERTM channels first wait for all outstanding
 * I-frames to be acked, then mark both directions shut, close the
 * channel, and honor SO_LINGER by waiting for BT_CLOSED. */
2161 static int l2cap_sock_shutdown(struct socket *sock, int how)
2163 struct sock *sk = sock->sk;
2166 BT_DBG("sock %p, sk %p", sock, sk);
2172 if (!sk->sk_shutdown) {
2173 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2174 err = __l2cap_wait_ack(sk);
2176 sk->sk_shutdown = SHUTDOWN_MASK;
2177 l2cap_sock_clear_timer(sk);
2178 __l2cap_sock_close(sk, 0);
2180 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2181 err = bt_sock_wait_state(sk, BT_CLOSED,
/* Release the socket: perform a full shutdown (both directions) and
 * then kill the sock, dropping the final reference. */
2188 static int l2cap_sock_release(struct socket *sock)
2190 struct sock *sk = sock->sk;
2193 BT_DBG("sock %p, sk %p", sock, sk);
2198 err = l2cap_sock_shutdown(sock, 2);
2201 l2cap_sock_kill(sk);
/* Configuration finished: clear conf state and the setup timer, then
 * wake whoever is waiting — the connect()er for an outgoing channel
 * (no parent), or the accept()er on the parent for an incoming one. */
2205 static void l2cap_chan_ready(struct sock *sk)
2207 struct sock *parent = bt_sk(sk)->parent;
2209 BT_DBG("sk %p, parent %p", sk, parent);
2211 l2cap_pi(sk)->conf_state = 0;
2212 l2cap_sock_clear_timer(sk);
2215 /* Outgoing channel.
2216 * Wake up socket sleeping on connect.
2218 sk->sk_state = BT_CONNECTED;
2219 sk->sk_state_change(sk);
2221 /* Incoming channel.
2222 * Wake up socket sleeping on accept.
2224 parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection.  Walks the channel
 * list under its read lock, clones the skb for each SOCK_RAW socket
 * (skipping the originator) and queues the clone; a clone that cannot
 * be queued is dropped for that socket only. */
2228 /* Copy frame to all raw sockets on that connection */
2229 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2231 struct l2cap_chan_list *l = &conn->chan_list;
2232 struct sk_buff *nskb;
2235 BT_DBG("conn %p", conn);
2237 read_lock(&l->lock);
2238 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2239 if (sk->sk_type != SOCK_RAW)
2242 /* Don't send frame to the socket it came from */
2245 nskb = skb_clone(skb, GFP_ATOMIC);
2249 if (sock_queue_rcv_skb(sk, nskb))
2252 read_unlock(&l->lock);
2255 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU on the signalling CID: L2CAP header, command
 * header (code/ident/len), then @dlen bytes of @data.  Payload beyond
 * conn->mtu is carried in frag_list continuation skbs with no L2CAP
 * header, mirroring l2cap_skbuff_fromiovec(). */
2256 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2257 u8 code, u8 ident, u16 dlen, void *data)
2259 struct sk_buff *skb, **frag;
2260 struct l2cap_cmd_hdr *cmd;
2261 struct l2cap_hdr *lh;
2264 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2265 conn, code, ident, dlen);
2267 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2268 count = min_t(unsigned int, conn->mtu, len);
2270 skb = bt_skb_alloc(count, GFP_ATOMIC);
2274 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2275 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2276 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2278 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2281 cmd->len = cpu_to_le16(dlen);
2284 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2285 memcpy(skb_put(skb, count), data, count);
2291 /* Continuation fragments (no L2CAP header) */
2292 frag = &skb_shinfo(skb)->frag_list;
2294 count = min_t(unsigned int, conn->mtu, len);
2296 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2300 memcpy(skb_put(*frag, count), data, count);
2305 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: report its type and length,
 * and return its value widened to unsigned long — 1/2/4-byte options
 * are converted from little-endian, anything else is returned as a
 * pointer to the raw option bytes.  Returns the total encoded size so
 * the caller can advance through the option list. */
2315 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2317 struct l2cap_conf_opt *opt = *ptr;
2320 len = L2CAP_CONF_OPT_SIZE + opt->len;
2328 *val = *((u8 *) opt->val);
2332 *val = __le16_to_cpu(*((__le16 *) opt->val));
2336 *val = __le32_to_cpu(*((__le32 *) opt->val));
2340 *val = (unsigned long) opt->val;
2344 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Encode one configuration option at *ptr (the inverse of
 * l2cap_get_conf_opt): 1/2/4-byte values are stored little-endian,
 * other lengths treat @val as a pointer to raw bytes.  Advances *ptr
 * past the encoded option. */
2348 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2350 struct l2cap_conf_opt *opt = *ptr;
2352 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2359 *((u8 *) opt->val) = val;
2363 *((__le16 *) opt->val) = cpu_to_le16(val);
2367 *((__le32 *) opt->val) = cpu_to_le32(val);
2371 memcpy(opt->val, (void *) val, len);
2375 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer expiry: the socket pointer is smuggled through the timer's
 * unsigned long argument; send any pending acknowledgement. */
2378 static void l2cap_ack_timeout(unsigned long arg)
2380 struct sock *sk = (void *) arg;
2383 l2cap_send_ack(l2cap_pi(sk));
/* Initialize per-channel ERTM state: zero the sequence/ack counters,
 * arm the retransmission, monitor and ack timers, set up the SREJ and
 * busy queues, the send lock, and the deferred busy work item. */
2387 static inline void l2cap_ertm_init(struct sock *sk)
2389 l2cap_pi(sk)->expected_ack_seq = 0;
2390 l2cap_pi(sk)->unacked_frames = 0;
2391 l2cap_pi(sk)->buffer_seq = 0;
2392 l2cap_pi(sk)->num_acked = 0;
2393 l2cap_pi(sk)->frames_sent = 0;
2395 setup_timer(&l2cap_pi(sk)->retrans_timer,
2396 l2cap_retrans_timeout, (unsigned long) sk);
2397 setup_timer(&l2cap_pi(sk)->monitor_timer,
2398 l2cap_monitor_timeout, (unsigned long) sk);
2399 setup_timer(&l2cap_pi(sk)->ack_timer,
2400 l2cap_ack_timeout, (unsigned long) sk);
2402 __skb_queue_head_init(SREJ_QUEUE(sk));
2403 __skb_queue_head_init(BUSY_QUEUE(sk));
2404 spin_lock_init(&l2cap_pi(sk)->send_lock);
2406 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Return non-zero when @mode is supported by both the local feature
 * mask (ERTM/streaming bits are only added when the module allows it —
 * presumably gated on enable_ertm in a line not shown here) and the
 * remote @feat_mask. */
2409 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2411 u32 local_feat_mask = l2cap_feat_mask;
2413 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2416 case L2CAP_MODE_ERTM:
2417 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2418 case L2CAP_MODE_STREAMING:
2419 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to propose: keep ERTM/streaming only when the
 * remote feature mask supports it, otherwise fall back to basic mode. */
2425 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2428 case L2CAP_MODE_STREAMING:
2429 case L2CAP_MODE_ERTM:
2430 if (l2cap_mode_supported(mode, remote_feat_mask))
2434 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into @data.  On the first
 * request the requested mode is validated against the remote feature
 * mask (STATE2 devices disconnect on mismatch, others downgrade via
 * l2cap_select_mode).  Basic mode only advertises a non-default MTU;
 * ERTM and streaming add an RFC option (with max_pdu_size clamped to
 * conn->mtu - 10) and, when the peer supports FCS and it is disabled
 * locally or was not received, an FCS option of L2CAP_FCS_NONE. */
2438 static int l2cap_build_conf_req(struct sock *sk, void *data)
2440 struct l2cap_pinfo *pi = l2cap_pi(sk);
2441 struct l2cap_conf_req *req = data;
2442 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2443 void *ptr = req->data;
2445 BT_DBG("sk %p", sk);
2447 if (pi->num_conf_req || pi->num_conf_rsp)
2451 case L2CAP_MODE_STREAMING:
2452 case L2CAP_MODE_ERTM:
2453 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2454 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2455 l2cap_send_disconn_req(pi->conn, sk);
2458 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2464 case L2CAP_MODE_BASIC:
2465 if (pi->imtu != L2CAP_DEFAULT_MTU)
2466 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2469 case L2CAP_MODE_ERTM:
2470 rfc.mode = L2CAP_MODE_ERTM;
2471 rfc.txwin_size = pi->tx_win;
2472 rfc.max_transmit = pi->max_tx;
2473 rfc.retrans_timeout = 0;
2474 rfc.monitor_timeout = 0;
2475 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2476 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2477 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2479 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2480 sizeof(rfc), (unsigned long) &rfc);
2482 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2485 if (pi->fcs == L2CAP_FCS_NONE ||
2486 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2487 pi->fcs = L2CAP_FCS_NONE;
2488 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2492 case L2CAP_MODE_STREAMING:
2493 rfc.mode = L2CAP_MODE_STREAMING;
2495 rfc.max_transmit = 0;
2496 rfc.retrans_timeout = 0;
2497 rfc.monitor_timeout = 0;
2498 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2499 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2500 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2502 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2503 sizeof(rfc), (unsigned long) &rfc);
2505 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2508 if (pi->fcs == L2CAP_FCS_NONE ||
2509 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2510 pi->fcs = L2CAP_FCS_NONE;
2511 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2516 /* FIXME: Need actual value of the flush timeout */
2517 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2518 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2520 req->dcid = cpu_to_le16(pi->dcid);
2521 req->flags = cpu_to_le16(0)
/* Parse the peer's accumulated configuration request (pi->conf_req)
 * and build our response into @data.  Unknown non-hint options are
 * echoed back with L2CAP_CONF_UNKNOWN; on the first exchange the mode
 * is reconciled against the feature masks, and a second unacceptable
 * mode leads to -ECONNREFUSED.  On success the negotiated MTU, RFC
 * parameters and FCS setting are committed and OUTPUT_DONE is set. */
2526 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2528 struct l2cap_pinfo *pi = l2cap_pi(sk);
2529 struct l2cap_conf_rsp *rsp = data;
2530 void *ptr = rsp->data;
2531 void *req = pi->conf_req;
2532 int len = pi->conf_len;
2533 int type, hint, olen;
2535 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2536 u16 mtu = L2CAP_DEFAULT_MTU;
2537 u16 result = L2CAP_CONF_SUCCESS;
2539 BT_DBG("sk %p", sk);
2541 while (len >= L2CAP_CONF_OPT_SIZE) {
2542 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2544 hint = type & L2CAP_CONF_HINT;
2545 type &= L2CAP_CONF_MASK;
2548 case L2CAP_CONF_MTU:
2552 case L2CAP_CONF_FLUSH_TO:
2556 case L2CAP_CONF_QOS:
2559 case L2CAP_CONF_RFC:
2560 if (olen == sizeof(rfc))
2561 memcpy(&rfc, (void *) val, olen);
2564 case L2CAP_CONF_FCS:
2565 if (val == L2CAP_FCS_NONE)
2566 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2574 result = L2CAP_CONF_UNKNOWN;
2575 *((u8 *) ptr++) = type;
2580 if (pi->num_conf_rsp || pi->num_conf_req)
2584 case L2CAP_MODE_STREAMING:
2585 case L2CAP_MODE_ERTM:
2586 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2587 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2588 return -ECONNREFUSED;
2591 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2596 if (pi->mode != rfc.mode) {
2597 result = L2CAP_CONF_UNACCEPT;
2598 rfc.mode = pi->mode;
2600 if (pi->num_conf_rsp == 1)
2601 return -ECONNREFUSED;
2603 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2604 sizeof(rfc), (unsigned long) &rfc);
2608 if (result == L2CAP_CONF_SUCCESS) {
2609 /* Configure output options and let the other side know
2610 * which ones we don't like. */
2612 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2613 result = L2CAP_CONF_UNACCEPT;
2616 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2618 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2621 case L2CAP_MODE_BASIC:
2622 pi->fcs = L2CAP_FCS_NONE;
2623 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2626 case L2CAP_MODE_ERTM:
2627 pi->remote_tx_win = rfc.txwin_size;
2628 pi->remote_max_tx = rfc.max_transmit;
2629 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2630 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10); /* NOTE(review): mtu-10 is host-order and the field is __le16; cpu_to_le16() looks intended — verify on big-endian */
2632 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2634 rfc.retrans_timeout =
2635 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); /* NOTE(review): constant is host-order; cpu_to_le16() looks intended — verify */
2636 rfc.monitor_timeout =
2637 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO); /* NOTE(review): same endian concern as above */
2639 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2641 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2642 sizeof(rfc), (unsigned long) &rfc);
2646 case L2CAP_MODE_STREAMING:
2647 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2648 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10); /* NOTE(review): same endian concern as the ERTM branch */
2650 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2652 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2654 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2655 sizeof(rfc), (unsigned long) &rfc);
2660 result = L2CAP_CONF_UNACCEPT;
2662 memset(&rfc, 0, sizeof(rfc));
2663 rfc.mode = pi->mode;
2666 if (result == L2CAP_CONF_SUCCESS)
2667 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2669 rsp->scid = cpu_to_le16(pi->dcid);
2670 rsp->result = cpu_to_le16(result);
2671 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response and build a follow-up
 * request into @data: an undersized MTU is bumped to the minimum and
 * flagged unacceptable, flush-timeout and RFC options are echoed, and
 * a mode change is refused for STATE2 devices.  On success the agreed
 * ERTM/streaming parameters are committed to the channel. */
2676 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2678 struct l2cap_pinfo *pi = l2cap_pi(sk);
2679 struct l2cap_conf_req *req = data;
2680 void *ptr = req->data;
2683 struct l2cap_conf_rfc rfc;
2685 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2687 while (len >= L2CAP_CONF_OPT_SIZE) {
2688 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2691 case L2CAP_CONF_MTU:
2692 if (val < L2CAP_DEFAULT_MIN_MTU) {
2693 *result = L2CAP_CONF_UNACCEPT;
2694 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2697 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2700 case L2CAP_CONF_FLUSH_TO:
2702 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2706 case L2CAP_CONF_RFC:
2707 if (olen == sizeof(rfc))
2708 memcpy(&rfc, (void *)val, olen);
2710 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2711 rfc.mode != pi->mode)
2712 return -ECONNREFUSED;
2714 pi->mode = rfc.mode;
2717 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2718 sizeof(rfc), (unsigned long) &rfc);
2723 if (*result == L2CAP_CONF_SUCCESS) {
2725 case L2CAP_MODE_ERTM:
2726 pi->remote_tx_win = rfc.txwin_size;
2727 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2728 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2729 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2731 case L2CAP_MODE_STREAMING:
2732 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2736 req->dcid = cpu_to_le16(pi->dcid);
2737 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal configuration response (scid/result/flags, no
 * options) into @data; the return value (not visible here) is used by
 * callers as the PDU length for l2cap_send_cmd(). */
2742 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2744 struct l2cap_conf_rsp *rsp = data;
2745 void *ptr = rsp->data;
2747 BT_DBG("sk %p", sk);
2749 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2750 rsp->result = cpu_to_le16(result);
2751 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from an accepted configuration response and
 * commit its timeouts/MPS (ERTM) or MPS (streaming) to the channel.
 * No-op for basic-mode channels. */
2756 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2758 struct l2cap_pinfo *pi = l2cap_pi(sk);
2761 struct l2cap_conf_rfc rfc;
2763 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2765 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2768 while (len >= L2CAP_CONF_OPT_SIZE) {
2769 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2772 case L2CAP_CONF_RFC:
2773 if (olen == sizeof(rfc))
2774 memcpy(&rfc, (void *)val, olen);
2781 case L2CAP_MODE_ERTM:
2782 pi->remote_tx_win = rfc.txwin_size;
2783 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2784 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2785 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2787 case L2CAP_MODE_STREAMING:
2788 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * information request (matching ident), treat the feature exchange as
 * done: stop the info timer, mark the state and resume pending channel
 * setup via l2cap_conn_start(). */
2792 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2794 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2796 if (rej->reason != 0x0000)
2799 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2800 cmd->ident == conn->info_ident) {
2801 del_timer(&conn->info_timer);
2803 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2804 conn->info_ident = 0;
2806 l2cap_conn_start(conn);
/* Handle an incoming Connection Request.  Look up a listening socket
 * for the PSM, enforce link security for non-SDP PSMs, check the
 * accept backlog, allocate and initialize the child socket, reject a
 * duplicate remote CID, then decide the result/status: success,
 * authorization-pending (defer_setup), authentication-pending, or
 * info-pending when the feature exchange has not completed — in which
 * case an information request is also (re)sent. */
2812 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2814 struct l2cap_chan_list *list = &conn->chan_list;
2815 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2816 struct l2cap_conn_rsp rsp;
2817 struct sock *sk, *parent;
2818 int result, status = L2CAP_CS_NO_INFO;
2820 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2821 __le16 psm = req->psm;
2823 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2825 /* Check if we have socket listening on psm */
2826 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2828 result = L2CAP_CR_BAD_PSM;
2832 /* Check if the ACL is secure enough (if not SDP) */
2833 if (psm != cpu_to_le16(0x0001) &&
2834 !hci_conn_check_link_mode(conn->hcon)) {
2835 conn->disc_reason = 0x05;
2836 result = L2CAP_CR_SEC_BLOCK;
2840 result = L2CAP_CR_NO_MEM;
2842 /* Check for backlog size */
2843 if (sk_acceptq_is_full(parent)) {
2844 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2848 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2852 write_lock_bh(&list->lock);
2854 /* Check if we already have channel with that dcid */
2855 if (__l2cap_get_chan_by_dcid(list, scid)) {
2856 write_unlock_bh(&list->lock);
2857 sock_set_flag(sk, SOCK_ZAPPED);
2858 l2cap_sock_kill(sk);
2862 hci_conn_hold(conn->hcon);
2864 l2cap_sock_init(sk, parent);
2865 bacpy(&bt_sk(sk)->src, conn->src);
2866 bacpy(&bt_sk(sk)->dst, conn->dst);
2867 l2cap_pi(sk)->psm = psm;
2868 l2cap_pi(sk)->dcid = scid;
2870 __l2cap_chan_add(conn, sk, parent);
2871 dcid = l2cap_pi(sk)->scid;
2873 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2875 l2cap_pi(sk)->ident = cmd->ident;
2877 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2878 if (l2cap_check_security(sk)) {
2879 if (bt_sk(sk)->defer_setup) {
2880 sk->sk_state = BT_CONNECT2;
2881 result = L2CAP_CR_PEND;
2882 status = L2CAP_CS_AUTHOR_PEND;
2883 parent->sk_data_ready(parent, 0);
2885 sk->sk_state = BT_CONFIG;
2886 result = L2CAP_CR_SUCCESS;
2887 status = L2CAP_CS_NO_INFO;
2890 sk->sk_state = BT_CONNECT2;
2891 result = L2CAP_CR_PEND;
2892 status = L2CAP_CS_AUTHEN_PEND;
2895 sk->sk_state = BT_CONNECT2;
2896 result = L2CAP_CR_PEND;
2897 status = L2CAP_CS_NO_INFO;
2900 write_unlock_bh(&list->lock);
2903 bh_unlock_sock(parent);
2906 rsp.scid = cpu_to_le16(scid);
2907 rsp.dcid = cpu_to_le16(dcid);
2908 rsp.result = cpu_to_le16(result);
2909 rsp.status = cpu_to_le16(status);
2910 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2912 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2913 struct l2cap_info_req info;
2914 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2916 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2917 conn->info_ident = l2cap_get_ident(conn);
2919 mod_timer(&conn->info_timer, jiffies +
2920 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2922 l2cap_send_cmd(conn, conn->info_ident,
2923 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response.  Locate the channel by scid (or by the
 * request ident while still pending).  On success move to BT_CONFIG,
 * record the remote dcid and immediately fire our configuration
 * request; pending responses set CONNECT_PEND; anything else tears the
 * channel down with ECONNREFUSED. */
2929 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2931 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2932 u16 scid, dcid, result, status;
2936 scid = __le16_to_cpu(rsp->scid);
2937 dcid = __le16_to_cpu(rsp->dcid);
2938 result = __le16_to_cpu(rsp->result);
2939 status = __le16_to_cpu(rsp->status);
2941 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2944 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2948 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2954 case L2CAP_CR_SUCCESS:
2955 sk->sk_state = BT_CONFIG;
2956 l2cap_pi(sk)->ident = 0;
2957 l2cap_pi(sk)->dcid = dcid;
2958 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2959 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2961 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2962 l2cap_build_conf_req(sk, req), req);
2963 l2cap_pi(sk)->num_conf_req++;
2967 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2971 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configuration Request.  Option data is accumulated in
 * pi->conf_req across continuation packets (flag 0x0001); oversized
 * accumulations are rejected.  Once complete, l2cap_parse_conf_req()
 * builds the response; when both OUTPUT_DONE and INPUT_DONE are set
 * the channel goes BT_CONNECTED (selecting CRC16 FCS unless both sides
 * disabled it) and ERTM state is initialized.  If we have not yet sent
 * our own request, it is sent now. */
2979 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2981 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2987 dcid = __le16_to_cpu(req->dcid);
2988 flags = __le16_to_cpu(req->flags);
2990 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2992 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2996 if (sk->sk_state == BT_DISCONN)
2999 /* Reject if config buffer is too small. */
3000 len = cmd_len - sizeof(*req);
3001 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3002 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3003 l2cap_build_conf_rsp(sk, rsp,
3004 L2CAP_CONF_REJECT, flags), rsp);
3009 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3010 l2cap_pi(sk)->conf_len += len;
3012 if (flags & 0x0001) {
3013 /* Incomplete config. Send empty response. */
3014 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3015 l2cap_build_conf_rsp(sk, rsp,
3016 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3020 /* Complete config. */
3021 len = l2cap_parse_conf_req(sk, rsp);
3023 l2cap_send_disconn_req(conn, sk);
3027 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3028 l2cap_pi(sk)->num_conf_rsp++;
3030 /* Reset config buffer. */
3031 l2cap_pi(sk)->conf_len = 0;
3033 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3036 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3037 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3038 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3039 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3041 sk->sk_state = BT_CONNECTED;
3043 l2cap_pi(sk)->next_tx_seq = 0;
3044 l2cap_pi(sk)->expected_tx_seq = 0;
3045 __skb_queue_head_init(TX_QUEUE(sk));
3046 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3047 l2cap_ertm_init(sk);
3049 l2cap_chan_ready(sk);
3053 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3055 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3056 l2cap_build_conf_req(sk, buf), buf);
3057 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configuration Response.  Success commits the RFC option;
 * an unacceptable result triggers up to L2CAP_CONF_MAX_CONF_RSP
 * renegotiation attempts via l2cap_parse_conf_rsp(); anything else
 * disconnects.  When both sides are configured the channel becomes
 * BT_CONNECTED, mirroring l2cap_config_req(). */
3065 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3067 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3068 u16 scid, flags, result;
3070 int len = cmd->len - sizeof(*rsp); /* NOTE(review): cmd->len is stored little-endian (see l2cap_build_cmd); __le16_to_cpu(cmd->len) looks intended — verify on big-endian */
3072 scid = __le16_to_cpu(rsp->scid);
3073 flags = __le16_to_cpu(rsp->flags);
3074 result = __le16_to_cpu(rsp->result);
3076 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3077 scid, flags, result);
3079 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3084 case L2CAP_CONF_SUCCESS:
3085 l2cap_conf_rfc_get(sk, rsp->data, len);
3088 case L2CAP_CONF_UNACCEPT:
3089 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3092 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3093 l2cap_send_disconn_req(conn, sk);
3097 /* throw out any old stored conf requests */
3098 result = L2CAP_CONF_SUCCESS;
3099 len = l2cap_parse_conf_rsp(sk, rsp->data,
3102 l2cap_send_disconn_req(conn, sk);
3106 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3107 L2CAP_CONF_REQ, len, req);
3108 l2cap_pi(sk)->num_conf_req++;
3109 if (result != L2CAP_CONF_SUCCESS)
3115 sk->sk_err = ECONNRESET;
3116 l2cap_sock_set_timer(sk, HZ * 5);
3117 l2cap_send_disconn_req(conn, sk);
3124 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3126 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3127 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3128 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3129 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3131 sk->sk_state = BT_CONNECTED;
3132 l2cap_pi(sk)->next_tx_seq = 0;
3133 l2cap_pi(sk)->expected_tx_seq = 0;
3134 __skb_queue_head_init(TX_QUEUE(sk));
3135 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3136 l2cap_ertm_init(sk);
3138 l2cap_chan_ready(sk);
/*
 * Handle an incoming L2CAP Disconnect Request: acknowledge with a
 * Disconnect Response, shut the socket down and delete the channel
 * with ECONNRESET. (Some lines are elided in this view.)
 */
3146 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3148 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3149 	struct l2cap_disconn_rsp rsp;
3153 	scid = __le16_to_cpu(req->scid);
3154 	dcid = __le16_to_cpu(req->dcid);
3156 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
	/* The peer's dcid is our scid: look the channel up by it. */
3158 	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	/* Echo back our CID pair (reversed roles) in the response. */
3162 	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3163 	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3164 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3166 	sk->sk_shutdown = SHUTDOWN_MASK;
3168 	l2cap_chan_del(sk, ECONNRESET);
3171 	l2cap_sock_kill(sk);
/*
 * Handle an incoming L2CAP Disconnect Response to a request we sent:
 * tear the channel down cleanly (error 0) and kill the socket.
 */
3175 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3177 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3181 	scid = __le16_to_cpu(rsp->scid);
3182 	dcid = __le16_to_cpu(rsp->dcid);
3184 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3186 	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3190 	l2cap_chan_del(sk, 0);
3193 	l2cap_sock_kill(sk);
/*
 * Handle an incoming L2CAP Information Request. Answers feature-mask
 * and fixed-channel queries; anything else gets NOTSUPP.
 */
3197 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3199 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3202 	type = __le16_to_cpu(req->type);
3204 	BT_DBG("type 0x%4.4x", type);
3206 	if (type == L2CAP_IT_FEAT_MASK) {
3208 		u32 feat_mask = l2cap_feat_mask;
3209 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3210 		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3211 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming support (gated by enable_ertm
		 * in the elided condition, presumably — confirm). */
3213 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3215 		put_unaligned_le32(feat_mask, rsp->data);
3216 		l2cap_send_cmd(conn, cmd->ident,
3217 					L2CAP_INFO_RSP, sizeof(buf), buf);
3218 	} else if (type == L2CAP_IT_FIXED_CHAN) {
3220 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3221 		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3222 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* 8-byte fixed-channel bitmap follows the 4-byte rsp header. */
3223 		memcpy(buf + 4, l2cap_fixed_chan, 8);
3224 		l2cap_send_cmd(conn, cmd->ident,
3225 					L2CAP_INFO_RSP, sizeof(buf), buf);
3227 		struct l2cap_info_rsp rsp;
3228 		rsp.type = cpu_to_le16(type);
3229 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3230 		l2cap_send_cmd(conn, cmd->ident,
3231 					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming L2CAP Information Response. Records the peer's
 * feature mask; if fixed channels are supported, chains a follow-up
 * fixed-channel query, otherwise marks info exchange done and starts
 * any pending channels.
 */
3237 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3239 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3242 	type   = __le16_to_cpu(rsp->type);
3243 	result = __le16_to_cpu(rsp->result);
3245 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
	/* Response arrived: stop the info-request timeout. */
3247 	del_timer(&conn->info_timer);
3249 	if (type == L2CAP_IT_FEAT_MASK) {
3250 		conn->feat_mask = get_unaligned_le32(rsp->data);
3252 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3253 			struct l2cap_info_req req;
3254 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3256 			conn->info_ident = l2cap_get_ident(conn);
3258 			l2cap_send_cmd(conn, conn->info_ident,
3259 					L2CAP_INFO_REQ, sizeof(req), &req);
3261 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3262 			conn->info_ident = 0;
3264 			l2cap_conn_start(conn);
3266 	} else if (type == L2CAP_IT_FIXED_CHAN) {
3267 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3268 		conn->info_ident = 0;
3270 		l2cap_conn_start(conn);
/*
 * Demultiplex the L2CAP signalling channel: walk every command in the
 * skb, dispatch by opcode, and reject malformed or unknown commands
 * with a Command Reject.
 */
3276 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3278 	u8 *data = skb->data;
3280 	struct l2cap_cmd_hdr cmd;
	/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic. */
3283 	l2cap_raw_recv(conn, skb);
3285 	while (len >= L2CAP_CMD_HDR_SIZE) {
3287 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3288 		data += L2CAP_CMD_HDR_SIZE;
3289 		len  -= L2CAP_CMD_HDR_SIZE;
3291 		cmd_len = le16_to_cpu(cmd.len);
3293 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
		/* Length must fit the remaining payload; ident 0 is invalid. */
3295 		if (cmd_len > len || !cmd.ident) {
3296 			BT_DBG("corrupted command");
3301 		case L2CAP_COMMAND_REJ:
3302 			l2cap_command_rej(conn, &cmd, data);
3305 		case L2CAP_CONN_REQ:
3306 			err = l2cap_connect_req(conn, &cmd, data);
3309 		case L2CAP_CONN_RSP:
3310 			err = l2cap_connect_rsp(conn, &cmd, data);
3313 		case L2CAP_CONF_REQ:
3314 			err = l2cap_config_req(conn, &cmd, cmd_len, data);
3317 		case L2CAP_CONF_RSP:
3318 			err = l2cap_config_rsp(conn, &cmd, data);
3321 		case L2CAP_DISCONN_REQ:
3322 			err = l2cap_disconnect_req(conn, &cmd, data);
3325 		case L2CAP_DISCONN_RSP:
3326 			err = l2cap_disconnect_rsp(conn, &cmd, data);
3329 		case L2CAP_ECHO_REQ:
			/* Echo: reflect the payload straight back. */
3330 			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3333 		case L2CAP_ECHO_RSP:
3336 		case L2CAP_INFO_REQ:
3337 			err = l2cap_information_req(conn, &cmd, data);
3340 		case L2CAP_INFO_RSP:
3341 			err = l2cap_information_rsp(conn, &cmd, data);
3345 			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
		/* Handler failed: answer with a Command Reject. */
3351 			struct l2cap_cmd_rej rej;
3352 			BT_DBG("error %d", err);
3354 			/* FIXME: Map err to a valid reason */
3355 			rej.reason = cpu_to_le16(0);
3356 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the 2-byte CRC16 FCS trailing an ERTM/streaming frame.
 * The skb is trimmed first, so skb->len then points at the FCS bytes
 * still present in the buffer; the CRC is computed over the L2CAP
 * header (skb->data - hdr_size) plus the remaining payload.
 * Returns nonzero on mismatch (exact return in elided lines).
 */
3366 static int l2cap_check_fcs(struct l2cap_pinfo *pi,  struct sk_buff *skb)
3368 	u16 our_fcs, rcv_fcs;
	/* basic L2CAP header + 2-byte control field precede skb->data */
3369 	int hdr_size = L2CAP_HDR_SIZE + 2;
3371 	if (pi->fcs == L2CAP_FCS_CRC16) {
3372 		skb_trim(skb, skb->len - 2);
3373 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3374 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3376 		if (our_fcs != rcv_fcs)
/*
 * Answer a poll (P-bit) from the peer: send RNR if we are locally
 * busy, otherwise flush pending I-frames (which carry the F-bit),
 * and fall back to a plain RR if nothing was sent.
 */
3382 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3384 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3387 	pi->frames_sent = 0;
	/* Next outgoing frame must carry the Final bit. */
3388 	pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3390 	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3392 	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3393 		control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3394 		l2cap_send_sframe(pi, control);
3395 		pi->conn_state |= L2CAP_CONN_RNR_SENT;
3396 		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3399 	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3400 		__mod_retrans_timer();
3402 	spin_lock_bh(&pi->send_lock);
3403 	l2cap_ertm_send(sk);
3404 	spin_unlock_bh(&pi->send_lock);
	/* No I-frame went out: acknowledge the poll with an RR instead. */
3406 	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3407 			pi->frames_sent == 0) {
3408 		control |= L2CAP_SUPER_RCV_READY;
3409 		l2cap_send_sframe(pi, control);
/*
 * Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq distance from buffer_seq (modulo-64 window
 * arithmetic). Duplicate tx_seq values are detected via the early
 * equality check (return path in elided lines).
 */
3413 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3415 	struct sk_buff *next_skb;
3416 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3417 	int tx_seq_offset, next_tx_seq_offset;
3419 	bt_cb(skb)->tx_seq = tx_seq;
3420 	bt_cb(skb)->sar = sar;
3422 	next_skb = skb_peek(SREJ_QUEUE(sk));
	/* Empty queue: just append. */
3424 		__skb_queue_tail(SREJ_QUEUE(sk), skb);
	/* Offset of the new frame from the receive window base. */
3428 	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3429 	if (tx_seq_offset < 0)
3430 		tx_seq_offset += 64;
3433 		if (bt_cb(next_skb)->tx_seq == tx_seq)
3436 		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3437 						pi->buffer_seq) % 64;
3438 		if (next_tx_seq_offset < 0)
3439 			next_tx_seq_offset += 64;
		/* First queued frame farther from the base: insert before it. */
3441 		if (next_tx_seq_offset > tx_seq_offset) {
3442 			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3446 		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3449 	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
	/* Largest offset seen so far: append at the tail. */
3451 	__skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble an SDU from ERTM I-frames according to the SAR bits in
 * the control field: unsegmented frames are queued directly; START
 * allocates pi->sdu, CONTINUE/END append to it, END clones the result
 * onto the socket receive queue. SAR protocol violations fall through
 * to a disconnect (label in elided lines). SAR_RETRY handles the case
 * where queueing the finished SDU failed (e.g. receive buffer full)
 * so the END frame can be retried without re-appending data.
 */
3456 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3458 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3459 	struct sk_buff *_skb;
3462 	switch (control & L2CAP_CTRL_SAR) {
3463 	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame while mid-SDU is a protocol error. */
3464 		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3467 		err = sock_queue_rcv_skb(sk, skb);
3473 	case L2CAP_SDU_START:
3474 		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
		/* First two payload bytes of a START frame carry SDU length. */
3477 		pi->sdu_len = get_unaligned_le16(skb->data);
3479 		if (pi->sdu_len > pi->imtu)
3482 		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3486 		/* pull sdu_len bytes only after alloc, because of Local Busy
3487 		 * condition we have to be sure that this will be executed
3488 		 * only once, i.e., when alloc does not fail */
3491 		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3493 		pi->conn_state |= L2CAP_CONN_SAR_SDU;
3494 		pi->partial_sdu_len = skb->len;
3497 	case L2CAP_SDU_CONTINUE:
3498 		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3504 		pi->partial_sdu_len += skb->len;
		/* Running total must never exceed the announced SDU length. */
3505 		if (pi->partial_sdu_len > pi->sdu_len)
3508 		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
	/* END-of-SDU case follows (case label in elided lines). */
3513 		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
		/* On a retry the payload was already appended; skip the copy. */
3519 		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3520 			pi->partial_sdu_len += skb->len;
3522 			if (pi->partial_sdu_len > pi->imtu)
3525 			if (pi->partial_sdu_len != pi->sdu_len)
3528 			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3531 		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			/* Clone failed: mark for retry on redelivery. */
3533 			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3537 		err = sock_queue_rcv_skb(sk, _skb);
3540 			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3544 		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3545 		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
	/* Shared failure path: tear the channel down. */
3559 	l2cap_send_disconn_req(pi->conn, sk);
/*
 * Workqueue handler for the ERTM local-busy condition: repeatedly try
 * to drain BUSY_QUEUE into the SDU reassembly path, sleeping between
 * attempts, until it empties, too many tries elapse, a signal arrives,
 * or the socket errors. On success it sends RR+Poll to tell the peer
 * we are ready again and clears the LOCAL_BUSY/RNR_SENT state.
 */
3564 static void l2cap_busy_work(struct work_struct *work)
3566 	DECLARE_WAITQUEUE(wait, current);
3567 	struct l2cap_pinfo *pi =
3568 		container_of(work, struct l2cap_pinfo, busy_work);
	/* struct sock is the first member of l2cap_pinfo, so the cast holds. */
3569 	struct sock *sk = (struct sock *)pi;
3570 	int n_tries = 0, timeo = HZ/5, err;
3571 	struct sk_buff *skb;
3576 	add_wait_queue(sk_sleep(sk), &wait);
3577 	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3578 		set_current_state(TASK_INTERRUPTIBLE);
		/* Give up after L2CAP_LOCAL_BUSY_TRIES attempts. */
3580 		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3582 			l2cap_send_disconn_req(pi->conn, sk);
3589 		if (signal_pending(current)) {
3590 			err = sock_intr_errno(timeo);
3595 		timeo = schedule_timeout(timeo);
3598 		err = sock_error(sk);
		/* Drain as many queued frames as the reassembler will take. */
3602 		while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3603 			control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3604 			err = l2cap_ertm_reassembly_sdu(sk, skb, control);
				/* Still busy: push the frame back and retry later. */
3606 				skb_queue_head(BUSY_QUEUE(sk), skb);
3610 			pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3617 	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
	/* We previously sent RNR: poll the peer to resume transmission. */
3620 	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3621 	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3622 	l2cap_send_sframe(pi, control);
3623 	l2cap_pi(sk)->retry_count = 1;
3625 	del_timer(&pi->retrans_timer);
3626 	__mod_monitor_timer();
3628 	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3631 	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3632 	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3634 	set_current_state(TASK_RUNNING);
3635 	remove_wait_queue(sk_sleep(sk), &wait);
/*
 * Deliver a received ERTM I-frame toward the application. If already
 * locally busy, just append to BUSY_QUEUE. If reassembly/queueing
 * fails, enter the local-busy state: queue the frame, send RNR, and
 * kick the busy workqueue to retry delivery.
 */
3640 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3642 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3645 	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3646 		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3647 		__skb_queue_tail(BUSY_QUEUE(sk), skb);
3651 	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		/* Delivered: advance the receive window base. */
3653 		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3657 	/* Busy Condition */
3658 	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3659 	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3660 	__skb_queue_tail(BUSY_QUEUE(sk), skb);
3662 	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3663 	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3664 	l2cap_send_sframe(pi, sctrl);
3666 	pi->conn_state |= L2CAP_CONN_RNR_SENT;
	/* Defer retries to process context via the busy workqueue. */
3668 	queue_work(_busy_wq, &pi->busy_work);
/*
 * Reassemble an SDU from streaming-mode frames. Unlike ERTM, lost
 * frames are tolerated: a SAR violation discards the partial SDU
 * rather than disconnecting (discard paths in elided lines).
 */
3673 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3675 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3676 	struct sk_buff *_skb;
	/*
3680 	 * TODO: We have to notify the userland if some data is lost with the
	 * streaming channel (original TODO retained).
	 */
3684 	switch (control & L2CAP_CTRL_SAR) {
3685 	case L2CAP_SDU_UNSEGMENTED:
3686 		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3691 		err = sock_queue_rcv_skb(sk, skb);
3697 	case L2CAP_SDU_START:
3698 		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
		/* First two payload bytes announce the total SDU length. */
3703 		pi->sdu_len = get_unaligned_le16(skb->data);
3706 		if (pi->sdu_len > pi->imtu) {
3711 		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3717 		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3719 		pi->conn_state |= L2CAP_CONN_SAR_SDU;
3720 		pi->partial_sdu_len = skb->len;
3724 	case L2CAP_SDU_CONTINUE:
3725 		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3728 		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3730 		pi->partial_sdu_len += skb->len;
3731 		if (pi->partial_sdu_len > pi->sdu_len)
	/* END-of-SDU case (label in elided lines). */
3739 		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3742 		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3744 		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3745 		pi->partial_sdu_len += skb->len;
3747 		if (pi->partial_sdu_len > pi->imtu)
		/* Only deliver when the SDU arrived complete. */
3750 		if (pi->partial_sdu_len == pi->sdu_len) {
3751 			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			/* NOTE(review): no NULL check on the clone is visible
			 * before queueing — confirm against the elided lines. */
3752 			err = sock_queue_rcv_skb(sk, _skb);
/*
 * After a selectively-rejected frame arrives, deliver the run of
 * now-in-order frames buffered in SREJ_QUEUE, advancing
 * buffer_seq_srej and tx_seq (modulo 64) as each is consumed.
 */
3767 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3769 	struct sk_buff *skb;
3772 	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* Stop at the first gap in the sequence. */
3773 		if (bt_cb(skb)->tx_seq != tx_seq)
3776 		skb = skb_dequeue(SREJ_QUEUE(sk));
3777 		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3778 		l2cap_ertm_reassembly_sdu(sk, skb, control);
3779 		l2cap_pi(sk)->buffer_seq_srej =
3780 			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3781 		tx_seq = (tx_seq + 1) % 64;
/*
 * Re-send SREJ S-frames for every outstanding entry up to and
 * including tx_seq; the matching entry is removed (in elided lines),
 * the rest are re-queued at the list tail preserving order.
 */
3785 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3787 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3788 	struct srej_list *l, *tmp;
3791 	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3792 		if (l->tx_seq == tx_seq) {
3797 		control = L2CAP_SUPER_SELECT_REJECT;
3798 		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3799 		l2cap_send_sframe(pi, control);
3801 		list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * Send one SREJ per missing frame between expected_tx_seq and the
 * just-received tx_seq, recording each requested sequence number in
 * SREJ_LIST, then skip expected_tx_seq past the received frame.
 */
3805 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3807 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3808 	struct srej_list *new;
3811 	while (tx_seq != pi->expected_tx_seq) {
3812 		control = L2CAP_SUPER_SELECT_REJECT;
3813 		control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3814 		l2cap_send_sframe(pi, control);
3816 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		/* NOTE(review): kzalloc result is dereferenced with no visible
		 * NULL check — GFP_ATOMIC can fail; confirm/fix upstream. */
3817 		new->tx_seq = pi->expected_tx_seq;
3818 		pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3819 		list_add_tail(&new->list, SREJ_LIST(sk));
3821 	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/*
 * Core ERTM receive path for I-frames: acknowledge via req_seq,
 * validate tx_seq against the receive window, run the SREJ recovery
 * state machine for out-of-order frames, deliver in-order frames,
 * and send an RR acknowledgement every tx_win/6 + 1 frames.
 */
3824 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3826 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3827 	u8 tx_seq = __get_txseq(rx_control);
3828 	u8 req_seq = __get_reqseq(rx_control);
3829 	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3830 	int tx_seq_offset, expected_tx_seq_offset;
	/* Ack threshold: one RR per (tx_win/6 + 1) received frames. */
3831 	int num_to_ack = (pi->tx_win/6) + 1;
3834 	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
	/* F-bit answering our poll: stop the monitor timer. */
3836 	if (L2CAP_CTRL_FINAL & rx_control &&
3837 			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3838 		del_timer(&pi->monitor_timer);
3839 		if (pi->unacked_frames > 0)
3840 			__mod_retrans_timer();
3841 		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
	/* Piggybacked acknowledgement. */
3844 	pi->expected_ack_seq = req_seq;
3845 	l2cap_drop_acked_frames(sk);
3847 	if (tx_seq == pi->expected_tx_seq)
3850 	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3851 	if (tx_seq_offset < 0)
3852 		tx_seq_offset += 64;
3854 	/* invalid tx_seq */
3855 	if (tx_seq_offset >= pi->tx_win) {
3856 		l2cap_send_disconn_req(pi->conn, sk);
	/* NOTE(review): equality test on a bit-flag field below — every
	 * other site tests conn_state with '&'; this looks like it should
	 * be (pi->conn_state & L2CAP_CONN_LOCAL_BUSY). Confirm upstream. */
3860 	if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3863 	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3864 		struct srej_list *first;
3866 		first = list_first_entry(SREJ_LIST(sk),
3867 					struct srej_list,	list);
		/* Frame fills the oldest outstanding SREJ gap. */
3868 		if (tx_seq == first->tx_seq) {
3869 			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3870 			l2cap_check_srej_gap(sk, tx_seq);
3872 			list_del(&first->list);
			/* All gaps filled: leave SREJ recovery. */
3875 			if (list_empty(SREJ_LIST(sk))) {
3876 				pi->buffer_seq = pi->buffer_seq_srej;
3877 				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3881 			struct srej_list *l;
3883 			/* duplicated tx_seq */
3884 			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3887 			list_for_each_entry(l, SREJ_LIST(sk), list) {
3888 				if (l->tx_seq == tx_seq) {
3889 					l2cap_resend_srejframe(sk, tx_seq);
3893 			l2cap_send_srejframe(sk, tx_seq);
	/* Not in SREJ recovery yet: decide whether to enter it. */
3896 		expected_tx_seq_offset =
3897 			(pi->expected_tx_seq - pi->buffer_seq) % 64;
3898 		if (expected_tx_seq_offset < 0)
3899 			expected_tx_seq_offset += 64;
3901 		/* duplicated tx_seq */
3902 		if (tx_seq_offset < expected_tx_seq_offset)
3905 		pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3907 		INIT_LIST_HEAD(SREJ_LIST(sk));
3908 		pi->buffer_seq_srej = pi->buffer_seq;
3910 		__skb_queue_head_init(SREJ_QUEUE(sk));
3911 		__skb_queue_head_init(BUSY_QUEUE(sk));
3912 		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3914 		pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3916 		l2cap_send_srejframe(sk, tx_seq);
	/* In-sequence frame: advance window and deliver. */
3921 	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3923 	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3924 		bt_cb(skb)->tx_seq = tx_seq;
3925 		bt_cb(skb)->sar = sar;
3926 		__skb_queue_tail(SREJ_QUEUE(sk), skb);
3930 	if (rx_control & L2CAP_CTRL_FINAL) {
3931 		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3932 			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3934 			l2cap_retransmit_frames(sk);
3937 	err = l2cap_push_rx_skb(sk, skb, rx_control);
3943 	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3944 	if (pi->num_acked == num_to_ack - 1)
/*
 * Handle a Receiver Ready S-frame: process the acknowledgement, then
 * react to Poll (answer with I/RR/RNR or SREJ tail), to Final
 * (possibly retransmit after a REJ exchange), or simply resume
 * sending now that the peer is no longer busy.
 */
3954 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3956 	struct l2cap_pinfo *pi = l2cap_pi(sk);
3958 	pi->expected_ack_seq = __get_reqseq(rx_control);
3959 	l2cap_drop_acked_frames(sk);
3961 	if (rx_control & L2CAP_CTRL_POLL) {
3962 		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3963 			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3964 					(pi->unacked_frames > 0))
3965 				__mod_retrans_timer();
3967 			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			/* In SREJ recovery: answer the poll with the SREJ tail. */
3968 			l2cap_send_srejtail(sk);
3970 			l2cap_send_i_or_rr_or_rnr(sk);
3973 	} else if (rx_control & L2CAP_CTRL_FINAL) {
3974 		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3976 		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3977 			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3979 			l2cap_retransmit_frames(sk);
3982 		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3983 				(pi->unacked_frames > 0))
3984 			__mod_retrans_timer();
3986 		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3987 		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* Peer ready again: flush whatever is pending. */
3990 			spin_lock_bh(&pi->send_lock);
3991 			l2cap_ertm_send(sk);
3992 			spin_unlock_bh(&pi->send_lock);
/*
 * Handle a Reject S-frame: ack up to the rejected sequence number and
 * retransmit from there. With the F-bit, retransmission only happens
 * if no REJ exchange was already in flight (REJ_ACT); while waiting
 * for an F-bit (WAIT_F) the REJ is recorded as acted upon.
 */
3997 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3999 	struct l2cap_pinfo *pi = l2cap_pi(sk);
4000 	u8 tx_seq = __get_reqseq(rx_control);
4002 	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4004 	pi->expected_ack_seq = tx_seq;
4005 	l2cap_drop_acked_frames(sk);
4007 	if (rx_control & L2CAP_CTRL_FINAL) {
4008 		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4009 			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4011 			l2cap_retransmit_frames(sk);
4013 		l2cap_retransmit_frames(sk);
4015 		if (pi->conn_state & L2CAP_CONN_WAIT_F)
4016 			pi->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a Selective Reject S-frame: retransmit exactly the requested
 * frame. Poll additionally acks and triggers a send flush; Final
 * retransmits unless this SREJ was already acted on (SREJ_ACT with a
 * matching saved req_seq); while WAIT_F, the req_seq is saved so the
 * later F-bit can be matched up.
 */
4019 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4021 	struct l2cap_pinfo *pi = l2cap_pi(sk);
4022 	u8 tx_seq = __get_reqseq(rx_control);
4024 	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4026 	if (rx_control & L2CAP_CTRL_POLL) {
4027 		pi->expected_ack_seq = tx_seq;
4028 		l2cap_drop_acked_frames(sk);
4029 		l2cap_retransmit_one_frame(sk, tx_seq);
4031 		spin_lock_bh(&pi->send_lock);
4032 		l2cap_ertm_send(sk);
4033 		spin_unlock_bh(&pi->send_lock);
4035 		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4036 			pi->srej_save_reqseq = tx_seq;
4037 			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4039 	} else if (rx_control & L2CAP_CTRL_FINAL) {
4040 		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4041 				pi->srej_save_reqseq == tx_seq)
4042 			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4044 			l2cap_retransmit_one_frame(sk, tx_seq);
4046 		l2cap_retransmit_one_frame(sk, tx_seq);
4047 		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4048 			pi->srej_save_reqseq = tx_seq;
4049 			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a Receiver Not Ready S-frame: mark the peer busy, ack what
 * it acknowledged, stop retransmitting, and answer a Poll with
 * RR+Final (or the SREJ tail when in SREJ recovery).
 */
4054 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4056 	struct l2cap_pinfo *pi = l2cap_pi(sk);
4057 	u8 tx_seq = __get_reqseq(rx_control);
4059 	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4060 	pi->expected_ack_seq = tx_seq;
4061 	l2cap_drop_acked_frames(sk);
4063 	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer busy: no point running the retransmission timer. */
4064 		del_timer(&pi->retrans_timer);
4065 		if (rx_control & L2CAP_CTRL_POLL)
4066 			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4070 	if (rx_control & L2CAP_CTRL_POLL)
4071 		l2cap_send_srejtail(sk);
4073 		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/*
 * Dispatch a received ERTM S-frame to its handler by the SUPERVISE
 * bits, after first honouring an F-bit that answers our poll.
 */
4076 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4078 	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4080 	if (L2CAP_CTRL_FINAL & rx_control &&
4081 			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4082 		del_timer(&l2cap_pi(sk)->monitor_timer);
4083 		if (l2cap_pi(sk)->unacked_frames > 0)
4084 			__mod_retrans_timer();
4085 		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4088 	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4089 	case L2CAP_SUPER_RCV_READY:
4090 		l2cap_data_channel_rrframe(sk, rx_control);
4093 	case L2CAP_SUPER_REJECT:
4094 		l2cap_data_channel_rejframe(sk, rx_control);
4097 	case L2CAP_SUPER_SELECT_REJECT:
4098 		l2cap_data_channel_srejframe(sk, rx_control);
4101 	case L2CAP_SUPER_RCV_NOT_READY:
4102 		l2cap_data_channel_rnrframe(sk, rx_control);
/*
 * Entry point for data arriving on a connection-oriented channel.
 * Looks the channel up by CID and processes the frame according to
 * the channel mode: basic (straight to the socket queue), ERTM
 * (control-field validation, FCS check, I/S-frame dispatch), or
 * streaming (FCS check, sequence resync, SDU reassembly).
 */
4110 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4113 	struct l2cap_pinfo *pi;
4116 	int len, next_tx_seq_offset, req_seq_offset;
4118 	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4120 		BT_DBG("unknown cid 0x%4.4x", cid);
4126 	BT_DBG("sk %p, len %d", sk, skb->len);
4128 	if (sk->sk_state != BT_CONNECTED)
4132 	case L2CAP_MODE_BASIC:
4133 		/* If socket recv buffers overflows we drop data here
4134 		 * which is *bad* because L2CAP has to be reliable.
4135 		 * But we don't have any other choice. L2CAP doesn't
4136 		 * provide flow control mechanism. */
4138 		if (pi->imtu < skb->len)
4141 		if (!sock_queue_rcv_skb(sk, skb))
4145 	case L2CAP_MODE_ERTM:
		/* Frame starts with the 16-bit ERTM control field. */
4146 		control = get_unaligned_le16(skb->data);
		/* START-of-SDU I-frames carry an extra 2-byte SDU length. */
4150 		if (__is_sar_start(control) && __is_iframe(control))
4153 		if (pi->fcs == L2CAP_FCS_CRC16)
		/*
4157 		 * We can just drop the corrupted I-frame here.
4158 		 * Receiver will miss it and start proper recovery
4159 		 * procedures and ask retransmission.
		 */
4161 		if (len > pi->mps) {
4162 			l2cap_send_disconn_req(pi->conn, sk);
4166 		if (l2cap_check_fcs(pi, skb))
4169 		req_seq = __get_reqseq(control);
4170 		req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4171 		if (req_seq_offset < 0)
4172 			req_seq_offset += 64;
4174 		next_tx_seq_offset =
4175 			(pi->next_tx_seq - pi->expected_ack_seq) % 64;
4176 		if (next_tx_seq_offset < 0)
4177 			next_tx_seq_offset += 64;
4179 		/* check for invalid req-seq */
		/* Peer cannot ack frames we have not sent yet. */
4180 		if (req_seq_offset > next_tx_seq_offset) {
4181 			l2cap_send_disconn_req(pi->conn, sk);
4185 		if (__is_iframe(control)) {
			/* (elided) sanity check on I-frame length */
4187 				l2cap_send_disconn_req(pi->conn, sk);
4191 			l2cap_data_channel_iframe(sk, control, skb);
			/* (elided) sanity check on S-frame length */
4194 				l2cap_send_disconn_req(pi->conn, sk);
4198 			l2cap_data_channel_sframe(sk, control, skb);
4203 	case L2CAP_MODE_STREAMING:
4204 		control = get_unaligned_le16(skb->data);
4208 		if (__is_sar_start(control))
4211 		if (pi->fcs == L2CAP_FCS_CRC16)
		/* Streaming channels never carry S-frames. */
4214 		if (len > pi->mps || len < 0 || __is_sframe(control))
4217 		if (l2cap_check_fcs(pi, skb))
4220 		tx_seq = __get_txseq(control);
		/* Resynchronize on any gap instead of recovering it. */
4222 		if (pi->expected_tx_seq == tx_seq)
4223 			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4225 			pi->expected_tx_seq = (tx_seq + 1) % 64;
4227 		l2cap_streaming_reassembly_sdu(sk, skb, control);
4232 		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/*
 * Deliver a connectionless (PSM-addressed) frame to a matching bound
 * or connected socket, dropping oversized payloads.
 */
4246 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4250 	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4254 	BT_DBG("sk %p, len %d", sk, skb->len);
4256 	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4259 	if (l2cap_pi(sk)->imtu < skb->len)
4262 	if (!sock_queue_rcv_skb(sk, skb))
/*
 * Parse the basic L2CAP header of a complete frame and route it:
 * signalling CID, connectionless CID (PSM follows the header), or a
 * connection-oriented data channel by CID.
 */
4274 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4276 	struct l2cap_hdr *lh = (void *) skb->data;
4280 	skb_pull(skb, L2CAP_HDR_SIZE);
4281 	cid = __le16_to_cpu(lh->cid);
4282 	len = __le16_to_cpu(lh->len);
	/* Header length must match what actually arrived. */
4284 	if (len != skb->len) {
4289 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
4292 	case L2CAP_CID_SIGNALING:
4293 		l2cap_sig_channel(conn, skb);
4296 	case L2CAP_CID_CONN_LESS:
4297 		psm = get_unaligned_le16(skb->data);
4299 		l2cap_conless_channel(conn, psm, skb);
4303 		l2cap_data_channel(conn, cid, skb);
4308 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback for an incoming ACL connection: scan listening L2CAP
 * sockets and return the accept/role-switch link-mode bits, preferring
 * an exact local-address match (lm1) over a wildcard one (lm2).
 */
4310 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4312 	int exact = 0, lm1 = 0, lm2 = 0;
4313 	register struct sock *sk;
4314 	struct hlist_node *node;
4316 	if (type != ACL_LINK)
4319 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4321 	/* Find listening sockets and check their link_mode */
4322 	read_lock(&l2cap_sk_list.lock);
4323 	sk_for_each(sk, node, &l2cap_sk_list.head) {
4324 		if (sk->sk_state != BT_LISTEN)
		/* Socket bound to this adapter's own address: exact match. */
4327 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4328 			lm1 |= HCI_LM_ACCEPT;
4329 			if (l2cap_pi(sk)->role_switch)
4330 				lm1 |= HCI_LM_MASTER;
4332 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4333 			lm2 |= HCI_LM_ACCEPT;
4334 			if (l2cap_pi(sk)->role_switch)
4335 				lm2 |= HCI_LM_MASTER;
4338 	read_unlock(&l2cap_sk_list.lock);
4340 	return exact ? lm1 : lm2;
/*
 * HCI callback confirming an ACL connection attempt: on success attach
 * an l2cap_conn and mark it ready; on failure tear any existing one
 * down with the mapped errno.
 */
4343 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4345 	struct l2cap_conn *conn;
4347 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4349 	if (hcon->type != ACL_LINK)
4353 		conn = l2cap_conn_add(hcon, status);
4355 			l2cap_conn_ready(conn);
4357 		l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback asking whether/why L2CAP wants this link gone: returns
 * the disconnect reason recorded on the connection.
 */
4362 static int l2cap_disconn_ind(struct hci_conn *hcon)
4364 	struct l2cap_conn *conn = hcon->l2cap_data;
4366 	BT_DBG("hcon %p", hcon);
4368 	if (hcon->type != ACL_LINK || !conn)
4371 	return conn->disc_reason;
/*
 * HCI callback confirming ACL link disconnection: tear down the
 * L2CAP connection with the HCI reason mapped to an errno.
 */
4374 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4376 	BT_DBG("hcon %p reason %d", hcon, reason);
4378 	if (hcon->type != ACL_LINK)
4381 	l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a connected channel: losing
 * encryption gives MEDIUM-security channels a 5s grace timer and
 * closes HIGH-security ones; regaining it clears the grace timer.
 */
4386 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
	/* Only connection-oriented socket types carry a security level. */
4388 	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4391 	if (encrypt == 0x00) {
4392 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4393 			l2cap_sock_clear_timer(sk);
4394 			l2cap_sock_set_timer(sk, HZ * 5);
4395 		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4396 			__l2cap_sock_close(sk, ECONNREFUSED);
4398 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4399 			l2cap_sock_clear_timer(sk);
/*
 * HCI callback after an authentication/encryption procedure finishes.
 * Walks every channel on the connection: established channels get an
 * encryption re-check; channels in BT_CONNECT proceed to send their
 * pending Connect Request; channels in BT_CONNECT2 answer the peer
 * with success or security-block depending on status.
 */
4403 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4405 	struct l2cap_chan_list *l;
4406 	struct l2cap_conn *conn = hcon->l2cap_data;
4412 	l = &conn->chan_list;
4414 	BT_DBG("conn %p", conn);
4416 	read_lock(&l->lock);
4418 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		/* Channel already has its own connect pending elsewhere. */
4421 		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4426 		if (!status && (sk->sk_state == BT_CONNECTED ||
4427 						sk->sk_state == BT_CONFIG)) {
4428 			l2cap_check_encryption(sk, encrypt);
4433 		if (sk->sk_state == BT_CONNECT) {
			/* Security done: fire the deferred Connect Request. */
4435 				struct l2cap_conn_req req;
4436 				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4437 				req.psm  = l2cap_pi(sk)->psm;
4439 				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4440 				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4442 				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4443 					L2CAP_CONN_REQ, sizeof(req), &req);
				/* Security failed: short timer before closing. */
4445 				l2cap_sock_clear_timer(sk);
4446 				l2cap_sock_set_timer(sk, HZ / 10);
4448 		} else if (sk->sk_state == BT_CONNECT2) {
4449 			struct l2cap_conn_rsp rsp;
4453 				sk->sk_state = BT_CONFIG;
4454 				result = L2CAP_CR_SUCCESS;
4456 				sk->sk_state = BT_DISCONN;
4457 				l2cap_sock_set_timer(sk, HZ / 10);
4458 				result = L2CAP_CR_SEC_BLOCK;
4461 			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
4462 			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
4463 			rsp.result = cpu_to_le16(result);
4464 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4465 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4466 					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4472 	read_unlock(&l->lock);
/*
 * HCI callback for incoming ACL data. Reassembles L2CAP frames that
 * are fragmented across ACL packets: an ACL_START fragment parses the
 * L2CAP length and either dispatches a complete frame immediately or
 * allocates conn->rx_skb and tracks the bytes still expected in
 * conn->rx_len; continuation fragments append until the frame is
 * complete. Inconsistencies mark the connection unreliable (ECOMM).
 */
4477 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4479 	struct l2cap_conn *conn = hcon->l2cap_data;
4481 	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4484 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4486 	if (flags & ACL_START) {
4487 		struct l2cap_hdr *hdr;
		/* A START while a partial frame is pending: drop the old one. */
4491 			BT_ERR("Unexpected start frame (len %d)", skb->len);
4492 			kfree_skb(conn->rx_skb);
4493 			conn->rx_skb = NULL;
4495 			l2cap_conn_unreliable(conn, ECOMM);
		/* START fragment must at least contain the L2CAP header. */
4499 			BT_ERR("Frame is too short (len %d)", skb->len);
4500 			l2cap_conn_unreliable(conn, ECOMM);
4504 		hdr = (struct l2cap_hdr *) skb->data;
4505 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4507 		if (len == skb->len) {
4508 			/* Complete frame received */
4509 			l2cap_recv_frame(conn, skb);
4513 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4515 		if (skb->len > len) {
4516 			BT_ERR("Frame is too long (len %d, expected len %d)",
4518 			l2cap_conn_unreliable(conn, ECOMM);
4522 		/* Allocate skb for the complete frame (with header) */
4523 		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4527 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4529 		conn->rx_len = len - skb->len;
		/* Continuation fragment path. */
4531 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4533 		if (!conn->rx_len) {
4534 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4535 			l2cap_conn_unreliable(conn, ECOMM);
4539 		if (skb->len > conn->rx_len) {
4540 			BT_ERR("Fragment is too long (len %d, expected %d)",
4541 					skb->len, conn->rx_len);
4542 			kfree_skb(conn->rx_skb);
4543 			conn->rx_skb = NULL;
4545 			l2cap_conn_unreliable(conn, ECOMM);
4549 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4551 		conn->rx_len -= skb->len;
4553 		if (!conn->rx_len) {
4554 			/* Complete frame received */
4555 			l2cap_recv_frame(conn, conn->rx_skb);
4556 			conn->rx_skb = NULL;
/*
 * debugfs seq_file show handler: dump one line per L2CAP socket with
 * addresses, state, PSM, CIDs, MTUs and security level.
 */
4565 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4568 	struct hlist_node *node;
4570 	read_lock_bh(&l2cap_sk_list.lock);
4572 	sk_for_each(sk, node, &l2cap_sk_list.head) {
4573 		struct l2cap_pinfo *pi = l2cap_pi(sk);
4575 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4576 					batostr(&bt_sk(sk)->src),
4577 					batostr(&bt_sk(sk)->dst),
4578 					sk->sk_state, __le16_to_cpu(pi->psm),
4580 					pi->imtu, pi->omtu, pi->sec_level);
4583 	read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file single-show handler. */
4588 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4590 	return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/bluetooth/l2cap entry. */
4593 static const struct file_operations l2cap_debugfs_fops = {
4594 	.open		= l2cap_debugfs_open,
4596 	.llseek		= seq_lseek,
4597 	.release	= single_release,
/* Dentry of the debugfs file, created in l2cap_init(). */
4600 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
4602 static const struct proto_ops l2cap_sock_ops = {
4603 	.family		= PF_BLUETOOTH,
4604 	.owner		= THIS_MODULE,
4605 	.release	= l2cap_sock_release,
4606 	.bind		= l2cap_sock_bind,
4607 	.connect	= l2cap_sock_connect,
4608 	.listen		= l2cap_sock_listen,
4609 	.accept		= l2cap_sock_accept,
4610 	.getname	= l2cap_sock_getname,
4611 	.sendmsg	= l2cap_sock_sendmsg,
4612 	.recvmsg	= l2cap_sock_recvmsg,
4613 	.poll		= bt_sock_poll,
4614 	.ioctl		= bt_sock_ioctl,
4615 	.mmap		= sock_no_mmap,
4616 	.socketpair	= sock_no_socketpair,
4617 	.shutdown	= l2cap_sock_shutdown,
4618 	.setsockopt	= l2cap_sock_setsockopt,
4619 	.getsockopt	= l2cap_sock_getsockopt
/* Registration hook so socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP) works. */
4622 static const struct net_proto_family l2cap_sock_family_ops = {
4623 	.family	= PF_BLUETOOTH,
4624 	.owner	= THIS_MODULE,
4625 	.create	= l2cap_sock_create,
/* HCI protocol hooks: how the HCI core calls into L2CAP. */
4628 static struct hci_proto l2cap_hci_proto = {
4630 	.id		= HCI_PROTO_L2CAP,
4631 	.connect_ind	= l2cap_connect_ind,
4632 	.connect_cfm	= l2cap_connect_cfm,
4633 	.disconn_ind	= l2cap_disconn_ind,
4634 	.disconn_cfm	= l2cap_disconn_cfm,
4635 	.security_cfm	= l2cap_security_cfm,
4636 	.recv_acldata	= l2cap_recv_acldata
/*
 * Module init: register the proto, create the local-busy workqueue,
 * register the BT socket family and the HCI protocol hooks, and
 * create the debugfs entry. Unwinds registrations on failure (error
 * labels in elided lines).
 */
4639 static int __init l2cap_init(void)
4643 	err = proto_register(&l2cap_proto, 0);
4647 	_busy_wq = create_singlethread_workqueue("l2cap");
4651 	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4653 		BT_ERR("L2CAP socket registration failed");
4657 	err = hci_register_proto(&l2cap_hci_proto);
4659 		BT_ERR("L2CAP protocol registration failed");
4660 		bt_sock_unregister(BTPROTO_L2CAP);
	/* debugfs is best-effort: failure only logs an error. */
4665 		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4666 					bt_debugfs, NULL, &l2cap_debugfs_fops);
4668 			BT_ERR("Failed to create L2CAP debug file");
4671 	BT_INFO("L2CAP ver %s", VERSION);
4672 	BT_INFO("L2CAP socket layer initialized");
	/* Error path: undo proto_register (label in elided lines). */
4677 	proto_unregister(&l2cap_proto);
/*
 * Module exit: remove debugfs, drain and destroy the busy workqueue,
 * then unregister socket family, HCI protocol and proto — the reverse
 * of l2cap_init().
 */
4681 static void __exit l2cap_exit(void)
4683 	debugfs_remove(l2cap_debugfs);
4685 	flush_workqueue(_busy_wq);
4686 	destroy_workqueue(_busy_wq);
4688 	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4689 		BT_ERR("L2CAP socket unregistration failed");
4691 	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4692 		BT_ERR("L2CAP protocol unregistration failed");
4694 	proto_unregister(&l2cap_proto);
/* Exported no-op: referencing it pulls this module in via symbol deps. */
4697 void l2cap_load(void)
4699 	/* Dummy function to trigger automatic L2CAP module loading by
4700 	 * other modules that use L2CAP sockets but don't use any other
4701 	 * symbols from it. */
4703 EXPORT_SYMBOL(l2cap_load);
4705 module_init(l2cap_init);
4706 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared as 'static int' above but
 * exposed as a bool module_param — newer kernels require the variable
 * to be a real bool; confirm against the target kernel version. */
4708 module_param(enable_ertm, bool, 0644);
4709 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4711 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4712 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4713 MODULE_VERSION(VERSION);
4714 MODULE_LICENSE("GPL");
4715 MODULE_ALIAS("bt-proto-0");