2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
46 #include <asm/system.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.14"
55 static int enable_ertm = 0;
57 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
58 static u8 l2cap_fixed_chan[8] = { 0x02, };
60 static const struct proto_ops l2cap_sock_ops;
62 static struct bt_sock_list l2cap_sk_list = {
63 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
66 static void __l2cap_sock_close(struct sock *sk, int reason);
67 static void l2cap_sock_close(struct sock *sk);
68 static void l2cap_sock_kill(struct sock *sk);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */

/* Socket timer expiry handler (timer/softirq context).
 * Chooses an errno from the socket state and closes the channel:
 * a timeout while connected/configuring, or while connecting on a
 * non-SDP security level, is reported as ECONNREFUSED.
 * NOTE(review): declaration/initialization of `reason` and the socket
 * lock/unlock around the close are in lines elided from this extract. */
static void l2cap_sock_timeout(unsigned long arg)
	struct sock *sk = (struct sock *) arg;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;

	__l2cap_sock_close(sk, reason);

/* (Re)arm the socket timer to fire `timeout` jiffies from now. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);

/* Cancel a pending socket timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
/* ---- L2CAP channels ---- */

/* Walk the per-connection channel list and return the socket whose
 * destination CID matches, or fall through if none (return elided in
 * this extract). Caller must hold l->lock. */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->dcid == cid)

/* As above, but matching on the source CID. Caller must hold l->lock. */
static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->scid == cid)

/* Find channel with given SCID.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
	s = __l2cap_get_chan_by_scid(l, cid);
	/* NOTE(review): bh_lock_sock(s) presumably happens in an elided line
	 * between lookup and unlock — confirm against full source. */
	read_unlock(&l->lock);

/* Match on the pending signalling-command identifier instead of a CID.
 * Caller must hold l->lock. */
static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
		if (l2cap_pi(s)->ident == ident)

/* Locked wrapper around __l2cap_get_chan_by_ident(). */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
	s = __l2cap_get_chan_by_ident(l, ident);
	read_unlock(&l->lock);

/* Pick the first free source CID in the dynamic range
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Caller must hold l->lock. */
static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
	u16 cid = L2CAP_CID_DYN_START;

	for (; cid < L2CAP_CID_DYN_END; cid++) {
		if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the connection's channel list (doubly
 * linked via l2cap_pinfo next_c/prev_c). Caller must hold l->lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
	l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;

/* Remove sk from the connection's channel list, fixing up neighbour
 * links; takes l->lock itself (bh-safe writer lock). */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
		l2cap_pi(next)->prev_c = prev;
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);
/* Attach sk to conn: assign CIDs by socket type (dynamic CID for
 * SEQPACKET, the fixed connectionless CID for DGRAM, the signalling
 * CID for raw sockets), link it into the channel list and, for an
 * incoming channel, enqueue it on the listening parent.
 * Caller must hold the channel-list write lock (see l2cap_chan_add). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13 = HCI "Remote User Terminated Connection" — default
	 * disconnect reason reported if nothing more specific happens. */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;

	__l2cap_chan_link(l, sk);

	bt_accept_enqueue(parent, sk);
/* Detach sk from its connection and mark it closed.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	/* Unlink from channel list */
	l2cap_chan_unlink(&conn->chan_list, sk);
	l2cap_pi(sk)->conn = NULL;
	/* Drop the hci_conn reference taken when the channel was added. */
	hci_conn_put(conn->hcon);

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	/* Pending (not yet accepted) child: pull it off the parent's accept
	 * queue and wake the listener. */
	bt_accept_unlink(sk);
	parent->sk_data_ready(parent, 0);

	sk->sk_state_change(sk);
/* Service level security */

/* Map the channel's PSM and requested security level onto an HCI
 * authentication type, then ask the HCI layer to enforce it.
 * PSM 0x0001 (SDP) never requires bonding; its security level is
 * downgraded from LOW to the dedicated SDP level. */
static inline int l2cap_check_security(struct sock *sk)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			auth_type = HCI_AT_NO_BONDING;

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel's reserved range (see comment below);
 * serialized by conn->lock. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *    129 - 199 are reserved.
	 *    200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)

	spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command skb and hand it to the ACL layer.
 * Returns hci_send_acl()'s result. */
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	return hci_send_acl(conn->hcon, skb, 0);

/* Send an ERTM supervisory frame (S-frame): L2CAP header plus a 2-byte
 * control field with the frame-type bit forced on. Allocated with
 * GFP_ATOMIC since this may run from non-sleeping context. */
static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, L2CAP_HDR_SIZE + 2);
	control |= L2CAP_CTRL_FRAME_TYPE;

	skb = bt_skb_alloc(count, GFP_ATOMIC);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(2);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	return hci_send_acl(pi->conn->hcon, skb, 0);
/* Kick off channel establishment. If the remote's feature mask is
 * already known (info request sent and completed) and security checks
 * pass, issue a Connection Request; otherwise send an Information
 * Request for the feature mask first and arm the info timer. */
static void l2cap_do_start(struct sock *sk)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))

		if (l2cap_check_security(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound how long we wait for the info response. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);

/* Send an L2CAP Disconnection Request for sk's channel (dcid/scid pair)
 * with a freshly allocated command identifier. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
	struct l2cap_disconn_req req;

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);
/* ---- L2CAP connections ---- */

/* After the ACL link (and feature-mask exchange) is ready, walk every
 * channel on the connection: issue pending Connection Requests for
 * sockets in BT_CONNECT, and answer incoming requests (BT_CONNECT2)
 * with success, authorization-pending (defer_setup) or
 * authentication-pending, depending on the security check. */
static void l2cap_conn_start(struct l2cap_conn *conn)
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {

		/* Only connection-oriented channels take part. */
		if (sk->sk_type != SOCK_SEQPACKET) {

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					/* Let the listener decide via accept(). */
					parent->sk_data_ready(parent, 0);

					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	read_unlock(&l->lock);
/* ACL link is up: mark non-SEQPACKET channels connected immediately
 * and start connection establishment for channels in BT_CONNECT. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {

		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)

	read_unlock(&l->lock);

/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	/* Only channels that insisted on reliability get the error. */
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)

	read_unlock(&l->lock);
/* Info-request timer expiry: give up waiting for the remote feature
 * mask, mark the exchange done and proceed with channel setup anyway. */
static void l2cap_info_timeout(unsigned long arg)
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link: allocates conn atomically, wires it to the hci_conn, copies
 * MTU/addresses, and initializes the info timer, command-ident lock and
 * channel-list lock. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn = hcon->l2cap_data;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);

	hcon->l2cap_data = conn;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* 0x13 = HCI "Remote User Terminated Connection" default. */
	conn->disc_reason = 0x13;

/* Tear down an L2CAP connection: free any partial reassembly skb,
 * close every remaining channel with err, cancel the info timer if it
 * was armed, and detach from the hci_conn. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Kill channels one by one off the list head. */
	while ((sk = conn->chan_list.head)) {
		l2cap_chan_del(sk, err);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
/* Locked wrapper: add a channel to conn under the channel-list
 * writer lock (bh-safe). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
	struct l2cap_chan_list *l = &conn->chan_list;

	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
/* ---- Socket interface ---- */

/* Find a socket bound to exactly this (psm, source bdaddr) pair —
 * used to detect bind conflicts. Caller must hold l2cap_sk_list.lock. */
static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
	struct hlist_node *node;
	sk_for_each(sk, node, &l2cap_sk_list.head)
		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))

/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins, otherwise
 * a BDADDR_ANY wildcard listener (kept in sk1) is returned. */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))

			/* Closest match. */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))

	/* node != NULL means the loop broke on an exact match. */
	return node ? sk : sk1;

/* Find socket with given address (psm, src).
 * Returns locked socket */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	/* NOTE(review): bh_lock_sock(s) presumably sits in an elided line
	 * before the unlock — confirm against full source. */
	read_unlock(&l2cap_sk_list.lock);
/* sk destructor: drop any skbs still sitting on the receive and
 * write queues. */
static void l2cap_sock_destruct(struct sock *sk)
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 * Unlinks the socket from the global list and marks it dead so the
 * final reference drop frees it. */
static void l2cap_sock_kill(struct sock *sk)
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners tear down their accept queue;
 * established SEQPACKET channels send a Disconnection Request and wait
 * in BT_DISCONN; half-open incoming channels (BT_CONNECT2) are refused
 * with SEC_BLOCK or BAD_PSM; everything else is detached directly.
 * Caller holds the socket lock. */
static void __l2cap_sock_close(struct sock *sk, int reason)
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
		l2cap_sock_cleanup_listen(sk);

		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Wait (bounded by sk_sndtimeo) for the disconnect
			 * response before finishing teardown. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
			l2cap_chan_del(sk, reason);

		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
				result = L2CAP_CR_BAD_PSM;

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

			l2cap_chan_del(sk, reason);

		l2cap_chan_del(sk, reason);

		sock_set_flag(sk, SOCK_ZAPPED);

/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
	l2cap_sock_clear_timer(sk);
	__l2cap_sock_close(sk, ECONNRESET);
/* Initialize a freshly allocated L2CAP socket. With a parent (accepted
 * child), inherit type, defer_setup and all channel options; without
 * one, install the defaults (basic mode, CRC16 FCS, low security). */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
	struct l2cap_pinfo *pi = l2cap_pi(sk);

		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs  = l2cap_pi(parent)->fcs;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->mode = L2CAP_MODE_BASIC;
		pi->fcs  = L2CAP_FCS_CRC16;
		pi->sec_level = BT_SECURITY_LOW;
		pi->force_reliable = 0;

	/* Default config options */
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor: sizes sk allocation for struct l2cap_pinfo. */
static struct proto l2cap_proto = {
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)

/* Allocate and minimally initialize an L2CAP sock: destructor, send
 * timeout, state/protocol fields, the per-socket timer, and a link
 * into the global l2cap_sk_list. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: accept only SEQPACKET/DGRAM/RAW types, require
 * CAP_NET_RAW for raw sockets, then allocate and init the sock. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);

	l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, require BT_OPEN
 * state and CAP_NET_BIND_SERVICE for privileged PSMs (< 0x1001),
 * reject duplicate (psm, bdaddr) bindings, then record the source
 * address and PSM. SDP (0x0001) and RFCOMM (0x0003) PSMs get the
 * dedicated SDP security level. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;

	if (!addr || addr->sa_family != AF_BLUETOOTH)

	/* Tolerate callers passing a shorter sockaddr. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (sk->sk_state != BT_OPEN) {

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm   = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;

	write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to dst, pick the HCI authentication type from socket
 * type / PSM / security level (raw sockets use dedicated bonding, SDP
 * never bonds, everything else uses general bonding), create the ACL
 * link and attach the channel. If the link is already up the socket is
 * advanced immediately; otherwise the connect timer is left running. */
static int l2cap_do_connect(struct sock *sk)
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),

	hdev = hci_get_route(dst, src);
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			auth_type = HCI_AT_NO_BONDING;
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			auth_type = HCI_AT_NO_BONDING;

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);

	conn = l2cap_conn_add(hcon, 0);

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET) {
			/* Non-SEQPACKET sockets need no channel setup. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;

	hci_dev_unlock_bh(hdev);
/* connect(2): validate the address and (for SEQPACKET) a non-zero PSM,
 * restrict ERTM/streaming modes behind the enable_ertm switch, record
 * the destination, start the connection, and optionally block until
 * BT_CONNECTED (honoring O_NONBLOCK / send timeout). */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;

	if (!addr || addr->sa_family != AF_BLUETOOTH)

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connection-oriented channels require a PSM. */
	if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:

	switch (sk->sk_state) {
		/* Already connecting */

		/* Already connected */

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);

	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen; ERTM/streaming
 * modes are gated on enable_ertm. If the socket has no PSM yet, grab
 * the first free odd PSM in the dynamic range 0x1001-0x10ff (valid
 * PSMs have the least significant bit of the low octet set). */
static int l2cap_sock_listen(struct socket *sock, int backlog)
	struct sock *sk = sock->sk;

	BT_DBG("sk %p backlog %d", sk, backlog);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;

		write_lock_bh(&l2cap_sk_list.lock);

		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);

		write_unlock_bh(&l2cap_sk_list.lock);

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) on the listener's wait
 * queue until a child appears on the accept queue, the timeout elapses,
 * a signal arrives, or the socket leaves BT_LISTEN. The socket lock is
 * dropped around schedule_timeout() and re-taken afterwards. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);

		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer's
 * (psm, bdaddr, dcid) or the local (sport, bdaddr, scid) triple,
 * selected by `peer`. */
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	addr->sa_family = AF_BLUETOOTH;
	*len = sizeof(struct sockaddr_l2);

		la->l2_psm = l2cap_pi(sk)->psm;
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);

		la->l2_psm = l2cap_pi(sk)->sport;
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM: pop frames off the transmit queue until the first unacked one
 * (tx_seq == expected_ack_seq), decrementing the unacked counter. */
static void l2cap_drop_acked_frames(struct sock *sk)
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk)))) {
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)

		skb = skb_dequeue(TX_QUEUE(sk));

		l2cap_pi(sk)->unacked_frames--;

/* Hand a fully built PDU to the ACL layer for this channel's link. */
static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	err = hci_send_acl(pi->conn->hcon, skb, 0);
/* ERTM transmit: while there are queued frames and the tx window is
 * open, clone each skb, stamp the current ReqSeq/TxSeq into its control
 * field, and send it. The original skb stays queued for retransmission;
 * TxSeq wraps modulo 64 (6-bit sequence space). On send failure a
 * Disconnection Request is issued. */
static int l2cap_ertm_send(struct sock *sk)
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
		/* Clone so the queued original survives for retransmit. */
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
				| (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		err = l2cap_do_send(sk, tx_skb);
			l2cap_send_disconn_req(pi->conn, sk);

		bt_cb(skb)->tx_seq = pi->next_tx_seq;
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;

		pi->unacked_frames++;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Copy `len` bytes of user iovec data into skb: `count` bytes into the
 * head skb, then the remainder into a chain of fragment skbs hung off
 * frag_list, each at most conn->mtu bytes. */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))

		frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * payload copied from the user iovec (fragmented to conn->mtu).
 * Returns the skb or an ERR_PTR. */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);

/* Build a basic-mode (B-frame) PDU: bare L2CAP header + payload.
 * Returns the skb or an ERR_PTR. */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);

/* Build an ERTM (I-frame) PDU: L2CAP header + 2-byte control field
 * (sequence bits are filled in later by l2cap_ertm_send) + payload.
 * Returns the skb or an ERR_PTR. */
static struct sk_buff *l2cap_create_ertm_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* sendmsg(2): reject MSG_OOB, enforce the outgoing MTU for basic-mode
 * SEQPACKET sockets, require BT_CONNECTED, then build and send a PDU
 * of the kind matching the socket type and channel mode. In ERTM mode
 * the PDU is queued on the tx queue and pushed via l2cap_ertm_send()
 * (SDU segmentation not implemented at this version). */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);

	if (msg->msg_flags & MSG_OOB)

	/* Check outgoing MTU */
	if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC

	if (sk->sk_state != BT_CONNECTED) {

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		err = l2cap_do_send(sk, skb);

	case L2CAP_MODE_BASIC:
		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);

		err = l2cap_do_send(sk, skb);

	case L2CAP_MODE_ERTM:
		/* Entire SDU fits into one PDU */
		if (len <= pi->omtu) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_ertm_pdu(sk, msg, len, control);

			/* FIXME: Segmentation will be added later */

		__skb_queue_tail(TX_QUEUE(sk), skb);
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb;

		err = l2cap_ertm_send(sk);

		BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg(2): a first read on a deferred-setup socket in BT_CONNECT2
 * implicitly accepts the pending connection (sends a success
 * Connection Response and moves to BT_CONFIG); then defer to the
 * generic Bluetooth receive path. */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
	struct sock *sk = sock->sk;

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;

		sk->sk_state = BT_CONFIG;

		rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS copies in struct
 * l2cap_options (seeded with current values so short writes keep the
 * rest); the link-mode option maps L2CAP_LM_* flags onto BT security
 * levels, role switch and force-reliable. */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
	struct sock *sk = sock->sk;
	struct l2cap_options opts;

	BT_DBG("sk %p", sk);

		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->mode = opts.mode;

		if (get_user(opt, (u32 __user *) optval)) {

		/* Later flags override: SECURE > ENCRYPT > AUTH. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);

/* SOL_BLUETOOTH setsockopt: dispatches SOL_L2CAP to the legacy handler;
 * handles BT_SECURITY (SEQPACKET/RAW only, level range-checked) and
 * BT_DEFER_SETUP (only while bound or listening). */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
	struct sock *sk = sock->sk;
	struct bt_security sec;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {

		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {

		l2cap_pi(sk)->sec_level = sec.level;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {

		if (get_user(opt, (u32 __user *) optval)) {

		bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS, L2CAP_LM and
 * L2CAP_CONNINFO.  Copies at most the caller-supplied length back to
 * userspace.  NOTE(review): elided view -- switch head, break
 * statements and lock/unlock lines are not visible.
 */
1595 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1597 struct sock *sk = sock->sk;
1598 struct l2cap_options opts;
1599 struct l2cap_conninfo cinfo;
1603 BT_DBG("sk %p", sk);
1605 if (get_user(len, optlen))
/* L2CAP_OPTIONS: report the channel's current MTU/flush/mode settings. */
1612 opts.imtu = l2cap_pi(sk)->imtu;
1613 opts.omtu = l2cap_pi(sk)->omtu;
1614 opts.flush_to = l2cap_pi(sk)->flush_to;
1615 opts.mode = l2cap_pi(sk)->mode;
1617 len = min_t(unsigned int, len, sizeof(opts));
1618 if (copy_to_user(optval, (char *) &opts, len))
/* L2CAP_LM: map the internal security level back to link-mode bits. */
1624 switch (l2cap_pi(sk)->sec_level) {
1625 case BT_SECURITY_LOW:
1626 opt = L2CAP_LM_AUTH;
1628 case BT_SECURITY_MEDIUM:
1629 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1631 case BT_SECURITY_HIGH:
1632 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1640 if (l2cap_pi(sk)->role_switch)
1641 opt |= L2CAP_LM_MASTER;
1643 if (l2cap_pi(sk)->force_reliable)
1644 opt |= L2CAP_LM_RELIABLE;
1646 if (put_user(opt, (u32 __user *) optval))
/* L2CAP_CONNINFO: only valid once connected (or deferred CONNECT2). */
1650 case L2CAP_CONNINFO:
1651 if (sk->sk_state != BT_CONNECTED &&
1652 !(sk->sk_state == BT_CONNECT2 &&
1653 bt_sk(sk)->defer_setup)) {
1658 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1659 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1661 len = min_t(unsigned int, len, sizeof(cinfo));
1662 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * Get a SOL_BLUETOOTH socket option (BT_SECURITY, BT_DEFER_SETUP).
 * SOL_L2CAP is routed to the legacy handler; other levels return
 * -ENOPROTOOPT.  NOTE(review): elided view -- switch head and error
 * paths are not visible here.
 */
1676 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1678 struct sock *sk = sock->sk;
1679 struct bt_security sec;
1682 BT_DBG("sk %p", sk);
1684 if (level == SOL_L2CAP)
1685 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1687 if (level != SOL_BLUETOOTH)
1688 return -ENOPROTOOPT;
1690 if (get_user(len, optlen))
/* BT_SECURITY only applies to SEQPACKET and RAW L2CAP sockets. */
1697 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1702 sec.level = l2cap_pi(sk)->sec_level;
1704 len = min_t(unsigned int, len, sizeof(sec));
1705 if (copy_to_user(optval, (char *) &sec, len))
/* BT_DEFER_SETUP readable only while bound/listening. */
1710 case BT_DEFER_SETUP:
1711 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1716 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * Shut down an L2CAP socket: mark both directions closed, stop the
 * channel timer and start the close handshake.  With SO_LINGER set,
 * waits (up to sk_lingertime) for the socket to reach BT_CLOSED.
 */
1730 static int l2cap_sock_shutdown(struct socket *sock, int how)
1732 struct sock *sk = sock->sk;
1735 BT_DBG("sock %p, sk %p", sock, sk);
/* Only run the close sequence once per socket. */
1741 if (!sk->sk_shutdown) {
1742 sk->sk_shutdown = SHUTDOWN_MASK;
1743 l2cap_sock_clear_timer(sk);
1744 __l2cap_sock_close(sk, 0);
1746 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1747 err = bt_sock_wait_state(sk, BT_CLOSED,
/*
 * Release an L2CAP socket: perform a full shutdown, then kill
 * (unlink and free) the sock.
 */
1754 static int l2cap_sock_release(struct socket *sock)
1756 struct sock *sk = sock->sk;
1759 BT_DBG("sock %p, sk %p", sock, sk);
1764 err = l2cap_sock_shutdown(sock, 2);
1767 l2cap_sock_kill(sk);
/*
 * Mark a channel as fully configured.  Clears config state and the
 * channel timer, then wakes whoever is waiting: the connecting
 * process (outgoing) or the listening parent (incoming).
 */
1771 static void l2cap_chan_ready(struct sock *sk)
1773 struct sock *parent = bt_sk(sk)->parent;
1775 BT_DBG("sk %p, parent %p", sk, parent);
1777 l2cap_pi(sk)->conf_state = 0;
1778 l2cap_sock_clear_timer(sk);
1781 /* Outgoing channel.
1782 * Wake up socket sleeping on connect.
1784 sk->sk_state = BT_CONNECTED;
1785 sk->sk_state_change(sk);
1787 /* Incoming channel.
1788 * Wake up socket sleeping on accept.
1790 parent->sk_data_ready(parent, 0);
1794 /* Copy frame to all raw sockets on that connection */
/*
 * Clone the skb to every SOCK_RAW socket on this connection (sniffer
 * sockets), skipping the socket the frame came from.  Clones are
 * GFP_ATOMIC since this runs in the receive path under a read lock.
 */
1795 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1797 struct l2cap_chan_list *l = &conn->chan_list;
1798 struct sk_buff *nskb;
1801 BT_DBG("conn %p", conn);
1803 read_lock(&l->lock);
1804 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1805 if (sk->sk_type != SOCK_RAW)
1808 /* Don't send frame to the socket it came from */
1811 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the socket queue rejects the clone it is freed (elided path). */
1815 if (sock_queue_rcv_skb(sk, nskb))
1818 read_unlock(&l->lock);
1821 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling command skb: L2CAP header (CID 0x0001) +
 * command header + payload.  Payload larger than the connection MTU
 * is split into continuation fragments chained on frag_list.
 * NOTE(review): allocation-failure cleanup paths are elided here.
 */
1822 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1823 u8 code, u8 ident, u16 dlen, void *data)
1825 struct sk_buff *skb, **frag;
1826 struct l2cap_cmd_hdr *cmd;
1827 struct l2cap_hdr *lh;
1830 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1831 conn, code, ident, dlen);
1833 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
/* First fragment is capped at the connection MTU. */
1834 count = min_t(unsigned int, conn->mtu, len);
1836 skb = bt_skb_alloc(count, GFP_ATOMIC);
/* L2CAP header: length covers command header plus payload. */
1840 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1841 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1842 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1844 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1847 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits after the two headers. */
1850 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1851 memcpy(skb_put(skb, count), data, count);
1857 /* Continuation fragments (no L2CAP header) */
1858 frag = &skb_shinfo(skb)->frag_list;
1860 count = min_t(unsigned int, conn->mtu, len);
1862 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1866 memcpy(skb_put(*frag, count), data, count);
1871 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr.  Returns the total option
 * length consumed and stores the value in *val: 1/2/4-byte options
 * are read inline (little-endian), larger ones are returned as a
 * pointer to the raw option data.
 */
1881 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1883 struct l2cap_conf_opt *opt = *ptr;
1886 len = L2CAP_CONF_OPT_SIZE + opt->len;
1894 *val = *((u8 *) opt->val);
1898 *val = __le16_to_cpu(*((__le16 *) opt->val));
1902 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-size option: hand back a pointer, not a value. */
1906 *val = (unsigned long) opt->val;
1910 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option at *ptr and advance the pointer.
 * 1/2/4-byte values are stored inline (little-endian); anything
 * larger is memcpy'd from the buffer `val` points to.
 */
1914 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1916 struct l2cap_conf_opt *opt = *ptr;
1918 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1925 *((u8 *) opt->val) = val;
1929 *((__le16 *) opt->val) = cpu_to_le16(val);
1933 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Larger options: val is actually a pointer to the data. */
1937 memcpy(opt->val, (void *) val, len);
1941 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Check whether ERTM/Streaming mode is supported by both the local
 * feature mask and the remote one.  ERTM is only advertised locally
 * when the enable_ertm module parameter is set (guard elided here).
 */
1944 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1946 u32 local_feat_mask = l2cap_feat_mask;
1948 local_feat_mask |= L2CAP_FEAT_ERTM;
1951 case L2CAP_MODE_ERTM:
1952 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1953 case L2CAP_MODE_STREAMING:
1954 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Pick the channel mode: keep the requested ERTM/Streaming mode if
 * the remote supports it, otherwise fall back to basic mode.
 */
1960 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1963 case L2CAP_MODE_STREAMING:
1964 case L2CAP_MODE_ERTM:
1965 if (l2cap_mode_supported(mode, remote_feat_mask))
1969 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing configuration request for this channel into
 * `data`.  Negotiates the mode on the first request, then emits MTU
 * (basic mode) or an RFC option (ERTM/Streaming) as appropriate.
 * Returns the request length (return statement elided from view).
 */
1973 static int l2cap_build_conf_req(struct sock *sk, void *data)
1975 struct l2cap_pinfo *pi = l2cap_pi(sk);
1976 struct l2cap_conf_req *req = data;
1977 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
1978 void *ptr = req->data;
1980 BT_DBG("sk %p", sk);
/* Mode is only (re)negotiated before any config exchange happened. */
1982 if (pi->num_conf_req || pi->num_conf_rsp)
1986 case L2CAP_MODE_STREAMING:
1987 case L2CAP_MODE_ERTM:
1988 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
/* Requested enhanced mode unsupported by the peer: tear down. */
1989 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
1990 l2cap_send_disconn_req(pi->conn, sk);
1993 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
1999 case L2CAP_MODE_BASIC:
/* Only send an MTU option when it differs from the default. */
2000 if (pi->imtu != L2CAP_DEFAULT_MTU)
2001 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2004 case L2CAP_MODE_ERTM:
2005 rfc.mode = L2CAP_MODE_ERTM;
2006 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2007 rfc.max_transmit = L2CAP_DEFAULT_MAX_RECEIVE;
/* Timeouts are filled in by the responder per the spec. */
2008 rfc.retrans_timeout = 0;
2009 rfc.monitor_timeout = 0;
2010 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_RX_APDU);
2012 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2013 sizeof(rfc), (unsigned long) &rfc);
2016 case L2CAP_MODE_STREAMING:
2017 rfc.mode = L2CAP_MODE_STREAMING;
2019 rfc.max_transmit = 0;
2020 rfc.retrans_timeout = 0;
2021 rfc.monitor_timeout = 0;
2022 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_RX_APDU);
2024 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2025 sizeof(rfc), (unsigned long) &rfc);
2029 /* FIXME: Need actual value of the flush timeout */
2030 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2031 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2033 req->dcid = cpu_to_le16(pi->dcid);
2034 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated configuration request (pi->conf_req)
 * and build our response into `data`.  Unknown non-hint options are
 * echoed back with L2CAP_CONF_UNKNOWN; MTU and RFC options drive the
 * mode/MTU negotiation.  Returns -ECONNREFUSED when the modes cannot
 * be reconciled.  NOTE(review): elided view -- several break/brace
 * lines and the final return are not visible.
 */
2039 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2041 struct l2cap_pinfo *pi = l2cap_pi(sk);
2042 struct l2cap_conf_rsp *rsp = data;
2043 void *ptr = rsp->data;
2044 void *req = pi->conf_req;
2045 int len = pi->conf_len;
2046 int type, hint, olen;
2048 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2049 u16 mtu = L2CAP_DEFAULT_MTU;
2050 u16 result = L2CAP_CONF_SUCCESS;
2052 BT_DBG("sk %p", sk);
/* Walk every option in the buffered request. */
2054 while (len >= L2CAP_CONF_OPT_SIZE) {
2055 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory ones must be understood. */
2057 hint = type & L2CAP_CONF_HINT;
2058 type &= L2CAP_CONF_MASK;
2061 case L2CAP_CONF_MTU:
2065 case L2CAP_CONF_FLUSH_TO:
2069 case L2CAP_CONF_QOS:
2072 case L2CAP_CONF_RFC:
2073 if (olen == sizeof(rfc))
2074 memcpy(&rfc, (void *) val, olen);
/* Unknown mandatory option: reject and list it in the response. */
2081 result = L2CAP_CONF_UNKNOWN;
2082 *((u8 *) ptr++) = type;
/* Mode can only be negotiated before any config exchange. */
2087 if (pi->num_conf_rsp || pi->num_conf_req)
2091 case L2CAP_MODE_STREAMING:
2092 case L2CAP_MODE_ERTM:
2093 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2094 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2095 return -ECONNREFUSED;
2098 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Peer proposed a different mode: counter-propose ours once. */
2103 if (pi->mode != rfc.mode) {
2104 result = L2CAP_CONF_UNACCEPT;
2105 rfc.mode = pi->mode;
2107 if (pi->num_conf_rsp == 1)
2108 return -ECONNREFUSED;
2110 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2111 sizeof(rfc), (unsigned long) &rfc);
2115 if (result == L2CAP_CONF_SUCCESS) {
2116 /* Configure output options and let the other side know
2117 * which ones we don't like. */
/* MTU below the spec minimum is unacceptable. */
2119 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2120 result = L2CAP_CONF_UNACCEPT;
2123 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2125 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2128 case L2CAP_MODE_BASIC:
2129 pi->fcs = L2CAP_FCS_NONE;
2130 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2133 case L2CAP_MODE_ERTM:
/* Record the peer's ERTM parameters and supply our timeouts. */
2134 pi->remote_tx_win = rfc.txwin_size;
2135 pi->remote_max_tx = rfc.max_transmit;
2136 pi->max_pdu_size = rfc.max_pdu_size;
2138 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2139 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2141 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2144 case L2CAP_MODE_STREAMING:
2145 pi->remote_tx_win = rfc.txwin_size;
2146 pi->max_pdu_size = rfc.max_pdu_size;
2148 pi->conf_state |= L2CAP_CONF_MODE_DONE;
/* Any other mode: refuse and echo our mode back. */
2152 result = L2CAP_CONF_UNACCEPT;
2154 memset(&rfc, 0, sizeof(rfc));
2155 rfc.mode = pi->mode;
2158 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2159 sizeof(rfc), (unsigned long) &rfc);
2161 if (result == L2CAP_CONF_SUCCESS)
2162 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2164 rsp->scid = cpu_to_le16(pi->dcid);
2165 rsp->result = cpu_to_le16(result);
2166 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's configuration response and build an adjusted
 * follow-up request into `data`.  On success, latch the negotiated
 * ERTM/Streaming parameters into the channel.  Returns -ECONNREFUSED
 * when the peer insists on a mode we locally pinned.
 */
2171 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2173 struct l2cap_pinfo *pi = l2cap_pi(sk);
2174 struct l2cap_conf_req *req = data;
2175 void *ptr = req->data;
2178 struct l2cap_conf_rfc rfc;
2180 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2182 while (len >= L2CAP_CONF_OPT_SIZE) {
2183 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2186 case L2CAP_CONF_MTU:
/* Peer offered an MTU below the minimum: counter with the minimum. */
2187 if (val < L2CAP_DEFAULT_MIN_MTU) {
2188 *result = L2CAP_CONF_UNACCEPT;
2189 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2192 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2195 case L2CAP_CONF_FLUSH_TO:
2197 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2201 case L2CAP_CONF_RFC:
2202 if (olen == sizeof(rfc))
2203 memcpy(&rfc, (void *)val, olen);
/* If the mode was locally pinned, the peer may not change it. */
2205 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2206 rfc.mode != pi->mode)
2207 return -ECONNREFUSED;
2209 pi->mode = rfc.mode;
2212 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2213 sizeof(rfc), (unsigned long) &rfc);
/* Response accepted: adopt the peer's ERTM/Streaming parameters. */
2218 if (*result == L2CAP_CONF_SUCCESS) {
2220 case L2CAP_MODE_ERTM:
2221 pi->remote_tx_win = rfc.txwin_size;
2222 pi->retrans_timeout = rfc.retrans_timeout;
2223 pi->monitor_timeout = rfc.monitor_timeout;
2224 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2226 case L2CAP_MODE_STREAMING:
2227 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2232 req->dcid = cpu_to_le16(pi->dcid);
2233 req->flags = cpu_to_le16(0x0000);
/*
 * Build a minimal configuration response (scid/result/flags only,
 * no options) into `data`.  Return value (response length) elided.
 */
2238 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2240 struct l2cap_conf_rsp *rsp = data;
2241 void *ptr = rsp->data;
2243 BT_DBG("sk %p", sk);
2245 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2246 rsp->result = cpu_to_le16(result);
2247 rsp->flags = cpu_to_le16(flags);
/*
 * Handle a Command Reject.  Only reason 0x0000 (command not
 * understood) for our pending info request matters: treat it like a
 * completed feature-mask exchange and kick off pending channels.
 */
2252 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2254 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2256 if (rej->reason != 0x0000)
/* Reject matches our outstanding info request: stop waiting. */
2259 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2260 cmd->ident == conn->info_ident) {
2261 del_timer(&conn->info_timer);
2263 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2264 conn->info_ident = 0;
2266 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request.  Finds a listening socket
 * for the PSM, enforces link security (except for SDP), allocates a
 * child socket, adds the channel, and answers with success/pending/
 * refusal.  If the feature-mask exchange has not completed yet, the
 * response is PEND and an Information Request is sent first.
 * NOTE(review): elided view -- goto labels, error-path braces and the
 * parent lock acquisition are not visible here.
 */
2272 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2274 struct l2cap_chan_list *list = &conn->chan_list;
2275 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2276 struct l2cap_conn_rsp rsp;
2277 struct sock *sk, *parent;
2278 int result, status = L2CAP_CS_NO_INFO;
2280 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2281 __le16 psm = req->psm;
2283 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2285 /* Check if we have socket listening on psm */
2286 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2288 result = L2CAP_CR_BAD_PSM;
2292 /* Check if the ACL is secure enough (if not SDP) */
2293 if (psm != cpu_to_le16(0x0001) &&
2294 !hci_conn_check_link_mode(conn->hcon)) {
2295 conn->disc_reason = 0x05;
2296 result = L2CAP_CR_SEC_BLOCK;
2300 result = L2CAP_CR_NO_MEM;
2302 /* Check for backlog size */
2303 if (sk_acceptq_is_full(parent)) {
2304 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2308 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2312 write_lock_bh(&list->lock);
2314 /* Check if we already have channel with that dcid */
2315 if (__l2cap_get_chan_by_dcid(list, scid)) {
2316 write_unlock_bh(&list->lock);
2317 sock_set_flag(sk, SOCK_ZAPPED);
2318 l2cap_sock_kill(sk);
2322 hci_conn_hold(conn->hcon);
/* Initialize the child from the listening parent's settings. */
2324 l2cap_sock_init(sk, parent);
2325 bacpy(&bt_sk(sk)->src, conn->src);
2326 bacpy(&bt_sk(sk)->dst, conn->dst);
2327 l2cap_pi(sk)->psm = psm;
2328 l2cap_pi(sk)->dcid = scid;
2330 __l2cap_chan_add(conn, sk, parent);
2331 dcid = l2cap_pi(sk)->scid;
2333 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2335 l2cap_pi(sk)->ident = cmd->ident;
/* Only answer definitively once the peer's features are known. */
2337 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2338 if (l2cap_check_security(sk)) {
2339 if (bt_sk(sk)->defer_setup) {
2340 sk->sk_state = BT_CONNECT2;
2341 result = L2CAP_CR_PEND;
2342 status = L2CAP_CS_AUTHOR_PEND;
2343 parent->sk_data_ready(parent, 0);
2345 sk->sk_state = BT_CONFIG;
2346 result = L2CAP_CR_SUCCESS;
2347 status = L2CAP_CS_NO_INFO;
2350 sk->sk_state = BT_CONNECT2;
2351 result = L2CAP_CR_PEND;
2352 status = L2CAP_CS_AUTHEN_PEND;
2355 sk->sk_state = BT_CONNECT2;
2356 result = L2CAP_CR_PEND;
2357 status = L2CAP_CS_NO_INFO;
2360 write_unlock_bh(&list->lock);
2363 bh_unlock_sock(parent);
2366 rsp.scid = cpu_to_le16(scid);
2367 rsp.dcid = cpu_to_le16(dcid);
2368 rsp.result = cpu_to_le16(result);
2369 rsp.status = cpu_to_le16(status);
2370 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info: start the feature-mask exchange now. */
2372 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2373 struct l2cap_info_req info;
2374 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2376 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2377 conn->info_ident = l2cap_get_ident(conn);
2379 mod_timer(&conn->info_timer, jiffies +
2380 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2382 l2cap_send_cmd(conn, conn->info_ident,
2383 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle a Connection Response.  Locate the channel by scid (or by
 * ident when no scid was assigned yet); on success move to BT_CONFIG
 * and send the first configuration request, on pending just flag the
 * state, otherwise delete the channel with ECONNREFUSED.
 */
2389 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2391 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2392 u16 scid, dcid, result, status;
2396 scid = __le16_to_cpu(rsp->scid);
2397 dcid = __le16_to_cpu(rsp->dcid);
2398 result = __le16_to_cpu(rsp->result);
2399 status = __le16_to_cpu(rsp->status);
2401 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2404 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* Fall back to ident lookup for a still-pending channel. */
2408 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2414 case L2CAP_CR_SUCCESS:
2415 sk->sk_state = BT_CONFIG;
2416 l2cap_pi(sk)->ident = 0;
2417 l2cap_pi(sk)->dcid = dcid;
2418 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2420 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2422 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2423 l2cap_build_conf_req(sk, req), req);
2424 l2cap_pi(sk)->num_conf_req++;
/* Pending result: remember we are still waiting. */
2428 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Any other result: connection refused. */
2432 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle a Configuration Request.  Fragments (flag 0x0001 set) are
 * buffered in pi->conf_req; when complete, parse the request, send
 * the response, and if both directions are configured bring the
 * channel up.  Oversized accumulated requests are rejected.
 */
2440 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2442 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2448 dcid = __le16_to_cpu(req->dcid);
2449 flags = __le16_to_cpu(req->flags);
2451 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2453 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Ignore config requests on a channel already being torn down. */
2457 if (sk->sk_state == BT_DISCONN)
2460 /* Reject if config buffer is too small. */
2461 len = cmd_len - sizeof(*req);
2462 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2463 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2464 l2cap_build_conf_rsp(sk, rsp,
2465 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment's options. */
2470 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2471 l2cap_pi(sk)->conf_len += len;
2473 if (flags & 0x0001) {
2474 /* Incomplete config. Send empty response. */
2475 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2476 l2cap_build_conf_rsp(sk, rsp,
2477 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2481 /* Complete config. */
2482 len = l2cap_parse_conf_req(sk, rsp);
/* Negative length means the modes could not be reconciled. */
2484 l2cap_send_disconn_req(conn, sk);
2488 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2489 l2cap_pi(sk)->num_conf_rsp++;
2491 /* Reset config buffer. */
2492 l2cap_pi(sk)->conf_len = 0;
2494 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: reset ERTM state and go connected. */
2497 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2498 sk->sk_state = BT_CONNECTED;
2499 l2cap_pi(sk)->next_tx_seq = 0;
2500 l2cap_pi(sk)->expected_ack_seq = 0;
2501 l2cap_pi(sk)->unacked_frames = 0;
2502 __skb_queue_head_init(TX_QUEUE(sk));
2503 l2cap_chan_ready(sk);
/* We have not sent our own config request yet: do it now. */
2507 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2509 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2510 l2cap_build_conf_req(sk, buf), buf);
2511 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle a Configuration Response.  Unacceptable results trigger a
 * bounded re-negotiation (up to L2CAP_CONF_MAX_CONF_RSP attempts);
 * other failures disconnect the channel.  Once our input side is
 * done and the output side was already accepted, bring the channel
 * up.
 */
2519 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2521 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2522 u16 scid, flags, result;
2525 scid = __le16_to_cpu(rsp->scid);
2526 flags = __le16_to_cpu(rsp->flags);
2527 result = __le16_to_cpu(rsp->result);
2529 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2530 scid, flags, result);
2532 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2537 case L2CAP_CONF_SUCCESS:
2540 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation only a bounded number of times. */
2541 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2542 int len = cmd->len - sizeof(*rsp);
2545 /* throw out any old stored conf requests */
2546 result = L2CAP_CONF_SUCCESS;
2547 len = l2cap_parse_conf_rsp(sk, rsp->data,
/* Parse failure: give up and disconnect. */
2550 l2cap_send_disconn_req(conn, sk);
2554 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2555 L2CAP_CONF_REQ, len, req);
2556 l2cap_pi(sk)->num_conf_req++;
2557 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: tear the channel down after 5s grace. */
2563 sk->sk_state = BT_DISCONN;
2564 sk->sk_err = ECONNRESET;
2565 l2cap_sock_set_timer(sk, HZ * 5);
2566 l2cap_send_disconn_req(conn, sk);
2573 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2575 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2576 sk->sk_state = BT_CONNECTED;
2577 l2cap_pi(sk)->expected_tx_seq = 0;
2578 l2cap_pi(sk)->num_to_ack = 0;
2579 __skb_queue_head_init(TX_QUEUE(sk));
2580 l2cap_chan_ready(sk);
/*
 * Handle a Disconnection Request: acknowledge it, mark the socket
 * shut down, purge the transmit queue, and remove/kill the channel
 * with ECONNRESET.
 */
2588 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2590 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2591 struct l2cap_disconn_rsp rsp;
2595 scid = __le16_to_cpu(req->scid);
2596 dcid = __le16_to_cpu(req->dcid);
2598 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid: look the channel up by it. */
2600 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2604 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2605 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2606 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2608 sk->sk_shutdown = SHUTDOWN_MASK;
2610 skb_queue_purge(TX_QUEUE(sk));
2612 l2cap_chan_del(sk, ECONNRESET);
2615 l2cap_sock_kill(sk);
/*
 * Handle a Disconnection Response to our own request: purge the
 * transmit queue and remove/kill the channel (no error reported).
 */
2619 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2621 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2625 scid = __le16_to_cpu(rsp->scid);
2626 dcid = __le16_to_cpu(rsp->dcid);
2628 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2630 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2634 skb_queue_purge(TX_QUEUE(sk));
2636 l2cap_chan_del(sk, 0);
2639 l2cap_sock_kill(sk);
/*
 * Handle an Information Request.  Answers FEAT_MASK with our feature
 * mask (ERTM bit added conditionally; the enable_ertm guard line is
 * elided) and FIXED_CHAN with our fixed-channel bitmap; anything
 * else gets NOTSUPP.
 */
2643 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2645 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2648 type = __le16_to_cpu(req->type);
2650 BT_DBG("type 0x%4.4x", type);
2652 if (type == L2CAP_IT_FEAT_MASK) {
2654 u32 feat_mask = l2cap_feat_mask;
2655 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2656 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2657 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2659 feat_mask |= L2CAP_FEAT_ERTM;
/* Feature mask is 32-bit little-endian, possibly unaligned. */
2660 put_unaligned(cpu_to_le32(feat_mask), (__le32 *) rsp->data);
2661 l2cap_send_cmd(conn, cmd->ident,
2662 L2CAP_INFO_RSP, sizeof(buf), buf);
2663 } else if (type == L2CAP_IT_FIXED_CHAN) {
2665 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2666 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2667 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel map follows the 4-byte response header. */
2668 memcpy(buf + 4, l2cap_fixed_chan, 8);
2669 l2cap_send_cmd(conn, cmd->ident,
2670 L2CAP_INFO_RSP, sizeof(buf), buf);
2672 struct l2cap_info_rsp rsp;
2673 rsp.type = cpu_to_le16(type);
2674 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2675 l2cap_send_cmd(conn, cmd->ident,
2676 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an Information Response.  After the feature mask arrives,
 * optionally chain a fixed-channel query; once the exchange is
 * complete, mark it done and start any channels waiting on it.
 */
2682 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2684 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2687 type = __le16_to_cpu(rsp->type);
2688 result = __le16_to_cpu(rsp->result);
2690 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2692 del_timer(&conn->info_timer);
2694 if (type == L2CAP_IT_FEAT_MASK) {
2695 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer advertises fixed channels: ask which ones. */
2697 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2698 struct l2cap_info_req req;
2699 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2701 conn->info_ident = l2cap_get_ident(conn);
2703 l2cap_send_cmd(conn, conn->info_ident,
2704 L2CAP_INFO_REQ, sizeof(req), &req);
2706 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2707 conn->info_ident = 0;
2709 l2cap_conn_start(conn);
2711 } else if (type == L2CAP_IT_FIXED_CHAN) {
2712 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2713 conn->info_ident = 0;
2715 l2cap_conn_start(conn);
/*
 * Demultiplex the signalling channel (CID 0x0001).  The skb is first
 * mirrored to raw sockets, then each command in it is dispatched to
 * its handler; malformed or unknown commands produce a Command
 * Reject.
 */
2721 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2723 u8 *data = skb->data;
2725 struct l2cap_cmd_hdr cmd;
2728 l2cap_raw_recv(conn, skb);
/* A single skb may carry several back-to-back commands. */
2730 while (len >= L2CAP_CMD_HDR_SIZE) {
2732 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2733 data += L2CAP_CMD_HDR_SIZE;
2734 len -= L2CAP_CMD_HDR_SIZE;
2736 cmd_len = le16_to_cpu(cmd.len);
2738 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Sanity: declared length fits and ident is non-zero per spec. */
2740 if (cmd_len > len || !cmd.ident) {
2741 BT_DBG("corrupted command");
2746 case L2CAP_COMMAND_REJ:
2747 l2cap_command_rej(conn, &cmd, data);
2750 case L2CAP_CONN_REQ:
2751 err = l2cap_connect_req(conn, &cmd, data);
2754 case L2CAP_CONN_RSP:
2755 err = l2cap_connect_rsp(conn, &cmd, data);
2758 case L2CAP_CONF_REQ:
2759 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2762 case L2CAP_CONF_RSP:
2763 err = l2cap_config_rsp(conn, &cmd, data);
2766 case L2CAP_DISCONN_REQ:
2767 err = l2cap_disconnect_req(conn, &cmd, data);
2770 case L2CAP_DISCONN_RSP:
2771 err = l2cap_disconnect_rsp(conn, &cmd, data);
2774 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back. */
2775 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2778 case L2CAP_ECHO_RSP:
2781 case L2CAP_INFO_REQ:
2782 err = l2cap_information_req(conn, &cmd, data);
2785 case L2CAP_INFO_RSP:
2786 err = l2cap_information_rsp(conn, &cmd, data);
2790 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2796 struct l2cap_cmd_rej rej;
2797 BT_DBG("error %d", err);
2799 /* FIXME: Map err to a valid reason */
2800 rej.reason = cpu_to_le16(0);
2801 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Handle an ERTM I-frame: accept only the expected TX sequence
 * number (modulo-64), queue the payload, and send an RR S-frame
 * acknowledgement every L2CAP_DEFAULT_NUM_TO_ACK frames.
 */
2811 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
2813 struct l2cap_pinfo *pi = l2cap_pi(sk);
2814 u8 tx_seq = __get_txseq(rx_control);
2818 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* Out-of-sequence frame: drop (recovery path elided). */
2820 if (tx_seq != pi->expected_tx_seq)
2823 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
2824 err = sock_queue_rcv_skb(sk, skb);
/* Acknowledge periodically rather than per-frame. */
2828 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
2829 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
2830 tx_control |= L2CAP_CTRL_FRAME_TYPE;
2831 tx_control |= L2CAP_SUPER_RCV_READY;
2832 tx_control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2833 err = l2cap_send_sframe(pi, tx_control);
/*
 * Handle an ERTM S-frame.  RR updates the ack point, drops acked
 * frames and resumes transmission; RNR/REJ/SREJ are recognized but
 * not yet handled (bodies elided/empty in this version).
 */
2838 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
2840 struct l2cap_pinfo *pi = l2cap_pi(sk);
2842 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
2844 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
2845 case L2CAP_SUPER_RCV_READY:
2846 pi->expected_ack_seq = __get_reqseq(rx_control);
2847 l2cap_drop_acked_frames(sk);
2848 l2cap_ertm_send(sk);
2851 case L2CAP_SUPER_RCV_NOT_READY:
2852 case L2CAP_SUPER_REJECT:
2853 case L2CAP_SUPER_SELECT_REJECT:
/*
 * Deliver a data frame to its connection-oriented channel by CID.
 * Basic mode queues the payload directly (dropping frames over the
 * incoming MTU); ERTM strips the 16-bit control field and dispatches
 * to the I-frame/S-frame handlers.
 */
2860 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2866 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid)
2868 BT_DBG("unknown cid 0x%4.4x", cid);
2872 BT_DBG("sk %p, len %d", sk, skb->len);
2874 if (sk->sk_state != BT_CONNECTED)
2877 switch (l2cap_pi(sk)->mode) {
2878 case L2CAP_MODE_BASIC:
2879 /* If socket recv buffers overflows we drop data here
2880 * which is *bad* because L2CAP has to be reliable.
2881 * But we don't have any other choice. L2CAP doesn't
2882 * provide flow control mechanism. */
2884 if (l2cap_pi(sk)->imtu < skb->len)
2887 if (!sock_queue_rcv_skb(sk, skb))
2891 case L2CAP_MODE_ERTM:
/* ERTM control field precedes the payload. */
2892 control = get_unaligned_le16(skb->data);
2895 if (l2cap_pi(sk)->imtu < skb->len)
2898 if (__is_iframe(control))
2899 err = l2cap_data_channel_iframe(sk, control, skb);
2901 err = l2cap_data_channel_sframe(sk, control, skb);
2908 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
/*
 * Deliver a connectionless (CID 0x0002) frame to the socket bound to
 * its PSM, enforcing state and incoming-MTU checks.
 */
2922 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2926 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2930 BT_DBG("sk %p, len %d", sk, skb->len);
2932 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2935 if (l2cap_pi(sk)->imtu < skb->len)
2938 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Route a complete reassembled L2CAP frame by CID: signalling,
 * connectionless (PSM-prefixed payload), or a data channel.  Frames
 * whose header length disagrees with the skb length are dropped
 * (drop path elided from view).
 */
2950 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2952 struct l2cap_hdr *lh = (void *) skb->data;
2956 skb_pull(skb, L2CAP_HDR_SIZE);
2957 cid = __le16_to_cpu(lh->cid);
2958 len = __le16_to_cpu(lh->len);
2960 if (len != skb->len) {
2965 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2968 case L2CAP_CID_SIGNALING:
2969 l2cap_sig_channel(conn, skb);
2972 case L2CAP_CID_CONN_LESS:
/* Connectionless frames start with a 16-bit PSM. */
2973 psm = get_unaligned((__le16 *) skb->data);
2975 l2cap_conless_channel(conn, psm, skb);
2979 l2cap_data_channel(conn, cid, skb);
2984 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI asks whether to accept an incoming ACL connection.  Scan the
 * listening sockets: an exact local-address match takes precedence
 * over BDADDR_ANY; the returned link-mode flags include HCI_LM_MASTER
 * when a matching socket requested role switching.
 */
2986 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2988 int exact = 0, lm1 = 0, lm2 = 0;
2989 register struct sock *sk;
2990 struct hlist_node *node;
2992 if (type != ACL_LINK)
2995 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2997 /* Find listening sockets and check their link_mode */
2998 read_lock(&l2cap_sk_list.lock);
2999 sk_for_each(sk, node, &l2cap_sk_list.head) {
3000 if (sk->sk_state != BT_LISTEN)
/* Exact match on the local adapter address wins. */
3003 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3004 lm1 |= HCI_LM_ACCEPT;
3005 if (l2cap_pi(sk)->role_switch)
3006 lm1 |= HCI_LM_MASTER;
3008 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3009 lm2 |= HCI_LM_ACCEPT;
3010 if (l2cap_pi(sk)->role_switch)
3011 lm2 |= HCI_LM_MASTER;
3014 read_unlock(&l2cap_sk_list.lock);
3016 return exact ? lm1 : lm2;
/*
 * HCI connection-complete callback: on success attach an L2CAP
 * connection object and start it; on failure tear down with the
 * mapped errno.
 */
3019 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3021 struct l2cap_conn *conn;
3023 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3025 if (hcon->type != ACL_LINK)
3029 conn = l2cap_conn_add(hcon, status);
3031 l2cap_conn_ready(conn);
3033 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI asks for the disconnect reason to report for this ACL link;
 * return the reason recorded on the L2CAP connection.
 */
3038 static int l2cap_disconn_ind(struct hci_conn *hcon)
3040 struct l2cap_conn *conn = hcon->l2cap_data;
3042 BT_DBG("hcon %p", hcon);
3044 if (hcon->type != ACL_LINK || !conn)
3047 return conn->disc_reason;
/*
 * HCI disconnection-complete callback: tear down the L2CAP
 * connection with the mapped errno.
 */
3050 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3052 BT_DBG("hcon %p reason %d", hcon, reason);
3054 if (hcon->type != ACL_LINK)
3057 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a connected SEQPACKET channel.
 * Encryption lost: MEDIUM security gets a 5s grace timer, HIGH is
 * closed immediately.  Encryption (re)gained: clear the grace timer
 * for MEDIUM.
 */
3062 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3064 if (sk->sk_type != SOCK_SEQPACKET)
3067 if (encrypt == 0x00) {
3068 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3069 l2cap_sock_clear_timer(sk);
3070 l2cap_sock_set_timer(sk, HZ * 5);
3071 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3072 __l2cap_sock_close(sk, ECONNREFUSED);
3074 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3075 l2cap_sock_clear_timer(sk);
/*
 * HCI security (auth/encrypt) result callback.  For each channel on
 * the connection: established channels get an encryption check;
 * channels waiting in BT_CONNECT send their Connection Request on
 * success; BT_CONNECT2 channels are answered success (-> BT_CONFIG)
 * or security-blocked (-> BT_DISCONN).  NOTE(review): per-socket
 * locking lines and the status checks before the CONNECT branches
 * are elided from this view.
 */
3079 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3081 struct l2cap_chan_list *l;
3082 struct l2cap_conn *conn = hcon->l2cap_data;
3088 l = &conn->chan_list;
3090 BT_DBG("conn %p", conn);
3092 read_lock(&l->lock);
3094 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channels still waiting for connect are handled below. */
3097 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3102 if (!status && (sk->sk_state == BT_CONNECTED ||
3103 sk->sk_state == BT_CONFIG)) {
3104 l2cap_check_encryption(sk, encrypt);
3109 if (sk->sk_state == BT_CONNECT) {
3111 struct l2cap_conn_req req;
3112 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3113 req.psm = l2cap_pi(sk)->psm;
3115 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3117 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3118 L2CAP_CONN_REQ, sizeof(req), &req);
3120 l2cap_sock_clear_timer(sk);
3121 l2cap_sock_set_timer(sk, HZ / 10);
3123 } else if (sk->sk_state == BT_CONNECT2) {
3124 struct l2cap_conn_rsp rsp;
3128 sk->sk_state = BT_CONFIG;
3129 result = L2CAP_CR_SUCCESS;
3131 sk->sk_state = BT_DISCONN;
3132 l2cap_sock_set_timer(sk, HZ / 10);
3133 result = L2CAP_CR_SEC_BLOCK;
3136 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3137 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3138 rsp.result = cpu_to_le16(result);
3139 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3140 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3141 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3147 read_unlock(&l->lock);
/*
 * Receive ACL data from HCI and reassemble L2CAP frames.  An
 * ACL_START fragment carries the L2CAP header: if it holds the whole
 * frame, process it directly, otherwise allocate conn->rx_skb and
 * accumulate continuation fragments until conn->rx_len reaches zero.
 * Malformed sequences mark the connection unreliable (ECOMM).
 */
3152 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3154 struct l2cap_conn *conn = hcon->l2cap_data;
3156 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3159 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3161 if (flags & ACL_START) {
3162 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress is a protocol
 * error: throw away the partial frame. */
3166 BT_ERR("Unexpected start frame (len %d)", skb->len);
3167 kfree_skb(conn->rx_skb);
3168 conn->rx_skb = NULL;
3170 l2cap_conn_unreliable(conn, ECOMM);
3174 BT_ERR("Frame is too short (len %d)", skb->len);
3175 l2cap_conn_unreliable(conn, ECOMM);
3179 hdr = (struct l2cap_hdr *) skb->data;
3180 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3182 if (len == skb->len) {
3183 /* Complete frame received */
3184 l2cap_recv_frame(conn, skb);
3188 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3190 if (skb->len > len) {
3191 BT_ERR("Frame is too long (len %d, expected len %d)",
3193 l2cap_conn_unreliable(conn, ECOMM);
3197 /* Allocate skb for the complete frame (with header) */
3198 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3202 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3204 conn->rx_len = len - skb->len;
3206 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation without a pending frame is a protocol error. */
3208 if (!conn->rx_len) {
3209 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3210 l2cap_conn_unreliable(conn, ECOMM);
3214 if (skb->len > conn->rx_len) {
3215 BT_ERR("Fragment is too long (len %d, expected %d)",
3216 skb->len, conn->rx_len);
3217 kfree_skb(conn->rx_skb);
3218 conn->rx_skb = NULL;
3220 l2cap_conn_unreliable(conn, ECOMM);
3224 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3226 conn->rx_len -= skb->len;
3228 if (!conn->rx_len) {
3229 /* Complete frame received */
3230 l2cap_recv_frame(conn, conn->rx_skb);
3231 conn->rx_skb = NULL;
/*
 * sysfs show() handler for the "l2cap" class attribute: dump one line per
 * L2CAP socket (src/dst address, state, PSM, CIDs, MTUs, security level).
 *
 * NOTE(review): the visible code appends with unbounded sprintf() into the
 * sysfs-provided buffer; sysfs show() buffers are one page (PAGE_SIZE), so
 * with enough sockets this presumably can overrun -- consider snprintf with
 * a remaining-space bound (or seq_file). TODO confirm against the elided
 * lines of this function.
 */
3240 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3243 	struct hlist_node *node;
/* Walk the global socket list under its lock, BH-disabled. */
3246 	read_lock_bh(&l2cap_sk_list.lock);
3248 	sk_for_each(sk, node, &l2cap_sk_list.head) {
3249 		struct l2cap_pinfo *pi = l2cap_pi(sk);
/* One space-separated record per socket; PSM is stored little-endian. */
3251 		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3252 				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3253 				sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3254 				pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3257 	read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only class attribute /sys/class/bluetooth/l2cap backed by the
 * show() handler above. */
3262 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/*
 * proto_ops table for PF_BLUETOOTH/BTPROTO_L2CAP sockets: L2CAP-specific
 * handlers where needed, generic bt_sock_*/sock_no_* helpers otherwise.
 */
3264 static const struct proto_ops l2cap_sock_ops = {
3265 	.family		= PF_BLUETOOTH,
3266 	.owner		= THIS_MODULE,
3267 	.release	= l2cap_sock_release,
3268 	.bind		= l2cap_sock_bind,
3269 	.connect	= l2cap_sock_connect,
3270 	.listen		= l2cap_sock_listen,
3271 	.accept		= l2cap_sock_accept,
3272 	.getname	= l2cap_sock_getname,
3273 	.sendmsg	= l2cap_sock_sendmsg,
3274 	.recvmsg	= l2cap_sock_recvmsg,
	/* Shared Bluetooth helpers cover poll/ioctl. */
3275 	.poll		= bt_sock_poll,
3276 	.ioctl		= bt_sock_ioctl,
	/* mmap/socketpair are not supported on L2CAP sockets. */
3277 	.mmap		= sock_no_mmap,
3278 	.socketpair	= sock_no_socketpair,
3279 	.shutdown	= l2cap_sock_shutdown,
3280 	.setsockopt	= l2cap_sock_setsockopt,
3281 	.getsockopt	= l2cap_sock_getsockopt
/* Socket-family hook: routes socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP)
 * creation requests to l2cap_sock_create(). */
3284 static struct net_proto_family l2cap_sock_family_ops = {
3285 	.family	= PF_BLUETOOTH,
3286 	.owner	= THIS_MODULE,
3287 	.create	= l2cap_sock_create,
/*
 * HCI protocol descriptor: registers L2CAP with the HCI core so it
 * receives connection, disconnection, security and ACL data events.
 */
3290 static struct hci_proto l2cap_hci_proto = {
3292 	.id		= HCI_PROTO_L2CAP,
3293 	.connect_ind	= l2cap_connect_ind,
3294 	.connect_cfm	= l2cap_connect_cfm,
3295 	.disconn_ind	= l2cap_disconn_ind,
3296 	.disconn_cfm	= l2cap_disconn_cfm,
3297 	.security_cfm	= l2cap_security_cfm,
3298 	.recv_acldata	= l2cap_recv_acldata
/*
 * Module init: register the proto, the socket family and the HCI protocol
 * hooks, in that order, unwinding earlier registrations on failure.
 * The sysfs file is best-effort only -- its failure is logged, not fatal.
 * (Error-path gotos/returns are elided in this extract.)
 */
3301 static int __init l2cap_init(void)
3305 	err = proto_register(&l2cap_proto, 0);
3309 	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3311 		BT_ERR("L2CAP socket registration failed");
3315 	err = hci_register_proto(&l2cap_hci_proto);
3317 		BT_ERR("L2CAP protocol registration failed");
		/* Unwind the socket-family registration done above. */
3318 		bt_sock_unregister(BTPROTO_L2CAP);
/* Non-fatal: the info file is purely informational. */
3322 	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3323 		BT_ERR("Failed to create L2CAP info file");
3325 	BT_INFO("L2CAP ver %s", VERSION);
3326 	BT_INFO("L2CAP socket layer initialized");
/* Common failure path: undo the initial proto_register(). */
3331 	proto_unregister(&l2cap_proto);
/* Module exit: tear down in reverse order of l2cap_init() registration. */
3335 static void __exit l2cap_exit(void)
3337 	class_remove_file(bt_class, &class_attr_l2cap);
3339 	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
3340 		BT_ERR("L2CAP socket unregistration failed");
3342 	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3343 		BT_ERR("L2CAP protocol unregistration failed");
3345 	proto_unregister(&l2cap_proto);
/* Intentionally empty exported symbol: referencing it from another module
 * makes modprobe pull in this module automatically. */
3348 void l2cap_load(void)
3350 	/* Dummy function to trigger automatic L2CAP module loading by
3351 	 * other modules that use L2CAP sockets but don't use any other
3352 	 * symbols from it. */
3355 EXPORT_SYMBOL(l2cap_load);
3357 module_init(l2cap_init);
3358 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared "static int" (see top of file) but
 * exposed here with the "bool" param type; later kernels enforce that the
 * variable type match the param type -- presumably should be bool. Verify
 * against the target kernel's moduleparam type checking. */
3360 module_param(enable_ertm, bool, 0644);
3361 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
3363 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
3364 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
3365 MODULE_VERSION(VERSION);
3366 MODULE_LICENSE("GPL");
/* Matches the "bt-proto-0" alias requested via l2cap_load()/modprobe. */
3367 MODULE_ALIAS("bt-proto-0");