2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
46 #include <asm/system.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.14"
/* Presumably exposed as a module parameter; enables ERTM/streaming modes
 * when non-zero — TODO confirm against the module_param() line (elided). */
55 static int enable_ertm = 0;
/* Feature mask and fixed-channel bitmap; l2cap_feat_mask advertises
 * L2CAP_FEAT_FIXED_CHAN support (presumably in the info response). */
57 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
58 static u8 l2cap_fixed_chan[8] = { 0x02, };
60 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, guarded by its own rwlock. */
62 static struct bt_sock_list l2cap_sk_list = {
63 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for the socket teardown helpers and the
 * signalling-command builder defined later in the file. */
66 static void __l2cap_sock_close(struct sock *sk, int reason);
67 static void l2cap_sock_close(struct sock *sk);
68 static void l2cap_sock_kill(struct sock *sk);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
73 /* ---- L2CAP timers ---- */
/* sk_timer callback: choose an error reason from the socket state and
 * close the channel.  NOTE(review): this chunk elides several original
 * lines (locals, the default reason, socket locking) — the visible
 * statements are not contiguous. */
74 static void l2cap_sock_timeout(unsigned long arg)
76 struct sock *sk = (struct sock *) arg;
79 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A timeout while connected/configuring, or while connecting with a
 * non-SDP security level, is reported as ECONNREFUSED. */
83 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
84 reason = ECONNREFUSED;
85 else if (sk->sk_state == BT_CONNECT &&
86 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
87 reason = ECONNREFUSED;
91 __l2cap_sock_close(sk, reason);
/* Arm the per-socket timer to fire @timeout jiffies from now. */
99 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
101 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
102 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel the per-socket timer (no-op if not pending). */
105 static void l2cap_sock_clear_timer(struct sock *sk)
107 BT_DBG("sock %p state %d", sk, sk->sk_state);
108 sk_stop_timer(sk, &sk->sk_timer);
111 /* ---- L2CAP channels ---- */
/* Linear scan of the connection's channel list for a matching destination
 * CID.  Double-underscore prefix: presumably the caller must hold the
 * chan_list lock — confirm against locked wrappers below. */
112 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
115 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
116 if (l2cap_pi(s)->dcid == cid)
/* As above, but match on the source CID.  Lockless: callers take
 * l->lock (see l2cap_get_chan_by_scid). */
122 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
125 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
126 if (l2cap_pi(s)->scid == cid)
132 /* Find channel with given SCID.
133 * Returns locked socket */
/* Locked wrapper: searches under l->lock; the bh_lock_sock() on the
 * result is in an elided line — the "Returns locked socket" contract
 * above is the authority here. */
134 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
138 s = __l2cap_get_chan_by_scid(l, cid);
141 read_unlock(&l->lock);
/* Find the channel whose pending signalling request used @ident.
 * Caller holds l->lock (see the locked wrapper below). */
145 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
148 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
149 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(); presumably also
 * locks the returned socket, like l2cap_get_chan_by_scid() — the
 * locking line is elided in this chunk. */
155 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 s = __l2cap_get_chan_by_ident(l, ident);
162 read_unlock(&l->lock);
/* Allocate the first unused source CID in the dynamic range by scanning
 * the existing channels.  O(n) per candidate, but channel counts are
 * small.  Fallback when the range is exhausted is in an elided line. */
166 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
168 u16 cid = L2CAP_CID_DYN_START;
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push @sk onto the head of the connection's doubly linked channel
 * list.  Caller is expected to hold the list write lock (see
 * l2cap_chan_add). */
178 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
183 l2cap_pi(l->head)->prev_c = sk;
185 l2cap_pi(sk)->next_c = l->head;
186 l2cap_pi(sk)->prev_c = NULL;
/* Remove @sk from the connection's channel list under the list write
 * lock.  The head-update branch for unlinking the first element is in
 * elided lines. */
190 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
192 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
194 write_lock_bh(&l->lock);
199 l2cap_pi(next)->prev_c = prev;
201 l2cap_pi(prev)->next_c = next;
202 write_unlock_bh(&l->lock);
/* Attach @sk to @conn: assign CIDs according to the socket type, link it
 * into the channel list, and queue it on @parent's accept queue when it
 * was created from an incoming connection.  Caller holds the channel
 * list write lock (see l2cap_chan_add). */
207 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
209 struct l2cap_chan_list *l = &conn->chan_list;
211 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
212 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: default disconnect reason ("remote user terminated" per the
 * HCI error table — TODO confirm). */
214 conn->disc_reason = 0x13;
216 l2cap_pi(sk)->conn = conn;
218 if (sk->sk_type == SOCK_SEQPACKET) {
219 /* Alloc CID for connection-oriented socket */
220 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
221 } else if (sk->sk_type == SOCK_DGRAM) {
222 /* Connectionless socket */
223 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
224 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
227 /* Raw socket can send/recv signalling messages only */
228 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
229 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
233 __l2cap_chan_link(l, sk);
236 bt_accept_enqueue(parent, sk);
/* Delete channel.
240 * Must be called on the locked socket. */
/* Detach @sk from its connection (dropping the hci_conn reference),
 * mark it closed/zapped with @err as sk_err, and wake up either the
 * listening parent or the socket's own waiters. */
241 static void l2cap_chan_del(struct sock *sk, int err)
243 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
244 struct sock *parent = bt_sk(sk)->parent;
246 l2cap_sock_clear_timer(sk);
248 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
251 /* Unlink from channel list */
252 l2cap_chan_unlink(&conn->chan_list, sk);
253 l2cap_pi(sk)->conn = NULL;
254 hci_conn_put(conn->hcon);
257 sk->sk_state = BT_CLOSED;
258 sock_set_flag(sk, SOCK_ZAPPED);
/* Not-yet-accepted child: pull it off the parent's accept queue and
 * let the parent's poll/accept see the change. */
264 bt_accept_unlink(sk);
265 parent->sk_data_ready(parent, 0);
267 sk->sk_state_change(sk);
270 /* Service level security */
/* Map the channel's PSM and requested security level to an HCI
 * authentication type, then ask the HCI layer to enforce it.
 * PSM 0x0001 is SDP: it never bonds, and a LOW request is downgraded
 * to the dedicated SDP level. */
271 static inline int l2cap_check_security(struct sock *sk)
273 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
276 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
278 auth_type = HCI_AT_NO_BONDING_MITM;
280 auth_type = HCI_AT_NO_BONDING;
282 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
283 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
285 switch (l2cap_pi(sk)->sec_level) {
286 case BT_SECURITY_HIGH:
287 auth_type = HCI_AT_GENERAL_BONDING_MITM;
289 case BT_SECURITY_MEDIUM:
290 auth_type = HCI_AT_GENERAL_BONDING;
/* default: no bonding required (break statements elided in chunk) */
293 auth_type = HCI_AT_NO_BONDING;
298 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for @conn, wrapping
 * within the kernel-reserved 1..128 range, under conn->lock. */
302 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
306 /* Get next available identificator.
307 * 1 - 128 are used by kernel.
308 * 129 - 199 are reserved.
309 * 200 - 254 are used by utilities like l2ping, etc.
*/
312 spin_lock_bh(&conn->lock);
/* Wrap-around assignment back to 1 is in an elided line. */
314 if (++conn->tx_ident > 128)
319 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command skb and transmit it on the ACL
 * link.  The NULL check on the built skb is in an elided line. */
324 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
326 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328 BT_DBG("code 0x%2.2x", code);
333 return hci_send_acl(conn->hcon, skb, 0);
/* Build and send a minimal ERTM S-frame: L2CAP header plus a 16-bit
 * control field carrying @control with the S-frame type bit set. */
336 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
339 struct l2cap_hdr *lh;
340 struct l2cap_conn *conn = pi->conn;
343 BT_DBG("pi %p, control 0x%2.2x", pi, control);
/* Frame is header + 2 control bytes, capped by the ACL MTU. */
345 count = min_t(unsigned int, conn->mtu, L2CAP_HDR_SIZE + 2);
346 control |= L2CAP_CTRL_FRAME_TYPE;
348 skb = bt_skb_alloc(count, GFP_ATOMIC);
352 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
353 lh->len = cpu_to_le16(2);
354 lh->cid = cpu_to_le16(pi->dcid);
355 put_unaligned_le16(control, skb_put(skb, 2));
357 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Kick off channel establishment.  If the peer's feature mask has been
 * fetched (or the request is still outstanding) send CONN_REQ once
 * security passes; otherwise first issue an INFO_REQ for the feature
 * mask and arm the info timeout. */
360 static void l2cap_do_start(struct sock *sk)
362 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
364 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Request sent but no response yet: wait for the info exchange. */
365 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
368 if (l2cap_check_security(sk)) {
369 struct l2cap_conn_req req;
370 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
371 req.psm = l2cap_pi(sk)->psm;
373 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
375 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
376 L2CAP_CONN_REQ, sizeof(req), &req);
/* else: query the peer's feature mask first. */
379 struct l2cap_info_req req;
380 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
382 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
383 conn->info_ident = l2cap_get_ident(conn);
385 mod_timer(&conn->info_timer, jiffies +
386 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
388 l2cap_send_cmd(conn, conn->info_ident,
389 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send an L2CAP DISCONN_REQ for @sk's channel on @conn. */
393 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
395 struct l2cap_disconn_req req;
397 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
398 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
399 l2cap_send_cmd(conn, l2cap_get_ident(conn),
400 L2CAP_DISCONN_REQ, sizeof(req), &req);
403 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its state machine: channels
 * in BT_CONNECT send a CONN_REQ (once security passes); channels in
 * BT_CONNECT2 answer the pending incoming CONN_REQ with success,
 * authorization-pending (defer_setup), or authentication-pending.
 * Runs under the channel-list read lock; per-socket bh locking is in
 * elided lines. */
404 static void l2cap_conn_start(struct l2cap_conn *conn)
406 struct l2cap_chan_list *l = &conn->chan_list;
409 BT_DBG("conn %p", conn);
413 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets have a connect state machine. */
416 if (sk->sk_type != SOCK_SEQPACKET) {
421 if (sk->sk_state == BT_CONNECT) {
422 if (l2cap_check_security(sk)) {
423 struct l2cap_conn_req req;
424 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
425 req.psm = l2cap_pi(sk)->psm;
427 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
429 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
430 L2CAP_CONN_REQ, sizeof(req), &req);
432 } else if (sk->sk_state == BT_CONNECT2) {
433 struct l2cap_conn_rsp rsp;
/* In the response, scid/dcid are from the peer's point of view. */
434 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
435 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
437 if (l2cap_check_security(sk)) {
438 if (bt_sk(sk)->defer_setup) {
439 struct sock *parent = bt_sk(sk)->parent;
440 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
441 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
442 parent->sk_data_ready(parent, 0);
/* else: accept immediately and move to configuration. */
445 sk->sk_state = BT_CONFIG;
446 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
447 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not satisfied yet: report authentication pending. */
450 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
451 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
454 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
455 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
461 read_unlock(&l->lock);
/* Called when the underlying ACL link is up: raw/dgram sockets become
 * connected immediately; connecting SOCK_SEQPACKET channels are started
 * (the l2cap_do_start() call after the BT_CONNECT test is elided). */
464 static void l2cap_conn_ready(struct l2cap_conn *conn)
466 struct l2cap_chan_list *l = &conn->chan_list;
469 BT_DBG("conn %p", conn);
473 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
476 if (sk->sk_type != SOCK_SEQPACKET) {
477 l2cap_sock_clear_timer(sk);
478 sk->sk_state = BT_CONNECTED;
479 sk->sk_state_change(sk);
480 } else if (sk->sk_state == BT_CONNECT)
486 read_unlock(&l->lock);
489 /* Notify sockets that we cannot guarantee reliability anymore */
/* Report @err on every channel that asked for force_reliable (the
 * sk_err assignment and error-report callback are in elided lines). */
490 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
492 struct l2cap_chan_list *l = &conn->chan_list;
495 BT_DBG("conn %p", conn);
499 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
500 if (l2cap_pi(sk)->force_reliable)
504 read_unlock(&l->lock);
/* Info-request timer expired: give up on the feature-mask exchange,
 * mark it done, and let pending channels proceed without it. */
507 static void l2cap_info_timeout(unsigned long arg)
509 struct l2cap_conn *conn = (void *) arg;
511 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
512 conn->info_ident = 0;
514 l2cap_conn_start(conn);
/* Get or create the l2cap_conn attached to @hcon.  Returns the existing
 * one when already set (early-return lines elided); otherwise allocates
 * atomically and initialises MTU, addresses, locks and the info timer. */
517 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
519 struct l2cap_conn *conn = hcon->l2cap_data;
524 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
528 hcon->l2cap_data = conn;
531 BT_DBG("hcon %p conn %p", hcon, conn);
533 conn->mtu = hcon->hdev->acl_mtu;
534 conn->src = &hcon->hdev->bdaddr;
535 conn->dst = &hcon->dst;
539 setup_timer(&conn->info_timer, l2cap_info_timeout,
540 (unsigned long) conn);
542 spin_lock_init(&conn->lock);
543 rwlock_init(&conn->chan_list.lock);
/* Same 0x13 default disconnect reason as __l2cap_chan_add(). */
545 conn->disc_reason = 0x13;
/* Tear down @hcon's L2CAP state: free any partial rx skb, delete every
 * channel with @err, stop the info timer if it was armed, and detach
 * from the hci_conn.  Per-socket locking around l2cap_chan_del() is in
 * elided lines. */
550 static void l2cap_conn_del(struct hci_conn *hcon, int err)
552 struct l2cap_conn *conn = hcon->l2cap_data;
558 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
560 kfree_skb(conn->rx_skb);
563 while ((sk = conn->chan_list.head)) {
565 l2cap_chan_del(sk, err);
570 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
571 del_timer_sync(&conn->info_timer);
573 hcon->l2cap_data = NULL;
/* Locked wrapper: add @sk to @conn's channel list under the write lock. */
577 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
579 struct l2cap_chan_list *l = &conn->chan_list;
580 write_lock_bh(&l->lock);
581 __l2cap_chan_add(conn, sk, parent);
582 write_unlock_bh(&l->lock);
585 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to source PSM @psm on address @src.
 * Caller holds l2cap_sk_list.lock (callers below take it). */
586 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
589 struct hlist_node *node;
590 sk_for_each(sk, node, &l2cap_sk_list.head)
591 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
598 /* Find socket with psm and source bdaddr.
599 * Returns closest match.
*/
/* Prefers an exact source-address match; a BDADDR_ANY listener is kept
 * in sk1 as the fallback.  If the loop completes, node is NULL and the
 * fallback is returned. */
601 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
603 struct sock *sk = NULL, *sk1 = NULL;
604 struct hlist_node *node;
606 sk_for_each(sk, node, &l2cap_sk_list.head) {
/* Optional state filter: 0 matches any state. */
607 if (state && sk->sk_state != state)
610 if (l2cap_pi(sk)->psm == psm) {
612 if (!bacmp(&bt_sk(sk)->src, src))
616 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
620 return node ? sk : sk1;
623 /* Find socket with given address (psm, src).
624 * Returns locked socket */
/* Locked wrapper over __l2cap_get_sock_by_psm(); the bh_lock_sock() on
 * the result is in an elided line — the comment above is authoritative. */
625 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
628 read_lock(&l2cap_sk_list.lock);
629 s = __l2cap_get_sock_by_psm(state, psm, src);
632 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued on the socket. */
636 static void l2cap_sock_destruct(struct sock *sk)
640 skb_queue_purge(&sk->sk_receive_queue);
641 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent itself closed and zapped. */
644 static void l2cap_sock_cleanup_listen(struct sock *parent)
648 BT_DBG("parent %p", parent);
650 /* Close not yet accepted channels */
651 while ((sk = bt_accept_dequeue(parent, NULL)))
652 l2cap_sock_close(sk);
654 parent->sk_state = BT_CLOSED;
655 sock_set_flag(parent, SOCK_ZAPPED);
658 /* Kill socket (only if zapped and orphan)
659 * Must be called on unlocked socket.
*/
/* Bail out unless the socket is both zapped and orphaned; otherwise
 * unlink it from the global list and mark it dead (the final sock_put
 * is in an elided line). */
661 static void l2cap_sock_kill(struct sock *sk)
663 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
666 BT_DBG("sk %p state %d", sk, sk->sk_state);
668 /* Kill poor orphan */
669 bt_sock_unlink(&l2cap_sk_list, sk);
670 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close, called with the socket locked:
 *   BT_LISTEN: tear down pending children;
 *   connected/config SEQPACKET: send DISCONN_REQ and wait in BT_DISCONN;
 *   BT_CONNECT2 SEQPACKET: reject the pending CONN_REQ;
 *   other connect states: just delete the channel;
 * fall-through default zaps the socket.  Several case labels and break
 * statements are elided in this chunk. */
674 static void __l2cap_sock_close(struct sock *sk, int reason)
676 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
678 switch (sk->sk_state) {
680 l2cap_sock_cleanup_listen(sk);
685 if (sk->sk_type == SOCK_SEQPACKET) {
686 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
688 sk->sk_state = BT_DISCONN;
689 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
690 l2cap_send_disconn_req(conn, sk);
692 l2cap_chan_del(sk, reason);
696 if (sk->sk_type == SOCK_SEQPACKET) {
697 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
698 struct l2cap_conn_rsp rsp;
/* defer_setup closes as "security block", otherwise "bad PSM". */
701 if (bt_sk(sk)->defer_setup)
702 result = L2CAP_CR_SEC_BLOCK;
704 result = L2CAP_CR_BAD_PSM;
706 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
707 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
708 rsp.result = cpu_to_le16(result);
709 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
710 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
711 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
713 l2cap_chan_del(sk, reason);
718 l2cap_chan_del(sk, reason);
722 sock_set_flag(sk, SOCK_ZAPPED);
727 /* Must be called on unlocked socket. */
/* Clear the timer and close with ECONNRESET; socket locking around the
 * internal close is in elided lines. */
728 static void l2cap_sock_close(struct sock *sk)
730 l2cap_sock_clear_timer(sk);
732 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise a fresh L2CAP socket.  With a @parent (incoming accept)
 * the child inherits the parent's type, defer_setup flag and all
 * channel options; without one, defaults are applied (some default
 * assignments are elided in this chunk). */
737 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
739 struct l2cap_pinfo *pi = l2cap_pi(sk);
744 sk->sk_type = parent->sk_type;
745 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
747 pi->imtu = l2cap_pi(parent)->imtu;
748 pi->omtu = l2cap_pi(parent)->omtu;
749 pi->mode = l2cap_pi(parent)->mode;
750 pi->fcs = l2cap_pi(parent)->fcs;
751 pi->sec_level = l2cap_pi(parent)->sec_level;
752 pi->role_switch = l2cap_pi(parent)->role_switch;
753 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: defaults for a freshly created socket. */
755 pi->imtu = L2CAP_DEFAULT_MTU;
757 pi->mode = L2CAP_MODE_BASIC;
758 pi->fcs = L2CAP_FCS_CRC16;
759 pi->sec_level = BT_SECURITY_LOW;
761 pi->force_reliable = 0;
764 /* Default config options */
766 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor: sizes sock allocations for struct l2cap_pinfo. */
769 static struct proto l2cap_proto = {
771 .owner = THIS_MODULE,
772 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and initialise a struct sock for the L2CAP protocol: hook up
 * destructor, connect timeout, state/protocol fields, the sk_timer
 * callback, and link it into the global socket list. */
775 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
779 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
783 sock_init_data(sock, sk);
784 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
786 sk->sk_destruct = l2cap_sock_destruct;
787 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
789 sock_reset_flag(sk, SOCK_ZAPPED);
791 sk->sk_protocol = proto;
792 sk->sk_state = BT_OPEN;
794 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
796 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: only SEQPACKET, DGRAM and RAW types are supported,
 * RAW additionally requires CAP_NET_RAW.  Allocates and initialises the
 * sock (allocation-failure return is in an elided line). */
800 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
804 BT_DBG("sock %p", sock);
806 sock->state = SS_UNCONNECTED;
808 if (sock->type != SOCK_SEQPACKET &&
809 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
810 return -ESOCKTNOSUPPORT;
812 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
815 sock->ops = &l2cap_sock_ops;
817 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
821 l2cap_sock_init(sk, NULL);
/* bind(2): validate the sockaddr, require BT_OPEN state, restrict
 * privileged PSMs (< 0x1001) to CAP_NET_BIND_SERVICE, reject duplicate
 * (psm, src) bindings under the socket-list write lock, then record the
 * source address and PSM.  PSMs 0x0001 (SDP) and 0x0003 (RFCOMM) get
 * the dedicated SDP security level. */
825 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
827 struct sock *sk = sock->sk;
828 struct sockaddr_l2 la;
833 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy at most sizeof(la); zero-fill first so short sockaddrs are safe. */
836 memset(&la, 0, sizeof(la));
837 len = min_t(unsigned int, sizeof(la), alen);
838 memcpy(&la, addr, len);
845 if (sk->sk_state != BT_OPEN) {
850 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
851 !capable(CAP_NET_BIND_SERVICE)) {
856 write_lock_bh(&l2cap_sk_list.lock);
858 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
861 /* Save source address */
862 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
863 l2cap_pi(sk)->psm = la.l2_psm;
864 l2cap_pi(sk)->sport = la.l2_psm;
865 sk->sk_state = BT_BOUND;
867 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
868 __le16_to_cpu(la.l2_psm) == 0x0003)
869 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
872 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing connection: route to an HCI device, pick the
 * HCI auth type from socket type / PSM / security level, create the ACL
 * link and attach the channel.  If the ACL is already up, non-SEQPACKET
 * sockets become connected immediately.  Error paths and the
 * l2cap_do_start() call for an already-up link are in elided lines. */
879 static int l2cap_do_connect(struct sock *sk)
881 bdaddr_t *src = &bt_sk(sk)->src;
882 bdaddr_t *dst = &bt_sk(sk)->dst;
883 struct l2cap_conn *conn;
884 struct hci_conn *hcon;
885 struct hci_dev *hdev;
889 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
892 hdev = hci_get_route(dst, src);
894 return -EHOSTUNREACH;
896 hci_dev_lock_bh(hdev);
/* RAW sockets use dedicated bonding (e.g. for pairing utilities). */
900 if (sk->sk_type == SOCK_RAW) {
901 switch (l2cap_pi(sk)->sec_level) {
902 case BT_SECURITY_HIGH:
903 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
905 case BT_SECURITY_MEDIUM:
906 auth_type = HCI_AT_DEDICATED_BONDING;
909 auth_type = HCI_AT_NO_BONDING;
/* SDP (PSM 0x0001) never bonds; LOW is downgraded to the SDP level
 * (mirrors l2cap_check_security()). */
912 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
913 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
914 auth_type = HCI_AT_NO_BONDING_MITM;
916 auth_type = HCI_AT_NO_BONDING;
918 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
919 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
921 switch (l2cap_pi(sk)->sec_level) {
922 case BT_SECURITY_HIGH:
923 auth_type = HCI_AT_GENERAL_BONDING_MITM;
925 case BT_SECURITY_MEDIUM:
926 auth_type = HCI_AT_GENERAL_BONDING;
929 auth_type = HCI_AT_NO_BONDING;
934 hcon = hci_connect(hdev, ACL_LINK, dst,
935 l2cap_pi(sk)->sec_level, auth_type);
939 conn = l2cap_conn_add(hcon, 0);
947 /* Update source addr of the socket */
948 bacpy(src, conn->src);
950 l2cap_chan_add(conn, sk, NULL);
952 sk->sk_state = BT_CONNECT;
953 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
955 if (hcon->state == BT_CONNECTED) {
956 if (sk->sk_type != SOCK_SEQPACKET) {
957 l2cap_sock_clear_timer(sk);
958 sk->sk_state = BT_CONNECTED;
964 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and mode (ERTM/streaming only permitted
 * when enabled — the enable_ertm check between the case labels is
 * elided), reject a SEQPACKET connect without a PSM, start the
 * connection and optionally block until BT_CONNECTED. */
969 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
971 struct sock *sk = sock->sk;
972 struct sockaddr_l2 la;
977 if (!addr || addr->sa_family != AF_BLUETOOTH)
980 memset(&la, 0, sizeof(la));
981 len = min_t(unsigned int, sizeof(la), alen);
982 memcpy(&la, addr, len);
/* Connection-oriented sockets need a PSM to connect to. */
989 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
994 switch (l2cap_pi(sk)->mode) {
995 case L2CAP_MODE_BASIC:
997 case L2CAP_MODE_ERTM:
998 case L2CAP_MODE_STREAMING:
1007 switch (sk->sk_state) {
1011 /* Already connecting */
1015 /* Already connected */
1028 /* Set destination address and psm */
1029 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1030 l2cap_pi(sk)->psm = la.l2_psm;
1032 err = l2cap_do_connect(sk);
/* Block (unless O_NONBLOCK) until the connection completes. */
1037 err = bt_sock_wait_state(sk, BT_CONNECTED,
1038 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SOCK_SEQPACKET sockets may listen; mode must be
 * basic (or ERTM/streaming when enabled — check elided).  An unbound
 * PSM is auto-assigned from the dynamic range; stepping by 2 from
 * 0x1001 keeps the PSMs odd as the spec requires. */
1044 static int l2cap_sock_listen(struct socket *sock, int backlog)
1046 struct sock *sk = sock->sk;
1049 BT_DBG("sk %p backlog %d", sk, backlog);
1053 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1058 switch (l2cap_pi(sk)->mode) {
1059 case L2CAP_MODE_BASIC:
1061 case L2CAP_MODE_ERTM:
1062 case L2CAP_MODE_STREAMING:
1071 if (!l2cap_pi(sk)->psm) {
1072 bdaddr_t *src = &bt_sk(sk)->src;
1077 write_lock_bh(&l2cap_sk_list.lock);
1079 for (psm = 0x1001; psm < 0x1100; psm += 2)
1080 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1081 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1082 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1087 write_unlock_bh(&l2cap_sk_list.lock);
1093 sk->sk_max_ack_backlog = backlog;
1094 sk->sk_ack_backlog = 0;
1095 sk->sk_state = BT_LISTEN;
/* accept(2): classic wake-one wait loop on an exclusive waitqueue.
 * The socket is released around schedule_timeout() (release call
 * elided) and re-locked afterwards; exits on a dequeued child, a state
 * change away from BT_LISTEN, timeout expiry, or a pending signal. */
1102 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1104 DECLARE_WAITQUEUE(wait, current);
1105 struct sock *sk = sock->sk, *nsk;
1109 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1111 if (sk->sk_state != BT_LISTEN) {
1116 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1118 BT_DBG("sk %p timeo %ld", sk, timeo);
1120 /* Wait for an incoming connection. (wake-one). */
1121 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1122 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1123 set_current_state(TASK_INTERRUPTIBLE);
1130 timeo = schedule_timeout(timeo);
1131 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1133 if (sk->sk_state != BT_LISTEN) {
1138 if (signal_pending(current)) {
1139 err = sock_intr_errno(timeo);
1143 set_current_state(TASK_RUNNING);
1144 remove_wait_queue(sk->sk_sleep, &wait);
1149 newsock->state = SS_CONNECTED;
1151 BT_DBG("new socket %p", nsk);
/* getname(2)/getpeername(2): fill a sockaddr_l2 with either the peer's
 * (psm, bdaddr, dcid) or the local (sport, bdaddr, scid), selected by
 * @peer (the if (peer) line is elided between 1166 and 1169). */
1158 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1160 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1161 struct sock *sk = sock->sk;
1163 BT_DBG("sock %p, sk %p", sock, sk);
1165 addr->sa_family = AF_BLUETOOTH;
1166 *len = sizeof(struct sockaddr_l2);
1169 la->l2_psm = l2cap_pi(sk)->psm;
1170 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1171 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1173 la->l2_psm = l2cap_pi(sk)->sport;
1174 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1175 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: if the retry budget (remote_max_tx) is spent,
 * disconnect; otherwise bump the retry count, re-arm the monitor and
 * poll the peer with an RR S-frame carrying the poll bit. */
1181 static void l2cap_monitor_timeout(unsigned long arg)
1183 struct sock *sk = (void *) arg;
1186 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1187 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1191 l2cap_pi(sk)->retry_count++;
1192 __mod_monitor_timer();
1194 control = L2CAP_CTRL_POLL;
1195 control |= L2CAP_SUPER_RCV_READY;
1196 l2cap_send_sframe(l2cap_pi(sk), control);
/* ERTM retransmission timer: start the monitor sequence (retry 1),
 * enter the wait-for-final state, and poll the peer with RR + poll. */
1199 static void l2cap_retrans_timeout(unsigned long arg)
1201 struct sock *sk = (void *) arg;
1204 l2cap_pi(sk)->retry_count = 1;
1205 __mod_monitor_timer();
1207 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1209 control = L2CAP_CTRL_POLL;
1210 control |= L2CAP_SUPER_RCV_READY;
1211 l2cap_send_sframe(l2cap_pi(sk), control);
/* Free frames from the head of the ERTM tx queue that the peer has
 * acknowledged, stopping at expected_ack_seq.  When no unacked frames
 * remain, the retransmission timer is stopped.  The loop's break and
 * the kfree_skb of the dequeued frame are in elided lines. */
1214 static void l2cap_drop_acked_frames(struct sock *sk)
1216 struct sk_buff *skb;
1218 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1219 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1222 skb = skb_dequeue(TX_QUEUE(sk));
1225 l2cap_pi(sk)->unacked_frames--;
1228 if (!l2cap_pi(sk)->unacked_frames)
1229 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Transmit one skb on the channel's ACL link; returns the
 * hci_send_acl() result (error handling by the callers). */
1234 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1236 struct l2cap_pinfo *pi = l2cap_pi(sk);
1239 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1241 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming-mode transmit: for each queued frame, clone it, stamp the
 * current tx sequence number into the control field (mod-64 counter),
 * send the clone, then dequeue and free the original (kfree_skb
 * elided).  A send failure triggers a disconnect request. */
1248 static int l2cap_streaming_send(struct sock *sk)
1250 struct sk_buff *skb, *tx_skb;
1251 struct l2cap_pinfo *pi = l2cap_pi(sk);
1255 while ((skb = sk->sk_send_head)) {
1256 tx_skb = skb_clone(skb, GFP_ATOMIC);
1258 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1259 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1260 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1262 err = l2cap_do_send(sk, tx_skb);
1264 l2cap_send_disconn_req(pi->conn, sk);
/* Sequence numbers are modulo 64. */
1268 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1270 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1271 sk->sk_send_head = NULL;
1273 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1275 skb = skb_dequeue(TX_QUEUE(sk));
/* ERTM transmit: send queued I-frames while the tx window has room and
 * we are not waiting for a final (F-bit) response.  Each frame's clone
 * gets req_seq/tx_seq stamped into its control field; originals stay on
 * the queue for retransmission until acknowledged.  Exceeding
 * remote_max_tx retries, or a send failure, disconnects the channel. */
1281 static int l2cap_ertm_send(struct sock *sk)
1283 struct sk_buff *skb, *tx_skb;
1284 struct l2cap_pinfo *pi = l2cap_pi(sk);
1288 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1291 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1292 tx_skb = skb_clone(skb, GFP_ATOMIC);
1294 if (pi->remote_max_tx &&
1295 bt_cb(skb)->retries == pi->remote_max_tx) {
1296 l2cap_send_disconn_req(pi->conn, sk);
1300 bt_cb(skb)->retries++;
1302 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1303 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1304 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1305 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1308 err = l2cap_do_send(sk, tx_skb);
1310 l2cap_send_disconn_req(pi->conn, sk);
/* Successful send: arm the retransmission timer. */
1313 __mod_retrans_timer();
1315 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1316 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1318 pi->unacked_frames++;
1320 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1321 sk->sk_send_head = NULL;
1323 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder into a chain of
 * continuation fragments (each at most conn->mtu) hung off frag_list.
 * Error returns and the len/count bookkeeping between iterations are
 * in elided lines. */
1329 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1331 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1332 struct sk_buff **frag;
1335 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1342 /* Continuation fragments (no L2CAP header) */
1343 frag = &skb_shinfo(skb)->frag_list;
1345 count = min_t(unsigned int, conn->mtu, len);
1347 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1350 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1356 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * followed by the user payload copied via l2cap_skbuff_fromiovec().
 * Returns the skb or an ERR_PTR. */
1362 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1364 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1365 struct sk_buff *skb;
1366 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1367 struct l2cap_hdr *lh;
1369 BT_DBG("sk %p len %d", sk, (int)len);
/* Linear part is capped by the ACL MTU; the rest goes in fragments. */
1371 count = min_t(unsigned int, (conn->mtu - hlen), len);
1372 skb = bt_skb_send_alloc(sk, count + hlen,
1373 msg->msg_flags & MSG_DONTWAIT, &err);
1375 return ERR_PTR(-ENOMEM);
1377 /* Create L2CAP header */
1378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1379 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1380 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1381 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1383 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1384 if (unlikely(err < 0)) {
1386 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload.  Returns the skb or an ERR_PTR. */
1391 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1393 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1394 struct sk_buff *skb;
1395 int err, count, hlen = L2CAP_HDR_SIZE;
1396 struct l2cap_hdr *lh;
1398 BT_DBG("sk %p len %d", sk, (int)len);
1400 count = min_t(unsigned int, (conn->mtu - hlen), len);
1401 skb = bt_skb_send_alloc(sk, count + hlen,
1402 msg->msg_flags & MSG_DONTWAIT, &err);
1404 return ERR_PTR(-ENOMEM);
1406 /* Create L2CAP header */
1407 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1408 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1409 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1411 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1412 if (unlikely(err < 0)) {
1414 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * word, optional 16-bit SDU length (only when @sdulen is non-zero —
 * the guard adjusting hlen is in elided lines), then the payload.
 * Retries counter starts at zero for l2cap_ertm_send().  Returns the
 * skb or an ERR_PTR. */
1419 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1421 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1422 struct sk_buff *skb;
1423 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1424 struct l2cap_hdr *lh;
1426 BT_DBG("sk %p len %d", sk, (int)len);
1431 count = min_t(unsigned int, (conn->mtu - hlen), len);
1432 skb = bt_skb_send_alloc(sk, count + hlen,
1433 msg->msg_flags & MSG_DONTWAIT, &err);
1435 return ERR_PTR(-ENOMEM);
1437 /* Create L2CAP header */
1438 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1439 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1440 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1441 put_unaligned_le16(control, skb_put(skb, 2));
1443 put_unaligned_le16(sdulen, skb_put(skb, 2));
1445 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1446 if (unlikely(err < 0)) {
1448 return ERR_PTR(err);
1451 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than max_pdu_size into a START frame (carrying
 * the total SDU length) followed by CONTINUE frames and a final END
 * frame, built on a local queue and spliced onto the tx queue only when
 * the whole SDU was segmented successfully.  The surrounding while
 * loop and the len/size updates per iteration are partly elided. */
1455 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1457 struct l2cap_pinfo *pi = l2cap_pi(sk);
1458 struct sk_buff *skb;
1459 struct sk_buff_head sar_queue;
1463 __skb_queue_head_init(&sar_queue);
1464 control = L2CAP_SDU_START;
/* START frame carries the full SDU length in its sdulen field. */
1465 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1467 return PTR_ERR(skb);
1469 __skb_queue_tail(&sar_queue, skb);
1470 len -= pi->max_pdu_size;
1471 size +=pi->max_pdu_size;
1477 if (len > pi->max_pdu_size) {
1478 control |= L2CAP_SDU_CONTINUE;
1479 buflen = pi->max_pdu_size;
1481 control |= L2CAP_SDU_END;
1485 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Failure mid-SDU discards everything built so far. */
1487 skb_queue_purge(&sar_queue);
1488 return PTR_ERR(skb);
1491 __skb_queue_tail(&sar_queue, skb);
1496 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1497 if (sk->sk_send_head == NULL)
1498 sk->sk_send_head = sar_queue.next;
/* sendmsg(2): reject MSG_OOB and oversized basic-mode payloads, require
 * BT_CONNECTED, then dispatch: DGRAM sockets send a connectionless PDU;
 * basic mode sends one B-frame; ERTM/streaming either queue a single
 * unsegmented I-frame or SAR-segment the SDU, then drive the mode's
 * transmit routine.  Socket locking and several error branches are in
 * elided lines. */
1503 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1505 struct sock *sk = sock->sk;
1506 struct l2cap_pinfo *pi = l2cap_pi(sk);
1507 struct sk_buff *skb;
1511 BT_DBG("sock %p, sk %p", sock, sk);
1513 err = sock_error(sk);
1517 if (msg->msg_flags & MSG_OOB)
1520 /* Check outgoing MTU */
1521 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
/* (len > pi->omtu comparison elided) */
1527 if (sk->sk_state != BT_CONNECTED) {
1532 /* Connectionless channel */
1533 if (sk->sk_type == SOCK_DGRAM) {
1534 skb = l2cap_create_connless_pdu(sk, msg, len);
1535 err = l2cap_do_send(sk, skb);
1540 case L2CAP_MODE_BASIC:
1541 /* Create a basic PDU */
1542 skb = l2cap_create_basic_pdu(sk, msg, len);
1548 err = l2cap_do_send(sk, skb);
1553 case L2CAP_MODE_ERTM:
1554 case L2CAP_MODE_STREAMING:
1555 /* Entire SDU fits into one PDU */
1556 if (len <= pi->max_pdu_size) {
1557 control = L2CAP_SDU_UNSEGMENTED;
1558 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1563 __skb_queue_tail(TX_QUEUE(sk), skb);
1564 if (sk->sk_send_head == NULL)
1565 sk->sk_send_head = skb;
1567 /* Segment SDU into multiples PDUs */
1568 err = l2cap_sar_segment_sdu(sk, msg, len);
1573 if (pi->mode == L2CAP_MODE_STREAMING)
1574 err = l2cap_streaming_send(sk);
1576 err = l2cap_ertm_send(sk);
1583 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg(2): a first read on a deferred-setup socket in BT_CONNECT2
 * completes the handshake by sending the success CONN_RSP and moving to
 * BT_CONFIG; everything else is delegated to bt_sock_recvmsg(). */
1592 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1594 struct sock *sk = sock->sk;
1598 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1599 struct l2cap_conn_rsp rsp;
1601 sk->sk_state = BT_CONFIG;
/* scid/dcid swapped: response fields are from the peer's view. */
1603 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1604 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1605 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1606 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1607 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1608 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1616 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler.  Handles L2CAP_OPTIONS (MTUs,
 * flush timeout, mode) and the old L2CAP_LM link-mode bitmask, which is
 * translated into the newer sec_level plus role_switch/force_reliable
 * flags.  NOTE(review): extract is non-contiguous; the switch statement
 * and error handling are partially out of view — code kept verbatim.
 */
1619 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1621 struct sock *sk = sock->sk;
1622 struct l2cap_options opts;
1626 BT_DBG("sk %p", sk);
/* Seed opts with current values so a short copy_from_user keeps the rest. */
1632 opts.imtu = l2cap_pi(sk)->imtu;
1633 opts.omtu = l2cap_pi(sk)->omtu;
1634 opts.flush_to = l2cap_pi(sk)->flush_to;
1635 opts.mode = l2cap_pi(sk)->mode;
1637 len = min_t(unsigned int, sizeof(opts), optlen);
1638 if (copy_from_user((char *) &opts, optval, len)) {
1643 l2cap_pi(sk)->imtu = opts.imtu;
1644 l2cap_pi(sk)->omtu = opts.omtu;
1645 l2cap_pi(sk)->mode = opts.mode;
1649 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode bits to security levels (highest bit set wins). */
1654 if (opt & L2CAP_LM_AUTH)
1655 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1656 if (opt & L2CAP_LM_ENCRYPT)
1657 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1658 if (opt & L2CAP_LM_SECURE)
1659 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1661 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1662 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * setsockopt() entry point.  SOL_L2CAP is forwarded to the legacy
 * handler; only SOL_BLUETOOTH is handled here (BT_SECURITY and
 * BT_DEFER_SETUP).  NOTE(review): extract is non-contiguous; the
 * optname switch header and error paths are out of view — code verbatim.
 */
1674 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1676 struct sock *sk = sock->sk;
1677 struct bt_security sec;
1681 BT_DBG("sk %p", sk);
1683 if (level == SOL_L2CAP)
1684 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1686 if (level != SOL_BLUETOOTH)
1687 return -ENOPROTOOPT;
/* BT_SECURITY only applies to SEQPACKET and RAW sockets. */
1693 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1698 sec.level = BT_SECURITY_LOW;
1700 len = min_t(unsigned int, sizeof(sec), optlen);
1701 if (copy_from_user((char *) &sec, optval, len)) {
1706 if (sec.level < BT_SECURITY_LOW ||
1707 sec.level > BT_SECURITY_HIGH) {
1712 l2cap_pi(sk)->sec_level = sec.level;
1715 case BT_DEFER_SETUP:
/* Deferred setup can only be toggled before/while listening. */
1716 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1721 if (get_user(opt, (u32 __user *) optval)) {
1726 bt_sk(sk)->defer_setup = opt;
1738 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1740 struct sock *sk = sock->sk;
1741 struct l2cap_options opts;
1742 struct l2cap_conninfo cinfo;
1746 BT_DBG("sk %p", sk);
1748 if (get_user(len, optlen))
1755 opts.imtu = l2cap_pi(sk)->imtu;
1756 opts.omtu = l2cap_pi(sk)->omtu;
1757 opts.flush_to = l2cap_pi(sk)->flush_to;
1758 opts.mode = l2cap_pi(sk)->mode;
1760 len = min_t(unsigned int, len, sizeof(opts));
1761 if (copy_to_user(optval, (char *) &opts, len))
1767 switch (l2cap_pi(sk)->sec_level) {
1768 case BT_SECURITY_LOW:
1769 opt = L2CAP_LM_AUTH;
1771 case BT_SECURITY_MEDIUM:
1772 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1774 case BT_SECURITY_HIGH:
1775 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1783 if (l2cap_pi(sk)->role_switch)
1784 opt |= L2CAP_LM_MASTER;
1786 if (l2cap_pi(sk)->force_reliable)
1787 opt |= L2CAP_LM_RELIABLE;
1789 if (put_user(opt, (u32 __user *) optval))
1793 case L2CAP_CONNINFO:
1794 if (sk->sk_state != BT_CONNECTED &&
1795 !(sk->sk_state == BT_CONNECT2 &&
1796 bt_sk(sk)->defer_setup)) {
1801 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1802 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1804 len = min_t(unsigned int, len, sizeof(cinfo));
1805 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * getsockopt() entry point.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH serves BT_SECURITY and BT_DEFER_SETUP.
 * NOTE(review): extract is non-contiguous; code kept verbatim.
 */
1819 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1821 struct sock *sk = sock->sk;
1822 struct bt_security sec;
1825 BT_DBG("sk %p", sk);
1827 if (level == SOL_L2CAP)
1828 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1830 if (level != SOL_BLUETOOTH)
1831 return -ENOPROTOOPT;
1833 if (get_user(len, optlen))
/* BT_SECURITY is only meaningful for SEQPACKET and RAW sockets. */
1840 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1845 sec.level = l2cap_pi(sk)->sec_level;
1847 len = min_t(unsigned int, len, sizeof(sec));
1848 if (copy_to_user(optval, (char *) &sec, len))
1853 case BT_DEFER_SETUP:
1854 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1859 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown() handler: marks the socket fully shut down, cancels the
 * channel timer and closes the channel; with SO_LINGER set it waits
 * (up to sk_lingertime) for the BT_CLOSED state.
 * NOTE(review): extract is non-contiguous; locking around this sequence
 * is out of view — code kept verbatim.
 */
1873 static int l2cap_sock_shutdown(struct socket *sock, int how)
1875 struct sock *sk = sock->sk;
1878 BT_DBG("sock %p, sk %p", sock, sk);
1884 if (!sk->sk_shutdown) {
1885 sk->sk_shutdown = SHUTDOWN_MASK;
1886 l2cap_sock_clear_timer(sk);
1887 __l2cap_sock_close(sk, 0);
1889 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1890 err = bt_sock_wait_state(sk, BT_CLOSED,
/*
 * release() handler: performs a full shutdown (both directions) and
 * then kills the socket.  NOTE(review): extract is non-contiguous;
 * the NULL-sk guard and return are out of view — code kept verbatim.
 */
1897 static int l2cap_sock_release(struct socket *sock)
1899 struct sock *sk = sock->sk;
1902 BT_DBG("sock %p, sk %p", sock, sk);
1907 err = l2cap_sock_shutdown(sock, 2);
1910 l2cap_sock_kill(sk);
<doc_update>
/*
 * Called when channel configuration completes.  Clears config state and
 * the setup timer, then wakes whichever side is waiting: the connecting
 * socket itself (outgoing) or the listening parent (incoming).
 * NOTE(review): extract is non-contiguous; the if/else on `parent` that
 * separates the two branches is out of view — code kept verbatim.
 */
1914 static void l2cap_chan_ready(struct sock *sk)
1916 struct sock *parent = bt_sk(sk)->parent;
1918 BT_DBG("sk %p, parent %p", sk, parent);
1920 l2cap_pi(sk)->conf_state = 0;
1921 l2cap_sock_clear_timer(sk);
1924 /* Outgoing channel.
1925 * Wake up socket sleeping on connect.
1927 sk->sk_state = BT_CONNECTED;
1928 sk->sk_state_change(sk);
1930 /* Incoming channel.
1931 * Wake up socket sleeping on accept.
1933 parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection */
/*
 * Walks the connection's channel list under the read lock and queues a
 * clone of the skb to every SOCK_RAW socket (used for signalling
 * sniffing).  NOTE(review): extract is non-contiguous; `continue`
 * statements and the clone-failure path are out of view — code verbatim.
 */
1938 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1940 struct l2cap_chan_list *l = &conn->chan_list;
1941 struct sk_buff *nskb;
1944 BT_DBG("conn %p", conn);
1946 read_lock(&l->lock);
1947 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1948 if (sk->sk_type != SOCK_RAW)
1951 /* Don't send frame to the socket it came from */
1954 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receive queue rejects the clone it must be freed (out of view). */
1958 if (sock_queue_rcv_skb(sk, nskb))
1961 read_unlock(&l->lock);
/* ---- L2CAP signalling commands ---- */
/*
 * Builds an skb carrying one L2CAP signalling command on the signalling
 * CID: L2CAP header + command header + payload, fragmented into
 * frag_list continuation skbs when the total exceeds the link MTU.
 * NOTE(review): extract is non-contiguous; allocation-failure cleanup,
 * the fragment loop condition and the final return are out of view —
 * code kept verbatim.
 */
1965 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1966 u8 code, u8 ident, u16 dlen, void *data)
1968 struct sk_buff *skb, **frag;
1969 struct l2cap_cmd_hdr *cmd;
1970 struct l2cap_hdr *lh;
1973 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1974 conn, code, ident, dlen);
1976 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
/* First fragment is capped by the connection MTU. */
1977 count = min_t(unsigned int, conn->mtu, len);
1979 skb = bt_skb_alloc(count, GFP_ATOMIC);
1983 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1984 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1985 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1987 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1990 cmd->len = cpu_to_le16(dlen);
1993 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1994 memcpy(skb_put(skb, count), data, count);
2000 /* Continuation fragments (no L2CAP header) */
2001 frag = &skb_shinfo(skb)->frag_list;
2003 count = min_t(unsigned int, conn->mtu, len);
2005 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2009 memcpy(skb_put(*frag, count), data, count);
2014 frag = &(*frag)->next;
/*
 * Parses one configuration option from *ptr: fills *type/*olen and
 * decodes *val by option length (1/2/4 bytes little-endian, otherwise a
 * pointer to the raw value).  Returns the consumed length
 * (L2CAP_CONF_OPT_SIZE + opt->len); advances *ptr (advance out of view).
 * NOTE(review): extract is non-contiguous — code kept verbatim.
 */
2024 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2026 struct l2cap_conf_opt *opt = *ptr;
2029 len = L2CAP_CONF_OPT_SIZE + opt->len;
2037 *val = *((u8 *) opt->val);
2041 *val = __le16_to_cpu(*((__le16 *) opt->val));
2045 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Larger options are returned by reference, not copied. */
2049 *val = (unsigned long) opt->val;
2053 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Appends one configuration option at *ptr, encoding `val` by `len`
 * (1/2/4 bytes little-endian, otherwise memcpy of the buffer `val`
 * points to), then advances *ptr past the option.
 * NOTE(review): extract is non-contiguous (switch framing out of view) —
 * code kept verbatim.
 */
2057 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2059 struct l2cap_conf_opt *opt = *ptr;
2061 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2068 *((u8 *) opt->val) = val;
2072 *((__le16 *) opt->val) = cpu_to_le16(val);
2076 *((__le32 *) opt->val) = cpu_to_le32(val);
/* For long options `val` carries a pointer to the source buffer. */
2080 memcpy(opt->val, (void *) val, len);
2084 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Returns non-zero when `mode` is supported by both the remote feature
 * mask and the local one.  ERTM/streaming bits are added to the local
 * mask conditionally (guard out of view; presumably the enable_ertm
 * module parameter — TODO confirm).
 */
2087 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2089 u32 local_feat_mask = l2cap_feat_mask;
2091 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2094 case L2CAP_MODE_ERTM:
2095 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2096 case L2CAP_MODE_STREAMING:
2097 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Picks the channel mode to use: keeps the requested ERTM/streaming
 * mode when the remote supports it (return-of-mode line out of view),
 * otherwise falls back to basic mode.
 */
2103 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2106 case L2CAP_MODE_STREAMING:
2107 case L2CAP_MODE_ERTM:
2108 if (l2cap_mode_supported(mode, remote_feat_mask))
2112 return L2CAP_MODE_BASIC;
/*
 * Builds an outgoing L2CAP configuration request into `data`.  On the
 * first request it (re)selects the channel mode against the remote
 * feature mask; then emits the MTU option (basic mode) or an RFC option
 * describing ERTM/streaming parameters.  Returns length (out of view).
 * NOTE(review): extract is non-contiguous; switch framing, breaks and
 * the final return are missing from view — code kept verbatim.
 */
2116 static int l2cap_build_conf_req(struct sock *sk, void *data)
2118 struct l2cap_pinfo *pi = l2cap_pi(sk);
2119 struct l2cap_conf_req *req = data;
2120 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
2121 void *ptr = req->data;
2123 BT_DBG("sk %p", sk);
/* Mode (re)selection only happens before any config exchange occurred. */
2125 if (pi->num_conf_req || pi->num_conf_rsp)
2129 case L2CAP_MODE_STREAMING:
2130 case L2CAP_MODE_ERTM:
2131 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2132 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2133 l2cap_send_disconn_req(pi->conn, sk);
2136 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2142 case L2CAP_MODE_BASIC:
/* Only advertise the MTU when it differs from the spec default. */
2143 if (pi->imtu != L2CAP_DEFAULT_MTU)
2144 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2147 case L2CAP_MODE_ERTM:
2148 rfc.mode = L2CAP_MODE_ERTM;
2149 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2150 rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
/* Timeouts are left 0 in the request; the responder fills them in. */
2151 rfc.retrans_timeout = 0;
2152 rfc.monitor_timeout = 0;
2153 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2155 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2156 sizeof(rfc), (unsigned long) &rfc);
2159 case L2CAP_MODE_STREAMING:
2160 rfc.mode = L2CAP_MODE_STREAMING;
2162 rfc.max_transmit = 0;
2163 rfc.retrans_timeout = 0;
2164 rfc.monitor_timeout = 0;
2165 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2167 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2168 sizeof(rfc), (unsigned long) &rfc);
2172 /* FIXME: Need actual value of the flush timeout */
2173 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2174 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2176 req->dcid = cpu_to_le16(pi->dcid);
2177 req->flags = cpu_to_le16(0);
/*
 * Parses the peer's accumulated configuration request (pi->conf_req)
 * and builds the response into `data`.  Collects MTU/RFC options,
 * rejects unknown non-hint options with L2CAP_CONF_UNKNOWN, resolves
 * the channel mode (refusing with -ECONNREFUSED on an unsupportable
 * mode), then emits output options and the result code.
 * NOTE(review): extract is non-contiguous; option-switch framing,
 * breaks and the length-return are missing from view — code verbatim.
 */
2182 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2184 struct l2cap_pinfo *pi = l2cap_pi(sk);
2185 struct l2cap_conf_rsp *rsp = data;
2186 void *ptr = rsp->data;
2187 void *req = pi->conf_req;
2188 int len = pi->conf_len;
2189 int type, hint, olen;
2191 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2192 u16 mtu = L2CAP_DEFAULT_MTU;
2193 u16 result = L2CAP_CONF_SUCCESS;
2195 BT_DBG("sk %p", sk);
2197 while (len >= L2CAP_CONF_OPT_SIZE) {
2198 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; mandatory ones may not. */
2200 hint = type & L2CAP_CONF_HINT;
2201 type &= L2CAP_CONF_MASK;
2204 case L2CAP_CONF_MTU:
2208 case L2CAP_CONF_FLUSH_TO:
2212 case L2CAP_CONF_QOS:
2215 case L2CAP_CONF_RFC:
/* NOTE(review): olen is attacker-controlled; only the exact-size
 * case is copied here — confirm no other path trusts olen. */
2216 if (olen == sizeof(rfc))
2217 memcpy(&rfc, (void *) val, olen);
2224 result = L2CAP_CONF_UNKNOWN;
2225 *((u8 *) ptr++) = type;
2230 if (pi->num_conf_rsp || pi->num_conf_req)
2234 case L2CAP_MODE_STREAMING:
2235 case L2CAP_MODE_ERTM:
2236 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2237 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2238 return -ECONNREFUSED;
2241 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2246 if (pi->mode != rfc.mode) {
2247 result = L2CAP_CONF_UNACCEPT;
2248 rfc.mode = pi->mode;
/* Give the peer one chance to re-propose; refuse on the second round. */
2250 if (pi->num_conf_rsp == 1)
2251 return -ECONNREFUSED;
2253 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2254 sizeof(rfc), (unsigned long) &rfc);
2258 if (result == L2CAP_CONF_SUCCESS) {
2259 /* Configure output options and let the other side know
2260 * which ones we don't like. */
2262 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2263 result = L2CAP_CONF_UNACCEPT;
2266 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2268 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2271 case L2CAP_MODE_BASIC:
2272 pi->fcs = L2CAP_FCS_NONE;
2273 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2276 case L2CAP_MODE_ERTM:
2277 pi->remote_tx_win = rfc.txwin_size;
2278 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): rfc.max_pdu_size is little-endian on the wire; no
 * le16_to_cpu here, unlike l2cap_parse_conf_rsp — verify. */
2279 pi->max_pdu_size = rfc.max_pdu_size;
2281 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2282 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2284 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2287 case L2CAP_MODE_STREAMING:
2288 pi->remote_tx_win = rfc.txwin_size;
2289 pi->max_pdu_size = rfc.max_pdu_size;
2291 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2295 result = L2CAP_CONF_UNACCEPT;
2297 memset(&rfc, 0, sizeof(rfc));
2298 rfc.mode = pi->mode;
2301 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2302 sizeof(rfc), (unsigned long) &rfc);
2304 if (result == L2CAP_CONF_SUCCESS)
2305 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2307 rsp->scid = cpu_to_le16(pi->dcid);
2308 rsp->result = cpu_to_le16(result);
2309 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parses the peer's configuration *response* and builds our follow-up
 * request into `data`, echoing adjusted MTU/flush/RFC options.  Refuses
 * (-ECONNREFUSED) if the peer tries to change a mode this side already
 * fixed (STATE2_DEVICE).  On success, latches negotiated ERTM/streaming
 * parameters into the channel.
 * NOTE(review): extract is non-contiguous; switch framing, breaks and
 * the length-return are missing from view.  `rfc` is read in the
 * success block even though it is only initialized when an RFC option
 * was present — confirm against the full source.  Code kept verbatim.
 */
2314 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2316 struct l2cap_pinfo *pi = l2cap_pi(sk);
2317 struct l2cap_conf_req *req = data;
2318 void *ptr = req->data;
2321 struct l2cap_conf_rfc rfc;
2323 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2325 while (len >= L2CAP_CONF_OPT_SIZE) {
2326 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2329 case L2CAP_CONF_MTU:
/* Clamp an unacceptably small peer MTU up to the spec minimum. */
2330 if (val < L2CAP_DEFAULT_MIN_MTU) {
2331 *result = L2CAP_CONF_UNACCEPT;
2332 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2335 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2338 case L2CAP_CONF_FLUSH_TO:
2340 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2344 case L2CAP_CONF_RFC:
2345 if (olen == sizeof(rfc))
2346 memcpy(&rfc, (void *)val, olen);
2348 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2349 rfc.mode != pi->mode)
2350 return -ECONNREFUSED;
2352 pi->mode = rfc.mode;
2355 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2356 sizeof(rfc), (unsigned long) &rfc);
2361 if (*result == L2CAP_CONF_SUCCESS) {
2363 case L2CAP_MODE_ERTM:
2364 pi->remote_tx_win = rfc.txwin_size;
2365 pi->retrans_timeout = rfc.retrans_timeout;
2366 pi->monitor_timeout = rfc.monitor_timeout;
2367 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2369 case L2CAP_MODE_STREAMING:
2370 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2375 req->dcid = cpu_to_le16(pi->dcid);
2376 req->flags = cpu_to_le16(0x0000);
/*
 * Builds a bare configuration response header (scid/result/flags, no
 * options) into `data`; used for empty and reject responses.  Returns
 * the response length (return statement out of view).
 */
2381 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2383 struct l2cap_conf_rsp *rsp = data;
2384 void *ptr = rsp->data;
2386 BT_DBG("sk %p", sk);
2388 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2389 rsp->result = cpu_to_le16(result);
2390 rsp->flags = cpu_to_le16(flags);
/*
 * Handles an incoming L2CAP Command Reject.  A "command not understood"
 * reject (reason 0) matching our outstanding information request is
 * treated as completion of the feature-mask exchange: stop the info
 * timer, mark the exchange done and start pending channels.
 */
2395 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2397 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2399 if (rej->reason != 0x0000)
2402 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2403 cmd->ident == conn->info_ident) {
2404 del_timer(&conn->info_timer);
2406 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2407 conn->info_ident = 0;
2409 l2cap_conn_start(conn);
/*
 * Handles an incoming L2CAP Connection Request: looks up a listening
 * socket for the PSM, enforces link security (except for SDP, PSM
 * 0x0001), checks the accept backlog and duplicate dcid, creates and
 * initializes the child socket, then answers with success / pending /
 * refusal.  If the feature-mask exchange has not completed yet, it also
 * kicks off an information request.
 * NOTE(review): extract is non-contiguous; goto labels, lock_sock on
 * the parent and several error paths are missing from view — code
 * kept verbatim.
 */
2415 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2417 struct l2cap_chan_list *list = &conn->chan_list;
2418 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2419 struct l2cap_conn_rsp rsp;
2420 struct sock *sk, *parent;
2421 int result, status = L2CAP_CS_NO_INFO;
2423 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2424 __le16 psm = req->psm;
2426 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2428 /* Check if we have socket listening on psm */
2429 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2431 result = L2CAP_CR_BAD_PSM;
2435 /* Check if the ACL is secure enough (if not SDP) */
2436 if (psm != cpu_to_le16(0x0001) &&
2437 !hci_conn_check_link_mode(conn->hcon)) {
2438 conn->disc_reason = 0x05;
2439 result = L2CAP_CR_SEC_BLOCK;
2443 result = L2CAP_CR_NO_MEM;
2445 /* Check for backlog size */
2446 if (sk_acceptq_is_full(parent)) {
2447 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2451 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2455 write_lock_bh(&list->lock);
2457 /* Check if we already have channel with that dcid */
2458 if (__l2cap_get_chan_by_dcid(list, scid)) {
2459 write_unlock_bh(&list->lock);
2460 sock_set_flag(sk, SOCK_ZAPPED);
2461 l2cap_sock_kill(sk);
2465 hci_conn_hold(conn->hcon);
2467 l2cap_sock_init(sk, parent);
2468 bacpy(&bt_sk(sk)->src, conn->src);
2469 bacpy(&bt_sk(sk)->dst, conn->dst);
2470 l2cap_pi(sk)->psm = psm;
/* Peer's source CID becomes our destination CID and vice versa. */
2471 l2cap_pi(sk)->dcid = scid;
2473 __l2cap_chan_add(conn, sk, parent);
2474 dcid = l2cap_pi(sk)->scid;
2476 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Save ident so a deferred accept can still answer this request. */
2478 l2cap_pi(sk)->ident = cmd->ident;
2480 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2481 if (l2cap_check_security(sk)) {
2482 if (bt_sk(sk)->defer_setup) {
2483 sk->sk_state = BT_CONNECT2;
2484 result = L2CAP_CR_PEND;
2485 status = L2CAP_CS_AUTHOR_PEND;
2486 parent->sk_data_ready(parent, 0);
2488 sk->sk_state = BT_CONFIG;
2489 result = L2CAP_CR_SUCCESS;
2490 status = L2CAP_CS_NO_INFO;
2493 sk->sk_state = BT_CONNECT2;
2494 result = L2CAP_CR_PEND;
2495 status = L2CAP_CS_AUTHEN_PEND;
2498 sk->sk_state = BT_CONNECT2;
2499 result = L2CAP_CR_PEND;
2500 status = L2CAP_CS_NO_INFO;
2503 write_unlock_bh(&list->lock);
2506 bh_unlock_sock(parent);
2509 rsp.scid = cpu_to_le16(scid);
2510 rsp.dcid = cpu_to_le16(dcid);
2511 rsp.result = cpu_to_le16(result);
2512 rsp.status = cpu_to_le16(status);
2513 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2515 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2516 struct l2cap_info_req info;
2517 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2519 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2520 conn->info_ident = l2cap_get_ident(conn);
2522 mod_timer(&conn->info_timer, jiffies +
2523 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2525 l2cap_send_cmd(conn, conn->info_ident,
2526 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handles an incoming L2CAP Connection Response.  Finds the channel by
 * scid (or, failing that, by request ident), then: on success moves to
 * BT_CONFIG and fires the first configuration request; on pending marks
 * CONNECT_PEND; otherwise tears the channel down with ECONNREFUSED.
 * NOTE(review): extract is non-contiguous; switch framing, a local
 * `req` buffer declaration and unlock paths are missing from view —
 * code kept verbatim.
 */
2532 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2534 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2535 u16 scid, dcid, result, status;
2539 scid = __le16_to_cpu(rsp->scid);
2540 dcid = __le16_to_cpu(rsp->dcid);
2541 result = __le16_to_cpu(rsp->result);
2542 status = __le16_to_cpu(rsp->status);
2544 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2547 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* A reject may carry scid 0; fall back to matching by command ident. */
2551 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2557 case L2CAP_CR_SUCCESS:
2558 sk->sk_state = BT_CONFIG;
2559 l2cap_pi(sk)->ident = 0;
2560 l2cap_pi(sk)->dcid = dcid;
2561 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2563 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2565 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2566 l2cap_build_conf_req(sk, req), req);
2567 l2cap_pi(sk)->num_conf_req++;
2571 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2575 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handles an incoming L2CAP Configuration Request.  Accumulates option
 * data into pi->conf_req across continuation fragments (flag 0x0001),
 * rejecting if the buffer would overflow; when the request is complete
 * it parses options, sends the response, and — once both directions are
 * configured — initializes ERTM timers/sequence state and readies the
 * channel.  Sends our own config request if we have not yet.
 * NOTE(review): extract is non-contiguous; rsp/buf declarations, unlock
 * paths and several `goto` targets are missing from view — code
 * kept verbatim.
 */
2583 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2585 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2591 dcid = __le16_to_cpu(req->dcid);
2592 flags = __le16_to_cpu(req->flags);
2594 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2596 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2600 if (sk->sk_state == BT_DISCONN)
2603 /* Reject if config buffer is too small. */
2604 len = cmd_len - sizeof(*req);
2605 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2606 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2607 l2cap_build_conf_rsp(sk, rsp,
2608 L2CAP_CONF_REJECT, flags), rsp);
2613 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2614 l2cap_pi(sk)->conf_len += len;
2616 if (flags & 0x0001) {
2617 /* Incomplete config. Send empty response. */
2618 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2619 l2cap_build_conf_rsp(sk, rsp,
2620 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2624 /* Complete config. */
2625 len = l2cap_parse_conf_req(sk, rsp);
/* Negative length means the request was unsalvageable: disconnect. */
2627 l2cap_send_disconn_req(conn, sk);
2631 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2632 l2cap_pi(sk)->num_conf_rsp++;
2634 /* Reset config buffer. */
2635 l2cap_pi(sk)->conf_len = 0;
2637 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2640 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2641 sk->sk_state = BT_CONNECTED;
2642 l2cap_pi(sk)->next_tx_seq = 0;
2643 l2cap_pi(sk)->expected_ack_seq = 0;
2644 l2cap_pi(sk)->unacked_frames = 0;
2646 setup_timer(&l2cap_pi(sk)->retrans_timer,
2647 l2cap_retrans_timeout, (unsigned long) sk);
2648 setup_timer(&l2cap_pi(sk)->monitor_timer,
2649 l2cap_monitor_timeout, (unsigned long) sk);
2651 __skb_queue_head_init(TX_QUEUE(sk));
2652 l2cap_chan_ready(sk);
2656 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2658 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2659 l2cap_build_conf_req(sk, buf), buf);
2660 l2cap_pi(sk)->num_conf_req++;
/*
 * Handles an incoming L2CAP Configuration Response.  On UNACCEPT it
 * re-parses the peer's counter-proposal and retries (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); on hard failure it schedules a disconnect.
 * When both input and output config are done, resets receive-side
 * sequence state and readies the channel.
 * NOTE(review): extract is non-contiguous; a local `req` buffer, goto
 * targets and unlock paths are out of view.  Also note cmd->len is
 * used raw at 2691 while everywhere else le16_to_cpu is applied —
 * confirm against the full source.  Code kept verbatim.
 */
2668 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2670 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2671 u16 scid, flags, result;
2674 scid = __le16_to_cpu(rsp->scid);
2675 flags = __le16_to_cpu(rsp->flags);
2676 result = __le16_to_cpu(rsp->result);
2678 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2679 scid, flags, result);
2681 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2686 case L2CAP_CONF_SUCCESS:
2689 case L2CAP_CONF_UNACCEPT:
2690 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2691 int len = cmd->len - sizeof(*rsp);
2694 /* throw out any old stored conf requests */
2695 result = L2CAP_CONF_SUCCESS;
2696 len = l2cap_parse_conf_rsp(sk, rsp->data,
2699 l2cap_send_disconn_req(conn, sk);
2703 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2704 L2CAP_CONF_REQ, len, req);
2705 l2cap_pi(sk)->num_conf_req++;
2706 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: reset the peer and disconnect after 5s grace. */
2712 sk->sk_state = BT_DISCONN;
2713 sk->sk_err = ECONNRESET;
2714 l2cap_sock_set_timer(sk, HZ * 5);
2715 l2cap_send_disconn_req(conn, sk);
2722 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2724 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2725 sk->sk_state = BT_CONNECTED;
2726 l2cap_pi(sk)->expected_tx_seq = 0;
2727 l2cap_pi(sk)->num_to_ack = 0;
2728 __skb_queue_head_init(TX_QUEUE(sk));
2729 l2cap_chan_ready(sk);
/*
 * Handles an incoming L2CAP Disconnection Request: acknowledges with a
 * Disconnection Response, marks the socket shut down, purges the TX
 * queue, stops ERTM timers, and removes/kills the channel.
 * NOTE(review): extract is non-contiguous; lookup-failure and unlock
 * paths are missing from view — code kept verbatim.
 */
2737 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2739 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2740 struct l2cap_disconn_rsp rsp;
2744 scid = __le16_to_cpu(req->scid);
2745 dcid = __le16_to_cpu(req->dcid);
2747 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid, so look up the channel by dcid. */
2749 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2753 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2754 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2755 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2757 sk->sk_shutdown = SHUTDOWN_MASK;
2759 skb_queue_purge(TX_QUEUE(sk));
2760 del_timer(&l2cap_pi(sk)->retrans_timer);
2761 del_timer(&l2cap_pi(sk)->monitor_timer);
2763 l2cap_chan_del(sk, ECONNRESET);
2766 l2cap_sock_kill(sk);
/*
 * Handles an incoming L2CAP Disconnection Response (completion of a
 * disconnect we initiated): purges the TX queue, stops ERTM timers and
 * removes/kills the channel with no error.
 * NOTE(review): extract is non-contiguous; lookup-failure and unlock
 * paths are missing from view — code kept verbatim.
 */
2770 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2772 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2776 scid = __le16_to_cpu(rsp->scid);
2777 dcid = __le16_to_cpu(rsp->dcid);
2779 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2781 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2785 skb_queue_purge(TX_QUEUE(sk));
2786 del_timer(&l2cap_pi(sk)->retrans_timer);
2787 del_timer(&l2cap_pi(sk)->monitor_timer);
2789 l2cap_chan_del(sk, 0);
2792 l2cap_sock_kill(sk);
/*
 * Handles an incoming L2CAP Information Request: answers FEAT_MASK with
 * our feature bits (ERTM/streaming added conditionally; guard out of
 * view), FIXED_CHAN with the fixed-channel bitmap, and anything else
 * with NOTSUPP.  NOTE(review): extract is non-contiguous; the local
 * `buf` declarations are out of view — code kept verbatim.
 */
2796 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2798 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2801 type = __le16_to_cpu(req->type);
2803 BT_DBG("type 0x%4.4x", type);
2805 if (type == L2CAP_IT_FEAT_MASK) {
2807 u32 feat_mask = l2cap_feat_mask;
2808 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2809 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2810 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2812 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
/* Feature mask may be unaligned inside buf, hence put_unaligned. */
2813 put_unaligned(cpu_to_le32(feat_mask), (__le32 *) rsp->data);
2814 l2cap_send_cmd(conn, cmd->ident,
2815 L2CAP_INFO_RSP, sizeof(buf), buf);
2816 } else if (type == L2CAP_IT_FIXED_CHAN) {
2818 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2819 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2820 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2821 memcpy(buf + 4, l2cap_fixed_chan, 8);
2822 l2cap_send_cmd(conn, cmd->ident,
2823 L2CAP_INFO_RSP, sizeof(buf), buf);
2825 struct l2cap_info_rsp rsp;
2826 rsp.type = cpu_to_le16(type);
2827 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2828 l2cap_send_cmd(conn, cmd->ident,
2829 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handles an incoming L2CAP Information Response.  A FEAT_MASK answer
 * is stored; if the peer supports fixed channels a follow-up
 * FIXED_CHAN query is issued, otherwise (and after the FIXED_CHAN
 * answer) the exchange is marked done and pending channels are started.
 * NOTE(review): extract is non-contiguous; the duplicate/ident guard
 * and returns are missing from view — code kept verbatim.
 */
2835 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2837 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2840 type = __le16_to_cpu(rsp->type);
2841 result = __le16_to_cpu(rsp->result);
2843 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2845 del_timer(&conn->info_timer);
2847 if (type == L2CAP_IT_FEAT_MASK) {
2848 conn->feat_mask = get_unaligned_le32(rsp->data);
2850 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2851 struct l2cap_info_req req;
2852 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2854 conn->info_ident = l2cap_get_ident(conn);
2856 l2cap_send_cmd(conn, conn->info_ident,
2857 L2CAP_INFO_REQ, sizeof(req), &req);
2859 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2860 conn->info_ident = 0;
2862 l2cap_conn_start(conn);
2864 } else if (type == L2CAP_IT_FIXED_CHAN) {
2865 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2866 conn->info_ident = 0;
2868 l2cap_conn_start(conn);
/*
 * Dispatcher for the L2CAP signalling channel.  Mirrors the frame to
 * raw sockets, then walks the packet command-by-command, validating
 * each header (length within bounds, non-zero ident) and dispatching
 * to the per-command handlers.  A handler error triggers a Command
 * Reject back to the peer.
 * NOTE(review): extract is non-contiguous; the per-iteration data/len
 * advance after dispatch and the final kfree_skb are missing from
 * view — code kept verbatim.
 */
2874 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2876 u8 *data = skb->data;
2878 struct l2cap_cmd_hdr cmd;
2881 l2cap_raw_recv(conn, skb);
2883 while (len >= L2CAP_CMD_HDR_SIZE) {
2885 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2886 data += L2CAP_CMD_HDR_SIZE;
2887 len -= L2CAP_CMD_HDR_SIZE;
2889 cmd_len = le16_to_cpu(cmd.len);
2891 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Guard against truncated commands and the reserved ident 0. */
2893 if (cmd_len > len || !cmd.ident) {
2894 BT_DBG("corrupted command");
2899 case L2CAP_COMMAND_REJ:
2900 l2cap_command_rej(conn, &cmd, data);
2903 case L2CAP_CONN_REQ:
2904 err = l2cap_connect_req(conn, &cmd, data);
2907 case L2CAP_CONN_RSP:
2908 err = l2cap_connect_rsp(conn, &cmd, data);
2911 case L2CAP_CONF_REQ:
2912 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2915 case L2CAP_CONF_RSP:
2916 err = l2cap_config_rsp(conn, &cmd, data);
2919 case L2CAP_DISCONN_REQ:
2920 err = l2cap_disconnect_req(conn, &cmd, data);
2923 case L2CAP_DISCONN_RSP:
2924 err = l2cap_disconnect_rsp(conn, &cmd, data);
2927 case L2CAP_ECHO_REQ:
2928 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2931 case L2CAP_ECHO_RSP:
2934 case L2CAP_INFO_REQ:
2935 err = l2cap_information_req(conn, &cmd, data);
2938 case L2CAP_INFO_RSP:
2939 err = l2cap_information_rsp(conn, &cmd, data);
2943 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2949 struct l2cap_cmd_rej rej;
2950 BT_DBG("error %d", err);
2952 /* FIXME: Map err to a valid reason */
2953 rej.reason = cpu_to_le16(0);
2954 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Reassembles a received SDU from SAR fragments according to the SAR
 * bits of `control`: UNSEGMENTED is queued directly; START allocates
 * pi->sdu sized by the 16-bit SDU length header; CONTINUE appends with
 * an overflow check; the final fragment (default case, END) appends,
 * and when the length matches queues a clone to the socket.
 * NOTE(review): extract is non-contiguous; error returns, skb_pull of
 * the SDU-length header, kfree of pi->sdu and the final return are
 * missing from view — code kept verbatim.
 */
2964 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2966 struct l2cap_pinfo *pi = l2cap_pi(sk);
2967 struct sk_buff *_skb;
2970 switch (control & L2CAP_CTRL_SAR) {
2971 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while a SAR SDU is open is a protocol error. */
2972 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
2977 err = sock_queue_rcv_skb(sk, skb);
2983 case L2CAP_SDU_START:
2984 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
2989 pi->sdu_len = get_unaligned_le16(skb->data);
2992 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2998 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3000 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3001 pi->partial_sdu_len = skb->len;
3005 case L2CAP_SDU_CONTINUE:
3006 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3009 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3011 pi->partial_sdu_len += skb->len;
/* More data than the announced SDU length: drop (handling out of view). */
3012 if (pi->partial_sdu_len > pi->sdu_len)
3020 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3023 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3025 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3026 pi->partial_sdu_len += skb->len;
3028 if (pi->partial_sdu_len == pi->sdu_len) {
3029 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3030 err = sock_queue_rcv_skb(sk, _skb);
/*
 * Processes one received ERTM I-frame.  In-sequence frames are
 * reassembled and advance expected_tx_seq (mod 64); every
 * L2CAP_DEFAULT_NUM_TO_ACK frames an RR acknowledgement S-frame is
 * prepared.  An out-of-sequence frame triggers a REJ S-frame (sent once
 * per recovery, gated by CONN_UNDER_REJ).  The prepared S-frame is sent
 * at the end.  NOTE(review): extract is non-contiguous; error returns
 * and the frame-drop path are missing from view — code kept verbatim.
 */
3044 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3046 struct l2cap_pinfo *pi = l2cap_pi(sk);
3047 u8 tx_seq = __get_txseq(rx_control);
3051 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3053 if (tx_seq == pi->expected_tx_seq) {
/* In-sequence frame ends any outstanding REJ recovery. */
3054 if (pi->conn_state & L2CAP_CONN_UNDER_REJ)
3055 pi->conn_state &= ~L2CAP_CONN_UNDER_REJ;
3057 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
3061 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3062 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3063 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3064 tx_control |= L2CAP_SUPER_RCV_READY;
3065 tx_control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3069 /* Unexpected txSeq. Send a REJ S-frame */
3071 if (!(pi->conn_state & L2CAP_CONN_UNDER_REJ)) {
3072 tx_control |= L2CAP_SUPER_REJECT;
3073 tx_control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3074 pi->conn_state |= L2CAP_CONN_UNDER_REJ;
3082 return l2cap_send_sframe(pi, tx_control);
/*
 * Processes one received ERTM S-frame.  RR with Poll is answered with
 * an RR carrying the Final bit; RR with Final ends the WAIT_F monitor
 * state; plain RR acknowledges frames and resumes sending.  REJ rewinds
 * sk_send_head/next_tx_seq to the requested sequence and retransmits.
 * RNR and SREJ are recognized but not acted on here.
 * NOTE(review): extract is non-contiguous; breaks and the final return
 * are missing from view — code kept verbatim.
 */
3085 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3087 struct l2cap_pinfo *pi = l2cap_pi(sk);
3089 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3091 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3092 case L2CAP_SUPER_RCV_READY:
3093 if (rx_control & L2CAP_CTRL_POLL) {
3094 u16 control = L2CAP_CTRL_FINAL;
3095 control |= L2CAP_SUPER_RCV_READY;
3096 l2cap_send_sframe(l2cap_pi(sk), control);
3097 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* A Final not matching an outstanding Poll is ignored. */
3098 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3101 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3102 del_timer(&pi->monitor_timer);
3104 if (pi->unacked_frames > 0)
3105 __mod_retrans_timer();
3107 pi->expected_ack_seq = __get_reqseq(rx_control);
3108 l2cap_drop_acked_frames(sk);
3109 if (pi->unacked_frames > 0)
3110 __mod_retrans_timer();
3111 l2cap_ertm_send(sk);
3115 case L2CAP_SUPER_REJECT:
3116 pi->expected_ack_seq = __get_reqseq(rx_control);
3117 l2cap_drop_acked_frames(sk);
/* Rewind to the first unacked frame and retransmit from there. */
3119 sk->sk_send_head = TX_QUEUE(sk)->next;
3120 pi->next_tx_seq = pi->expected_ack_seq;
3122 l2cap_ertm_send(sk);
3126 case L2CAP_SUPER_RCV_NOT_READY:
3127 case L2CAP_SUPER_SELECT_REJECT:
/*
 * Delivers one data frame to the channel identified by `cid`, per mode:
 * basic mode queues directly (dropping oversize frames vs imtu); ERTM
 * pulls the control field, drops oversize I-frames (the peer will
 * recover via retransmission) and dispatches to the I/S-frame handlers;
 * streaming mode tolerates sequence gaps and reassembles directly.
 * NOTE(review): extract is non-contiguous; skb_pull of the control
 * field, FCS handling, drop/done labels and unlock paths are missing
 * from view — code kept verbatim.
 */
3134 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3137 struct l2cap_pinfo *pi;
3142 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3144 BT_DBG("unknown cid 0x%4.4x", cid);
3150 BT_DBG("sk %p, len %d", sk, skb->len);
3152 if (sk->sk_state != BT_CONNECTED)
3156 case L2CAP_MODE_BASIC:
3157 /* If socket recv buffers overflows we drop data here
3158 * which is *bad* because L2CAP has to be reliable.
3159 * But we don't have any other choice. L2CAP doesn't
3160 * provide flow control mechanism. */
3162 if (pi->imtu < skb->len)
3165 if (!sock_queue_rcv_skb(sk, skb))
3169 case L2CAP_MODE_ERTM:
3170 control = get_unaligned_le16(skb->data)
3174 if (__is_sar_start(control))
3178 * We can just drop the corrupted I-frame here.
3179 * Receiver will miss it and start proper recovery
3180 * procedures and ask retransmission.
3182 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3185 if (__is_iframe(control))
3186 err = l2cap_data_channel_iframe(sk, control, skb);
3188 err = l2cap_data_channel_sframe(sk, control, skb);
3194 case L2CAP_MODE_STREAMING:
3195 control = get_unaligned_le16(skb->data);
3199 if (__is_sar_start(control))
3202 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3205 tx_seq = __get_txseq(control);
/* Streaming mode: accept gaps, resync expected_tx_seq to the frame. */
3207 if (pi->expected_tx_seq == tx_seq)
3208 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3210 pi->expected_tx_seq = tx_seq + 1;
3212 err = l2cap_sar_reassembly_sdu(sk, skb, control);
3217 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
/*
 * l2cap_conless_channel - deliver a frame received on the connectionless
 * (group) channel.  The target socket is located by @psm and the local
 * address; the socket must be bound or connected and the payload must fit
 * its incoming MTU, otherwise the frame is dropped.
 *
 * NOTE(review): elided listing -- the sk declaration, drop path and
 * closing braces are not visible here.
 */
3231 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3235 	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3239 	BT_DBG("sk %p, len %d", sk, skb->len);
3241 	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3244 	if (l2cap_pi(sk)->imtu < skb->len)
3247 	if (!sock_queue_rcv_skb(sk, skb))
/*
 * l2cap_recv_frame - parse the L2CAP basic header of a fully reassembled
 * frame and route it by destination CID: the signalling channel, the
 * connectionless channel (PSM-addressed), or a connection-oriented data
 * channel.  Frames whose header length does not match the skb length are
 * rejected (drop path elided from this listing).
 */
3259 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3261 	struct l2cap_hdr *lh = (void *) skb->data;
	/* Strip the 4-byte basic header; cid/len are little-endian on the wire. */
3265 	skb_pull(skb, L2CAP_HDR_SIZE);
3266 	cid = __le16_to_cpu(lh->cid);
3267 	len = __le16_to_cpu(lh->len);
3269 	if (len != skb->len) {
3274 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
3277 	case L2CAP_CID_SIGNALING:
3278 		l2cap_sig_channel(conn, skb);
3281 	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with a 2-byte PSM. */
3282 		psm = get_unaligned((__le16 *) skb->data);
3284 		l2cap_conless_channel(conn, psm, skb);
	/* Default: dynamically allocated CID -> data channel. */
3288 		l2cap_data_channel(conn, cid, skb);
3293 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * l2cap_connect_ind - HCI callback: an incoming ACL connection request.
 * Scans all listening L2CAP sockets and builds a link-mode mask:
 * lm1 accumulates flags from sockets bound exactly to this adapter's
 * address, lm2 from wildcard (BDADDR_ANY) listeners.  Only ACL links are
 * accepted.
 *
 * NOTE(review): 'exact' is initialized to 0 but the statement that sets it
 * (on an exact source-address match) is elided from this listing -- confirm
 * against the full file before relying on the exact/wildcard selection.
 */
3295 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3297 	int exact = 0, lm1 = 0, lm2 = 0;
3298 	register struct sock *sk;
3299 	struct hlist_node *node;
3301 	if (type != ACL_LINK)
3304 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3306 	/* Find listening sockets and check their link_mode */
3307 	read_lock(&l2cap_sk_list.lock);
3308 	sk_for_each(sk, node, &l2cap_sk_list.head) {
3309 		if (sk->sk_state != BT_LISTEN)
		/* Socket bound to this specific local adapter address. */
3312 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3313 			lm1 |= HCI_LM_ACCEPT;
3314 			if (l2cap_pi(sk)->role_switch)
3315 				lm1 |= HCI_LM_MASTER;
		/* Wildcard listener: accept on any local adapter. */
3317 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3318 			lm2 |= HCI_LM_ACCEPT;
3319 			if (l2cap_pi(sk)->role_switch)
3320 				lm2 |= HCI_LM_MASTER;
3323 	read_unlock(&l2cap_sk_list.lock);
	/* Prefer the exact-address match over the wildcard result. */
3325 	return exact ? lm1 : lm2;
/*
 * l2cap_connect_cfm - HCI callback: ACL connection establishment finished.
 * On success (status 0) an L2CAP connection object is attached to the HCI
 * connection and pending channels are kicked via l2cap_conn_ready(); on
 * failure the connection is torn down with the HCI status mapped to an
 * errno by bt_err().  Non-ACL links are ignored (branch elided).
 */
3328 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3330 	struct l2cap_conn *conn;
3332 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3334 	if (hcon->type != ACL_LINK)
3338 		conn = l2cap_conn_add(hcon, status);
3340 			l2cap_conn_ready(conn);
3342 		l2cap_conn_del(hcon, bt_err(status));
/*
 * l2cap_disconn_ind - HCI callback asking which disconnect reason to use
 * for this link.  Returns the reason recorded on the L2CAP connection;
 * the fallback for non-ACL links or a missing connection is elided from
 * this listing.
 */
3347 static int l2cap_disconn_ind(struct hci_conn *hcon)
3349 	struct l2cap_conn *conn = hcon->l2cap_data;
3351 	BT_DBG("hcon %p", hcon);
3353 	if (hcon->type != ACL_LINK || !conn)
3356 	return conn->disc_reason;
/*
 * l2cap_disconn_cfm - HCI callback: the ACL link has been disconnected.
 * Tears down the whole L2CAP connection, converting the HCI reason code
 * to an errno via bt_err().  Non-ACL links are ignored.
 */
3359 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3361 	BT_DBG("hcon %p reason %d", hcon, reason);
3363 	if (hcon->type != ACL_LINK)
3366 	l2cap_conn_del(hcon, bt_err(reason));
/*
 * l2cap_check_encryption - react to a change of link encryption for one
 * connection-oriented (SOCK_SEQPACKET) channel.
 *
 * Encryption dropped: a BT_SECURITY_MEDIUM channel is given a 5 second
 * grace timer (encryption may come back, e.g. across a role switch);
 * a BT_SECURITY_HIGH channel is closed immediately with ECONNREFUSED.
 * Encryption (re)enabled: the grace timer of a MEDIUM channel is cleared.
 */
3371 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3373 	if (sk->sk_type != SOCK_SEQPACKET)
3376 	if (encrypt == 0x00) {
3377 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3378 			l2cap_sock_clear_timer(sk);
3379 			l2cap_sock_set_timer(sk, HZ * 5);
3380 		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3381 			__l2cap_sock_close(sk, ECONNREFUSED);
3383 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3384 			l2cap_sock_clear_timer(sk);
/*
 * l2cap_security_cfm - HCI callback: an authentication/encryption request
 * for this link completed with @status, current encryption state @encrypt.
 * Walks every channel on the connection and advances its state machine:
 *
 *  - established channels (BT_CONNECTED/BT_CONFIG) with success just get
 *    an encryption re-check;
 *  - BT_CONNECT channels that were waiting for security send their
 *    deferred L2CAP Connect Request (or are timed out on failure);
 *  - BT_CONNECT2 channels (incoming, security pending) answer the remote
 *    Connect Request with success or L2CAP_CR_SEC_BLOCK.
 *
 * NOTE(review): elided listing -- the early returns, bh_lock/unlock of each
 * sk, the 'result' declaration and the status tests around 3437/3440 are
 * not visible here.
 */
3388 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3390 	struct l2cap_chan_list *l;
3391 	struct l2cap_conn *conn = hcon->l2cap_data;
3397 	l = &conn->chan_list;
3399 	BT_DBG("conn %p", conn);
3401 	read_lock(&l->lock);
3403 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		/* Channel still mid connect handshake with the peer. */
3406 		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3411 		if (!status && (sk->sk_state == BT_CONNECTED ||
3412 						sk->sk_state == BT_CONFIG)) {
3413 			l2cap_check_encryption(sk, encrypt);
3418 		if (sk->sk_state == BT_CONNECT) {
			/* Security done for an outgoing channel: now send
			 * the L2CAP Connect Request we were holding back. */
3420 				struct l2cap_conn_req req;
3421 				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3422 				req.psm  = l2cap_pi(sk)->psm;
3424 				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3426 				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3427 					L2CAP_CONN_REQ, sizeof(req), &req);
3429 				l2cap_sock_clear_timer(sk);
3430 				l2cap_sock_set_timer(sk, HZ / 10);
3432 		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel waiting on security: answer the
			 * peer's Connect Request according to the outcome. */
3433 			struct l2cap_conn_rsp rsp;
3437 				sk->sk_state = BT_CONFIG;
3438 				result = L2CAP_CR_SUCCESS;
3440 				sk->sk_state = BT_DISCONN;
3441 				l2cap_sock_set_timer(sk, HZ / 10);
3442 				result = L2CAP_CR_SEC_BLOCK;
3445 			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
3446 			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
3447 			rsp.result = cpu_to_le16(result);
3448 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3449 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3450 					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3456 	read_unlock(&l->lock);
/*
 * l2cap_recv_acldata - HCI callback: one ACL packet arrived for this link.
 * Reassembles L2CAP frames that HCI fragmented across several ACL packets:
 *
 *  ACL_START   - must begin a new frame; parses the basic header to learn
 *                the total length, delivers immediately if complete, else
 *                allocates conn->rx_skb and starts accumulating.
 *  continuation- appends to conn->rx_skb until conn->rx_len reaches zero,
 *                then delivers the completed frame.
 *
 * Any inconsistency (start while a frame is pending, short/long frames,
 * oversized fragments) marks the connection unreliable via
 * l2cap_conn_unreliable(ECOMM) and drops the pending buffer.
 *
 * NOTE(review): elided listing -- the 'len' declaration, the allocation
 * failure path after bt_skb_alloc, the drop/kfree_skb exits and several
 * closing braces are not visible here.
 */
3461 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3463 	struct l2cap_conn *conn = hcon->l2cap_data;
	/* Lazily create the L2CAP connection on first inbound data. */
3465 	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3468 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3470 	if (flags & ACL_START) {
3471 		struct l2cap_hdr *hdr;
		/* A start fragment while rx_skb is pending means we lost
		 * the tail of the previous frame. */
3475 			BT_ERR("Unexpected start frame (len %d)", skb->len);
3476 			kfree_skb(conn->rx_skb);
3477 			conn->rx_skb = NULL;
3479 			l2cap_conn_unreliable(conn, ECOMM);
		/* Start fragment too short to even hold the basic header. */
3483 			BT_ERR("Frame is too short (len %d)", skb->len);
3484 			l2cap_conn_unreliable(conn, ECOMM);
3488 		hdr = (struct l2cap_hdr *) skb->data;
3489 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3491 		if (len == skb->len) {
3492 			/* Complete frame received */
3493 			l2cap_recv_frame(conn, skb);
3497 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3499 		if (skb->len > len) {
3500 			BT_ERR("Frame is too long (len %d, expected len %d)",
3502 			l2cap_conn_unreliable(conn, ECOMM);
3506 		/* Allocate skb for the complete frame (with header) */
3507 		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3511 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		/* rx_len counts the bytes still expected in continuations. */
3513 		conn->rx_len = len - skb->len;
3515 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
		/* Continuation with nothing outstanding -> protocol error. */
3517 		if (!conn->rx_len) {
3518 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3519 			l2cap_conn_unreliable(conn, ECOMM);
3523 		if (skb->len > conn->rx_len) {
3524 			BT_ERR("Fragment is too long (len %d, expected %d)",
3525 					skb->len, conn->rx_len);
3526 			kfree_skb(conn->rx_skb);
3527 			conn->rx_skb = NULL;
3529 			l2cap_conn_unreliable(conn, ECOMM);
3533 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3535 		conn->rx_len -= skb->len;
3537 		if (!conn->rx_len) {
3538 			/* Complete frame received */
3539 			l2cap_recv_frame(conn, conn->rx_skb);
3540 			conn->rx_skb = NULL;
/*
 * l2cap_sysfs_show - dump one line per L2CAP socket (addresses, state,
 * PSM, CIDs, MTUs, security level) into the sysfs class attribute buffer.
 *
 * NOTE(review): sprintf() into the sysfs buffer is unbounded -- with enough
 * sockets this overruns the single page sysfs provides.  Later kernels
 * moved this dump to seq_file/debugfs for exactly that reason; consider the
 * same here.  (The 'str' declaration and return are elided from this
 * listing, so the fix cannot be made safely in this view.)
 */
3549 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3552 	struct hlist_node *node;
3555 	read_lock_bh(&l2cap_sk_list.lock);
3557 	sk_for_each(sk, node, &l2cap_sk_list.head) {
3558 		struct l2cap_pinfo *pi = l2cap_pi(sk);
3560 		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3561 				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3562 				sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3563 				pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3566 	read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only 'l2cap' attribute on the bluetooth class, backed by the dump above. */
3571 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket operations exposed to userspace for PF_BLUETOOTH/BTPROTO_L2CAP
 * sockets.  mmap/socketpair are unsupported (sock_no_*); poll and ioctl
 * use the generic Bluetooth helpers; everything else is L2CAP-specific. */
3573 static const struct proto_ops l2cap_sock_ops = {
3574 	.family		= PF_BLUETOOTH,
3575 	.owner		= THIS_MODULE,
3576 	.release	= l2cap_sock_release,
3577 	.bind		= l2cap_sock_bind,
3578 	.connect	= l2cap_sock_connect,
3579 	.listen		= l2cap_sock_listen,
3580 	.accept		= l2cap_sock_accept,
3581 	.getname	= l2cap_sock_getname,
3582 	.sendmsg	= l2cap_sock_sendmsg,
3583 	.recvmsg	= l2cap_sock_recvmsg,
3584 	.poll		= bt_sock_poll,
3585 	.ioctl		= bt_sock_ioctl,
3586 	.mmap		= sock_no_mmap,
3587 	.socketpair	= sock_no_socketpair,
3588 	.shutdown	= l2cap_sock_shutdown,
3589 	.setsockopt	= l2cap_sock_setsockopt,
3590 	.getsockopt	= l2cap_sock_getsockopt
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH, ...,
 * BTPROTO_L2CAP) is routed to l2cap_sock_create(). */
3593 static struct net_proto_family l2cap_sock_family_ops = {
3594 	.family	= PF_BLUETOOTH,
3595 	.owner	= THIS_MODULE,
3596 	.create	= l2cap_sock_create,
/* Hooks registered with the HCI core: connection setup/teardown
 * notifications, security results and inbound ACL data all funnel into
 * the handlers defined above. */
3599 static struct hci_proto l2cap_hci_proto = {
3601 	.id		= HCI_PROTO_L2CAP,
3602 	.connect_ind	= l2cap_connect_ind,
3603 	.connect_cfm	= l2cap_connect_cfm,
3604 	.disconn_ind	= l2cap_disconn_ind,
3605 	.disconn_cfm	= l2cap_disconn_cfm,
3606 	.security_cfm	= l2cap_security_cfm,
3607 	.recv_acldata	= l2cap_recv_acldata
/*
 * l2cap_init - module initialization: register the proto, the Bluetooth
 * socket family and the HCI protocol hooks, unwinding in reverse order on
 * failure (unwind labels elided from this listing).  The sysfs file is
 * best-effort: failure only logs an error.
 */
3610 static int __init l2cap_init(void)
3614 	err = proto_register(&l2cap_proto, 0);
3618 	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3620 		BT_ERR("L2CAP socket registration failed");
3624 	err = hci_register_proto(&l2cap_hci_proto);
3626 		BT_ERR("L2CAP protocol registration failed");
3627 		bt_sock_unregister(BTPROTO_L2CAP);
3631 	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3632 		BT_ERR("Failed to create L2CAP info file");
3634 	BT_INFO("L2CAP ver %s", VERSION);
3635 	BT_INFO("L2CAP socket layer initialized");
	/* Error unwind: undo proto_register() (label elided). */
3640 	proto_unregister(&l2cap_proto);
/*
 * l2cap_exit - module teardown, strict reverse of l2cap_init():
 * sysfs file, socket family, HCI protocol hooks, then the proto itself.
 */
3644 static void __exit l2cap_exit(void)
3646 	class_remove_file(bt_class, &class_attr_l2cap);
3648 	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
3649 		BT_ERR("L2CAP socket unregistration failed");
3651 	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3652 		BT_ERR("L2CAP protocol unregistration failed");
3654 	proto_unregister(&l2cap_proto);
/* Intentionally empty exported symbol: depending modules (RFCOMM, BNEP,
 * ...) reference it so modprobe pulls this module in automatically. */
3657 void l2cap_load(void)
3659 	/* Dummy function to trigger automatic L2CAP module loading by
3660 	 * other modules that use L2CAP sockets but don't use any other
3661 	 * symbols from it. */
3664 EXPORT_SYMBOL(l2cap_load);
3666 module_init(l2cap_init);
3667 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared 'static int' (see top of file) but
 * registered here with type 'bool'.  Old kernels tolerated int-backed bool
 * params; later module_param type checking makes this a build error -- the
 * variable should be declared 'bool'. */
3669 module_param(enable_ertm, bool, 0644);
3670 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
3672 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
3673 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
3674 MODULE_VERSION(VERSION);
3675 MODULE_LICENSE("GPL");
/* MODULE_ALIAS lets socket-family lookup (bt-proto-<n>) autoload us. */
3676 MODULE_ALIAS("bt-proto-0");