2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "mgmt_util.h"
37 static LIST_HEAD(mgmt_chan_list);
38 static DEFINE_MUTEX(mgmt_chan_list_lock);
40 static atomic_t monitor_promisc = ATOMIC_INIT(0);
42 /* ----- HCI socket interface ----- */
/* Access the protocol-private hci_pinfo area of a struct sock. */
45 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* Fields of struct hci_pinfo (fragment: the struct header and the
 * remaining members — e.g. hdev, flags, cmsg_mask used below — are
 * not visible in this excerpt; confirm against the full source). */
50 	struct hci_filter filter;
52 	unsigned short channel;
/* Set an HCI socket flag bit (atomic) in hci_pinfo->flags. */
56 void hci_sock_set_flag(struct sock *sk, int nr)
58 	set_bit(nr, &hci_pi(sk)->flags);
/* Clear an HCI socket flag bit (atomic) in hci_pinfo->flags. */
61 void hci_sock_clear_flag(struct sock *sk, int nr)
63 	clear_bit(nr, &hci_pi(sk)->flags);
/* Test an HCI socket flag bit; returns non-zero when set. */
66 int hci_sock_test_flag(struct sock *sk, int nr)
68 	return test_bit(nr, &hci_pi(sk)->flags);
/* Return the HCI channel (RAW/USER/MONITOR/CONTROL/...) the socket is bound to. */
71 unsigned short hci_sock_get_channel(struct sock *sk)
73 	return hci_pi(sk)->channel;
/* Non-atomic bit test over a little-endian __u32 bitmap; used for the
 * event/OCF filter masks (nr >> 5 selects the word, nr & 31 the bit). */
76 static inline int hci_test_bit(int nr, const void *addr)
78 	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
/* Security filter: whitelists the HCI commands a raw socket without
 * CAP_NET_RAW may send. ocf_mask is indexed by OGF; each row is a
 * 128-bit OCF bitmap probed via hci_test_bit() in sendmsg.
 * NOTE(review): fragment — the struct's other members (e.g. the
 * type/event masks used in setsockopt) and some table rows are not
 * visible here; confirm values against the full source. */
82 #define HCI_SFLT_MAX_OGF 5
84 struct hci_sec_filter {
87 	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
90 static const struct hci_sec_filter hci_sec_filter = {
94 	{ 0x1000d9fe, 0x0000b00c },
99 	{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
100 	/* OGF_LINK_POLICY */
101 	{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
103 	{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
105 	{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
106 	/* OGF_STATUS_PARAM */
107 	{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all open HCI sockets, protected by its rwlock. */
111 static struct bt_sock_list hci_sk_list = {
112 	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Decide whether skb should be withheld from a RAW-channel socket,
 * applying the per-socket filter: packet-type mask, then (for event
 * packets) the event bitmap, then an opcode match for the
 * CMD_COMPLETE/CMD_STATUS events when an opcode filter is set.
 * NOTE(review): fragment — the return statements and braces between
 * these checks are not visible in this excerpt; confirm the exact
 * true/false paths against the full source. */
115 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
117 	struct hci_filter *flt;
118 	int flt_type, flt_event;
121 	flt = &hci_pi(sk)->filter;
/* Vendor packets get special treatment (handling not visible here). */
123 	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
126 	flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
128 	if (!test_bit(flt_type, &flt->type_mask))
131 	/* Extra filter for event packets only */
132 	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
/* First payload byte of an event packet is the event code. */
135 	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
137 	if (!hci_test_bit(flt_event, &flt->event_mask))
140 	/* Check filter only when opcode is set */
/* Opcode lives at offset 3 (CMD_COMPLETE) / 4 (CMD_STATUS) of the event. */
144 	if (flt_event == HCI_EV_CMD_COMPLETE &&
145 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
148 	if (flt_event == HCI_EV_CMD_STATUS &&
149 	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
155 /* Send frame to RAW socket */
/* Deliver skb to every bound socket attached to hdev: RAW sockets get
 * it subject to their filter; USER-channel sockets only receive
 * incoming EVENT/ACL/SCO frames. A single private copy with the packet
 * type pushed in front is made lazily, then cloned per receiver.
 * NOTE(review): fragment — error paths (failed copy/clone), the
 * "skip originating socket" check and the final kfree of skb_copy are
 * not visible here; confirm against the full source. */
156 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
159 	struct sk_buff *skb_copy = NULL;
161 	BT_DBG("hdev %p len %d", hdev, skb->len);
163 	read_lock(&hci_sk_list.lock);
165 	sk_for_each(sk, &hci_sk_list.head) {
166 		struct sk_buff *nskb;
168 		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
171 		/* Don't send frame to the socket it came from */
175 		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
176 			if (is_filtered_packet(sk, skb))
178 		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
179 			if (!bt_cb(skb)->incoming)
181 			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
182 			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
183 			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
186 			/* Don't send frame to other channel types */
191 			/* Create a private copy with headroom */
192 			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
196 			/* Put type byte before the data */
197 			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
200 		nskb = skb_clone(skb_copy, GFP_ATOMIC);
/* Per-socket clone; drop it if the receive queue rejects it. */
204 		if (sock_queue_rcv_skb(sk, nskb))
208 	read_unlock(&hci_sk_list.lock);
213 /* Send frame to sockets with specific channel */
/* Clone skb to every bound socket on the given channel that has the
 * requested flag set, optionally skipping skip_sk (the originator).
 * NOTE(review): fragment — the skip_sk comparison and the kfree path
 * for a rejected clone are not visible here. */
214 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
215 			 int flag, struct sock *skip_sk)
219 	BT_DBG("channel %u len %d", channel, skb->len);
221 	read_lock(&hci_sk_list.lock);
223 	sk_for_each(sk, &hci_sk_list.head) {
224 		struct sk_buff *nskb;
226 		/* Ignore socket without the flag set */
227 		if (!hci_sock_test_flag(sk, flag))
230 		/* Skip the original socket */
234 		if (sk->sk_state != BT_BOUND)
237 		if (hci_pi(sk)->channel != channel)
240 		nskb = skb_clone(skb, GFP_ATOMIC);
244 		if (sock_queue_rcv_skb(sk, nskb))
248 	read_unlock(&hci_sk_list.lock);
251 /* Send frame to monitor socket */
/* Mirror an HCI frame to monitor-channel sockets: map the packet type
 * (and direction for ACL/SCO) to an hci_mon opcode, prepend an
 * hci_mon_hdr carrying opcode/index/len, and broadcast on
 * HCI_CHANNEL_MONITOR. Cheap early exit when no monitor is promiscuous.
 * NOTE(review): fragment — case labels, break statements, the default
 * case and the allocation-failure check are not visible here. */
252 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
254 	struct sk_buff *skb_copy = NULL;
255 	struct hci_mon_hdr *hdr;
/* No monitor sockets open — nothing to do. */
258 	if (!atomic_read(&monitor_promisc))
261 	BT_DBG("hdev %p len %d", hdev, skb->len);
263 	switch (bt_cb(skb)->pkt_type) {
264 	case HCI_COMMAND_PKT:
265 		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
268 		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
270 	case HCI_ACLDATA_PKT:
271 		if (bt_cb(skb)->incoming)
272 			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
274 			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
276 	case HCI_SCODATA_PKT:
277 		if (bt_cb(skb)->incoming)
278 			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
280 			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
283 		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
289 	/* Create a private copy with headroom */
290 	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
294 	/* Put header before the data */
295 	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
296 	hdr->opcode = opcode;
297 	hdr->index = cpu_to_le16(hdev->id);
298 	hdr->len = cpu_to_le16(skb->len);
300 	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
301 			    HCI_SOCK_TRUSTED, NULL);
/* Build the monitor-channel skb for a device lifecycle event
 * (NEW/DEL_INDEX, INDEX_INFO, OPEN/CLOSE_INDEX): allocate, fill the
 * event-specific payload, timestamp, then push the hci_mon_hdr.
 * Returns NULL on unknown event or allocation failure (failure paths
 * not visible in this fragment).
 * NOTE(review): fragment — the switch/case labels, breaks and the
 * default return are not visible here; confirm against full source. */
305 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
307 	struct hci_mon_hdr *hdr;
308 	struct hci_mon_new_index *ni;
309 	struct hci_mon_index_info *ii;
315 		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
319 		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
320 		ni->type = hdev->dev_type;
322 		bacpy(&ni->bdaddr, &hdev->bdaddr);
/* Only the first 8 bytes of the device name fit in the event. */
323 		memcpy(ni->name, hdev->name, 8);
325 		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
329 		skb = bt_skb_alloc(0, GFP_ATOMIC);
333 		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
337 		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
341 		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
342 		bacpy(&ii->bdaddr, &hdev->bdaddr);
343 		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
345 		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
349 		skb = bt_skb_alloc(0, GFP_ATOMIC);
353 		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
357 		skb = bt_skb_alloc(0, GFP_ATOMIC);
361 		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
368 	__net_timestamp(skb);
370 	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
371 	hdr->opcode = opcode;
372 	hdr->index = cpu_to_le16(hdev->id);
373 	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay current controller state to a freshly bound monitor socket:
 * for each registered hdev queue DEV_REG, then DEV_OPEN if running,
 * then DEV_UP if up, so the monitor sees a consistent snapshot.
 * NOTE(review): fragment — NULL checks on create_monitor_event() and
 * the kfree_skb on queue failure are not visible here. */
378 static void send_monitor_replay(struct sock *sk)
380 	struct hci_dev *hdev;
382 	read_lock(&hci_dev_list_lock);
384 	list_for_each_entry(hdev, &hci_dev_list, list) {
387 		skb = create_monitor_event(hdev, HCI_DEV_REG);
391 		if (sock_queue_rcv_skb(sk, skb))
394 		if (!test_bit(HCI_RUNNING, &hdev->flags))
397 		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
401 		if (sock_queue_rcv_skb(sk, skb))
404 		if (!test_bit(HCI_UP, &hdev->flags))
407 		skb = create_monitor_event(hdev, HCI_DEV_UP);
411 		if (sock_queue_rcv_skb(sk, skb))
415 	read_unlock(&hci_dev_list_lock);
418 /* Generate internal stack event */
/* Synthesize an HCI_EV_STACK_INTERNAL event (type + dlen bytes of
 * payload), mark it incoming, timestamp it and feed it through
 * hci_send_to_sock() so RAW listeners observe stack-internal events.
 * NOTE(review): fragment — the allocation-failure check, the ev->type
 * assignment and the trailing kfree_skb are not visible here. */
419 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
421 	struct hci_event_hdr *hdr;
422 	struct hci_ev_stack_internal *ev;
425 	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
429 	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
430 	hdr->evt  = HCI_EV_STACK_INTERNAL;
431 	hdr->plen = sizeof(*ev) + dlen;
433 	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
435 	memcpy(ev->data, data, dlen);
437 	bt_cb(skb)->incoming = 1;
438 	__net_timestamp(skb);
440 	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
441 	hci_send_to_sock(hdev, skb);
/* Fan out a device lifecycle event: mirror it to the monitor channel,
 * emit an HCI_EV_SI_DEVICE stack event for REG/UNREG/UP/DOWN, and on
 * HCI_DEV_UNREG detach every socket still bound to the dying hdev
 * (resetting them to BT_OPEN and waking their state-change waiters).
 * NOTE(review): fragment — skb NULL check, ev.event assignment, the
 * hci_dev_put for the dropped reference and bh_unlock_sock are not
 * visible here; confirm locking balance against the full source. */
445 void hci_sock_dev_event(struct hci_dev *hdev, int event)
447 	BT_DBG("hdev %s event %d", hdev->name, event);
449 	if (atomic_read(&monitor_promisc)) {
452 		/* Send event to monitor */
453 		skb = create_monitor_event(hdev, event);
455 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
456 					    HCI_SOCK_TRUSTED, NULL);
/* HCI_DEV_REG..HCI_DEV_DOWN are reported as stack-internal events. */
461 	if (event <= HCI_DEV_DOWN) {
462 		struct hci_ev_si_device ev;
464 		/* Send event to sockets */
466 		ev.dev_id = hdev->id;
467 		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
470 	if (event == HCI_DEV_UNREG) {
473 		/* Detach sockets from device */
474 		read_lock(&hci_sk_list.lock);
475 		sk_for_each(sk, &hci_sk_list.head) {
/* bh-safe per-socket lock while the list read-lock is held. */
476 			bh_lock_sock_nested(sk);
477 			if (hci_pi(sk)->hdev == hdev) {
478 				hci_pi(sk)->hdev = NULL;
480 				sk->sk_state = BT_OPEN;
481 				sk->sk_state_change(sk);
487 		read_unlock(&hci_sk_list.lock);
/* Linear lookup of a registered mgmt channel by number.
 * Caller must hold mgmt_chan_list_lock. */
491 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
493 	struct hci_mgmt_chan *c;
495 	list_for_each_entry(c, &mgmt_chan_list, list) {
496 		if (c->channel == channel)
/* Locked wrapper around __hci_mgmt_chan_find(). */
503 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
505 	struct hci_mgmt_chan *c;
507 	mutex_lock(&mgmt_chan_list_lock);
508 	c = __hci_mgmt_chan_find(channel);
509 	mutex_unlock(&mgmt_chan_list_lock);
/* Register a management channel. Channels below HCI_CHANNEL_CONTROL
 * are reserved; duplicate channel numbers are rejected while holding
 * the list mutex. (Error return values are not visible in this
 * fragment — confirm against full source.) */
514 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
516 	if (c->channel < HCI_CHANNEL_CONTROL)
519 	mutex_lock(&mgmt_chan_list_lock);
520 	if (__hci_mgmt_chan_find(c->channel)) {
521 		mutex_unlock(&mgmt_chan_list_lock);
525 	list_add_tail(&c->list, &mgmt_chan_list);
527 	mutex_unlock(&mgmt_chan_list_lock);
531 EXPORT_SYMBOL(hci_mgmt_chan_register);
/* Unregister a management channel under the list mutex (the list_del
 * itself is not visible in this fragment). */
533 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
535 	mutex_lock(&mgmt_chan_list_lock);
537 	mutex_unlock(&mgmt_chan_list_lock);
539 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
/* Socket release: drop monitor promiscuity if applicable, unlink from
 * hci_sk_list, give back exclusive USER-channel access (closing the
 * controller via hci_dev_do_close and re-announcing it to mgmt), drop
 * the hdev promisc count/reference, and purge pending queues.
 * NOTE(review): fragment — the NULL-sk guard, hci_dev_put, sock_orphan
 * and sock_put teardown lines are not visible here. */
541 static int hci_sock_release(struct socket *sock)
543 	struct sock *sk = sock->sk;
544 	struct hci_dev *hdev;
546 	BT_DBG("sock %p sk %p", sock, sk);
551 	hdev = hci_pi(sk)->hdev;
553 	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
554 		atomic_dec(&monitor_promisc);
556 	bt_sock_unlink(&hci_sk_list, sk);
559 	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
560 		/* When releasing an user channel exclusive access,
561 		 * call hci_dev_do_close directly instead of calling
562 		 * hci_dev_close to ensure the exclusive access will
563 		 * be released and the controller brought back down.
565 		 * The checking of HCI_AUTO_OFF is not needed in this
566 		 * case since it will have been cleared already when
567 		 * opening the user channel.
569 		hci_dev_do_close(hdev);
570 		hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
571 		mgmt_index_added(hdev);
574 	atomic_dec(&hdev->promisc);
580 	skb_queue_purge(&sk->sk_receive_queue);
581 	skb_queue_purge(&sk->sk_write_queue);
/* HCIBLOCKADDR ioctl helper: copy a bdaddr_t from userspace and add it
 * to the device blacklist as a BR/EDR address, under the hdev lock.
 * (The -EFAULT return and hci_dev_lock call are not visible in this
 * fragment.) */
587 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
592 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
597 	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
599 	hci_dev_unlock(hdev);
/* HCIUNBLOCKADDR ioctl helper: mirror of blacklist_add — remove the
 * user-supplied BR/EDR address from the device blacklist. */
604 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
609 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
614 	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
616 	hci_dev_unlock(hdev);
621 /* Ioctls that require bound socket */
/* Dispatch per-device ioctls (conn/auth info, blacklist add/del) after
 * rejecting user-channel, unconfigured and non-BR/EDR controllers.
 * Privileged commands additionally require CAP_NET_ADMIN.
 * NOTE(review): fragment — case labels, error return values and the
 * NULL-hdev check are not visible here; in particular verify that
 * hdev is checked before being dereferenced in the full source. */
622 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
625 	struct hci_dev *hdev = hci_pi(sk)->hdev;
630 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
633 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
636 	if (hdev->dev_type != HCI_BREDR)
641 		if (!capable(CAP_NET_ADMIN))
646 		return hci_get_conn_info(hdev, (void __user *) arg);
649 		return hci_get_auth_info(hdev, (void __user *) arg);
652 		if (!capable(CAP_NET_ADMIN))
654 		return hci_sock_blacklist_add(hdev, (void __user *) arg);
657 		if (!capable(CAP_NET_ADMIN))
659 		return hci_sock_blacklist_del(hdev, (void __user *) arg);
/* Top-level ioctl handler. Only RAW-channel sockets may use it.
 * Global queries (dev list/info, conn list, inquiry) need no
 * privilege; dev open/close/reset and command injection require
 * CAP_NET_ADMIN. Anything else falls through to the bound-socket
 * dispatcher under the socket lock.
 * NOTE(review): fragment — case labels, the lock/unlock around the
 * channel check and error returns are not visible here. */
665 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
668 	void __user *argp = (void __user *) arg;
669 	struct sock *sk = sock->sk;
672 	BT_DBG("cmd %x arg %lx", cmd, arg);
676 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
685 		return hci_get_dev_list(argp);
688 		return hci_get_dev_info(argp);
691 		return hci_get_conn_list(argp);
694 		if (!capable(CAP_NET_ADMIN))
696 		return hci_dev_open(arg);
699 		if (!capable(CAP_NET_ADMIN))
701 		return hci_dev_close(arg);
704 		if (!capable(CAP_NET_ADMIN))
706 		return hci_dev_reset(arg);
709 		if (!capable(CAP_NET_ADMIN))
711 		return hci_dev_reset_stat(arg);
721 		if (!capable(CAP_NET_ADMIN))
723 		return hci_dev_cmd(cmd, argp);
726 		return hci_inquiry(argp);
731 		err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a (device, channel) pair.
 *  - RAW: optional device attach, bumps hdev->promisc.
 *  - USER: CAP_NET_ADMIN-gated exclusive controller access — the
 *    device must not be in setup/config or already up (unless only
 *    up due to HCI_AUTO_OFF); mgmt is told the index vanished, the
 *    device is opened, and -EALREADY from an already-running
 *    transport is forgiven.
 *  - MONITOR: CAP_NET_RAW-gated, trusted, replays current state.
 *  - Otherwise: a registered mgmt channel; CAP_NET_ADMIN users are
 *    trusted, and the control channel gets index/generic events on
 *    by default.
 * On success the channel is recorded and the state becomes BT_BOUND.
 * NOTE(review): fragment — address-length validation, lock_sock/
 * release_sock, most error assignments/goto done paths, and the
 * hci_dev_put on failure are not visible here; verify the reference
 * and locking balance against the full source. */
738 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
741 	struct sockaddr_hci haddr;
742 	struct sock *sk = sock->sk;
743 	struct hci_dev *hdev = NULL;
746 	BT_DBG("sock %p sk %p", sock, sk);
/* Copy at most sizeof(haddr) from userspace-provided sockaddr. */
751 	memset(&haddr, 0, sizeof(haddr));
752 	len = min_t(unsigned int, sizeof(haddr), addr_len);
753 	memcpy(&haddr, addr, len);
755 	if (haddr.hci_family != AF_BLUETOOTH)
/* Re-binding an already bound socket is not allowed. */
760 	if (sk->sk_state == BT_BOUND) {
765 	switch (haddr.hci_channel) {
766 	case HCI_CHANNEL_RAW:
767 		if (hci_pi(sk)->hdev) {
772 		if (haddr.hci_dev != HCI_DEV_NONE) {
773 			hdev = hci_dev_get(haddr.hci_dev);
779 			atomic_inc(&hdev->promisc);
782 		hci_pi(sk)->hdev = hdev;
785 	case HCI_CHANNEL_USER:
786 		if (hci_pi(sk)->hdev) {
/* User channel requires a concrete device index. */
791 		if (haddr.hci_dev == HCI_DEV_NONE) {
796 		if (!capable(CAP_NET_ADMIN)) {
801 		hdev = hci_dev_get(haddr.hci_dev);
/* Refuse exclusive access while the device is initializing,
 * in setup/config, or genuinely up (HCI_AUTO_OFF up is OK). */
807 		if (test_bit(HCI_INIT, &hdev->flags) ||
808 		    hci_dev_test_flag(hdev, HCI_SETUP) ||
809 		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
810 		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
811 		     test_bit(HCI_UP, &hdev->flags))) {
/* Atomically claim exclusive access; fail if someone holds it. */
817 		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
823 		mgmt_index_removed(hdev);
825 		err = hci_dev_open(hdev->id);
827 		if (err == -EALREADY) {
828 			/* In case the transport is already up and
829 			 * running, clear the error here.
831 			 * This can happen when opening an user
832 			 * channel and HCI_AUTO_OFF grace period
837 			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
838 			mgmt_index_added(hdev);
844 		atomic_inc(&hdev->promisc);
846 		hci_pi(sk)->hdev = hdev;
849 	case HCI_CHANNEL_MONITOR:
850 		if (haddr.hci_dev != HCI_DEV_NONE) {
855 		if (!capable(CAP_NET_RAW)) {
860 		/* The monitor interface is restricted to CAP_NET_RAW
861 		 * capabilities and with that implicitly trusted.
863 		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
865 		send_monitor_replay(sk);
867 		atomic_inc(&monitor_promisc);
871 		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
876 		if (haddr.hci_dev != HCI_DEV_NONE) {
881 		/* Users with CAP_NET_ADMIN capabilities are allowed
882 		 * access to all management commands and events. For
883 		 * untrusted users the interface is restricted and
884 		 * also only untrusted events are sent.
886 		if (capable(CAP_NET_ADMIN))
887 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
889 		/* At the moment the index and unconfigured index events
890 		 * are enabled unconditionally. Setting them on each
891 		 * socket when binding keeps this functionality. They
892 		 * however might be cleared later and then sending of these
893 		 * events will be disabled, but that is then intentional.
895 		 * This also enables generic events that are safe to be
896 		 * received by untrusted users. Example for such events
897 		 * are changes to settings, class of device, name etc.
899 		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
900 			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
901 			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
902 			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
908 	hci_pi(sk)->channel = haddr.hci_channel;
909 	sk->sk_state = BT_BOUND;
/* getname(): report family, bound device id and channel into the
 * caller's sockaddr_hci.
 * NOTE(review): fragment — the lock_sock, the NULL-hdev error path and
 * the release_sock are not visible here. */
916 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
917 			    int *addr_len, int peer)
919 	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
920 	struct sock *sk = sock->sk;
921 	struct hci_dev *hdev;
924 	BT_DBG("sock %p sk %p", sock, sk);
931 	hdev = hci_pi(sk)->hdev;
937 	*addr_len = sizeof(*haddr);
938 	haddr->hci_family = AF_BLUETOOTH;
939 	haddr->hci_dev    = hdev->id;
940 	haddr->hci_channel= hci_pi(sk)->channel;
/* Attach ancillary data to a received message per the socket's
 * cmsg_mask: packet direction (HCI_CMSG_DIR) and/or receive timestamp
 * (HCI_CMSG_TSTAMP), converting to compat_timeval for 32-bit compat
 * callers.
 * NOTE(review): fragment — the data/len setup between the compat
 * conversion and put_cmsg is not visible here. */
947 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
950 	__u32 mask = hci_pi(sk)->cmsg_mask;
952 	if (mask & HCI_CMSG_DIR) {
953 		int incoming = bt_cb(skb)->incoming;
954 		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
958 	if (mask & HCI_CMSG_TSTAMP) {
960 		struct compat_timeval ctv;
966 		skb_get_timestamp(skb, &tv);
971 		if (!COMPAT_USE_64BIT_TIME &&
972 		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
973 			ctv.tv_sec = tv.tv_sec;
974 			ctv.tv_usec = tv.tv_usec;
980 		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* recvmsg(): dequeue one datagram, copy (possibly truncated) payload
 * to the caller, then add per-channel ancillary data — RAW gets the
 * cmsg handling, USER/MONITOR and mgmt channels get a plain receive
 * timestamp. Returns bytes copied or a negative error.
 * NOTE(review): fragment — the copied/len truncation logic and break
 * statements in the switch are not visible here. */
984 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
987 	int noblock = flags & MSG_DONTWAIT;
988 	struct sock *sk = sock->sk;
992 	BT_DBG("sock %p, sk %p", sock, sk);
994 	if (flags & (MSG_OOB))
997 	if (sk->sk_state == BT_CLOSED)
1000 	skb = skb_recv_datagram(sk, flags, noblock, &err);
1006 		msg->msg_flags |= MSG_TRUNC;
1010 	skb_reset_transport_header(skb);
1011 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
1013 	switch (hci_pi(sk)->channel) {
1014 	case HCI_CHANNEL_RAW:
1015 		hci_sock_cmsg(sk, msg, skb);
1017 	case HCI_CHANNEL_USER:
1018 	case HCI_CHANNEL_MONITOR:
1019 		sock_recv_timestamp(msg, sk, skb);
1022 		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1023 			sock_recv_timestamp(msg, sk, skb);
1027 	skb_free_datagram(sk, skb);
1029 	return err ? : copied;
/* Parse and execute one management command from a sendmsg buffer:
 * validate the mgmt_hdr (declared len must match the payload), look up
 * the handler in the channel's table, enforce trust (HCI_MGMT_UNTRUSTED),
 * resolve and vet the target index (reject setup/config/user-channel
 * and, unless the handler allows it, unconfigured devices), check the
 * hdev-vs-no-hdev expectation and the fixed/variable payload length,
 * then invoke the handler. Errors are reported to the socket via
 * mgmt_cmd_status().
 * NOTE(review): fragment — the hdr assignment from buf, several goto
 * done/error paths, hci_dev_put and kfree(buf) are not visible here;
 * confirm the cleanup balance against the full source. */
1032 static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1033 			struct msghdr *msg, size_t msglen)
1037 	struct mgmt_hdr *hdr;
1038 	u16 opcode, index, len;
1039 	struct hci_dev *hdev = NULL;
1040 	const struct hci_mgmt_handler *handler;
1041 	bool var_len, no_hdev;
1044 	BT_DBG("got %zu bytes", msglen);
1046 	if (msglen < sizeof(*hdr))
1049 	buf = kmalloc(msglen, GFP_KERNEL);
1053 	if (memcpy_from_msg(buf, msg, msglen)) {
1059 	opcode = __le16_to_cpu(hdr->opcode);
1060 	index = __le16_to_cpu(hdr->index);
1061 	len = __le16_to_cpu(hdr->len);
/* Header's declared length must exactly match the remaining bytes. */
1063 	if (len != msglen - sizeof(*hdr)) {
1068 	if (opcode >= chan->handler_count ||
1069 	    chan->handlers[opcode].func == NULL) {
1070 		BT_DBG("Unknown op %u", opcode);
1071 		err = mgmt_cmd_status(sk, index, opcode,
1072 				      MGMT_STATUS_UNKNOWN_COMMAND);
1076 	handler = &chan->handlers[opcode];
1078 	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1079 	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1080 		err = mgmt_cmd_status(sk, index, opcode,
1081 				      MGMT_STATUS_PERMISSION_DENIED);
1085 	if (index != MGMT_INDEX_NONE) {
1086 		hdev = hci_dev_get(index);
1088 			err = mgmt_cmd_status(sk, index, opcode,
1089 					      MGMT_STATUS_INVALID_INDEX);
1093 		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1094 		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
1095 		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1096 			err = mgmt_cmd_status(sk, index, opcode,
1097 					      MGMT_STATUS_INVALID_INDEX);
1101 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1102 		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1103 			err = mgmt_cmd_status(sk, index, opcode,
1104 					      MGMT_STATUS_INVALID_INDEX);
/* Handler either requires an hdev or forbids one; mismatch is an error. */
1109 	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1110 	if (no_hdev != !hdev) {
1111 		err = mgmt_cmd_status(sk, index, opcode,
1112 				      MGMT_STATUS_INVALID_INDEX);
1116 	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1117 	if ((var_len && len < handler->data_len) ||
1118 	    (!var_len && len != handler->data_len)) {
1119 		err = mgmt_cmd_status(sk, index, opcode,
1120 				      MGMT_STATUS_INVALID_PARAMS);
1124 	if (hdev && chan->hdev_init)
1125 		chan->hdev_init(sk, hdev);
1127 	cp = buf + sizeof(*hdr);
1129 	err = handler->func(sk, hdev, cp, len);
/* sendmsg(): route outgoing data by channel. RAW/USER fall through to
 * frame transmission; MONITOR is write-rejected; anything else must be
 * a registered mgmt channel and is parsed by hci_mgmt_cmd(). For
 * frames, the first byte is the packet type: USER-channel sockets may
 * send CMD/ACL/SCO unchecked; RAW command packets are vetted against
 * hci_sec_filter (bypassed by CAP_NET_RAW) and queued on cmd_q as a
 * single-command request; other RAW data needs CAP_NET_RAW and goes to
 * raw_q.
 * NOTE(review): fragment — lock_sock/release_sock, several error
 * assignments, goto drop paths and break statements are not visible
 * here; confirm the error/cleanup flow against the full source. */
1143 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1146 	struct sock *sk = sock->sk;
1147 	struct hci_mgmt_chan *chan;
1148 	struct hci_dev *hdev;
1149 	struct sk_buff *skb;
1152 	BT_DBG("sock %p sk %p", sock, sk);
1154 	if (msg->msg_flags & MSG_OOB)
1157 	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
/* Minimum: type byte + command header; maximum: one HCI frame. */
1160 	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1165 	switch (hci_pi(sk)->channel) {
1166 	case HCI_CHANNEL_RAW:
1167 	case HCI_CHANNEL_USER:
1169 	case HCI_CHANNEL_MONITOR:
1173 		mutex_lock(&mgmt_chan_list_lock);
1174 		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1176 			err = hci_mgmt_cmd(chan, sk, msg, len);
1180 		mutex_unlock(&mgmt_chan_list_lock);
1184 	hdev = hci_pi(sk)->hdev;
1190 	if (!test_bit(HCI_UP, &hdev->flags)) {
1195 	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1199 	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
/* First byte of the user buffer selects the HCI packet type. */
1204 	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
1207 	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1208 		/* No permission check is needed for user channel
1209 		 * since that gets enforced when binding the socket.
1211 		 * However check that the packet type is valid.
1213 		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
1214 		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
1215 		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
1220 		skb_queue_tail(&hdev->raw_q, skb);
1221 		queue_work(hdev->workqueue, &hdev->tx_work);
1222 	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
1223 		u16 opcode = get_unaligned_le16(skb->data);
1224 		u16 ogf = hci_opcode_ogf(opcode);
1225 		u16 ocf = hci_opcode_ocf(opcode);
/* Unprivileged senders may only issue whitelisted commands. */
1227 		if (((ogf > HCI_SFLT_MAX_OGF) ||
1228 		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1229 				   &hci_sec_filter.ocf_mask[ogf])) &&
1230 		    !capable(CAP_NET_RAW)) {
1236 			skb_queue_tail(&hdev->raw_q, skb);
1237 			queue_work(hdev->workqueue, &hdev->tx_work);
1239 			/* Stand-alone HCI commands must be flagged as
1240 			 * single-command requests.
1242 			bt_cb(skb)->req.start = true;
1244 			skb_queue_tail(&hdev->cmd_q, skb);
1245 			queue_work(hdev->workqueue, &hdev->cmd_work);
1248 		if (!capable(CAP_NET_RAW)) {
1253 		skb_queue_tail(&hdev->raw_q, skb);
1254 		queue_work(hdev->workqueue, &hdev->tx_work);
/* setsockopt(SOL_HCI) — RAW channel only. HCI_DATA_DIR and
 * HCI_TIME_STAMP toggle cmsg_mask bits; HCI_FILTER installs a new
 * packet filter, with the type/event masks clamped to hci_sec_filter
 * for callers lacking CAP_NET_RAW.
 * NOTE(review): fragment — lock_sock/release_sock, the optname switch
 * head, case labels/breaks and -EFAULT assignments are not visible
 * here. */
1268 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1269 			       char __user *optval, unsigned int len)
1271 	struct hci_ufilter uf = { .opcode = 0 };
1272 	struct sock *sk = sock->sk;
1273 	int err = 0, opt = 0;
1275 	BT_DBG("sk %p, opt %d", sk, optname);
1279 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1286 		if (get_user(opt, (int __user *)optval)) {
1292 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1294 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1297 	case HCI_TIME_STAMP:
1298 		if (get_user(opt, (int __user *)optval)) {
1304 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1306 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
/* Seed uf with the current filter so a short copy keeps old values. */
1311 		struct hci_filter *f = &hci_pi(sk)->filter;
1313 		uf.type_mask = f->type_mask;
1314 		uf.opcode    = f->opcode;
1315 		uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1316 		uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1319 		len = min_t(unsigned int, len, sizeof(uf));
1320 		if (copy_from_user(&uf, optval, len)) {
/* Unprivileged sockets cannot widen the filter past the security mask. */
1325 		if (!capable(CAP_NET_RAW)) {
1326 			uf.type_mask &= hci_sec_filter.type_mask;
1327 			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1328 			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1332 			struct hci_filter *f = &hci_pi(sk)->filter;
1334 			f->type_mask = uf.type_mask;
1335 			f->opcode    = uf.opcode;
1336 			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
1337 			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* getsockopt(SOL_HCI) — RAW channel only. Returns the DATA_DIR /
 * TIME_STAMP enable flags as an int, or the current filter as a
 * (possibly truncated) hci_ufilter.
 * NOTE(review): fragment — the optname switch head, case labels,
 * break statements and the opt=0 branches are not visible here. */
1351 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1352 			       char __user *optval, int __user *optlen)
1354 	struct hci_ufilter uf;
1355 	struct sock *sk = sock->sk;
1356 	int len, opt, err = 0;
1358 	BT_DBG("sk %p, opt %d", sk, optname);
1360 	if (get_user(len, optlen))
1365 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1372 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1377 		if (put_user(opt, optval))
1381 	case HCI_TIME_STAMP:
1382 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1387 		if (put_user(opt, optval))
1393 		struct hci_filter *f = &hci_pi(sk)->filter;
1395 		memset(&uf, 0, sizeof(uf));
1396 		uf.type_mask = f->type_mask;
1397 		uf.opcode    = f->opcode;
1398 		uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1399 		uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1402 		len = min_t(unsigned int, len, sizeof(uf));
1403 		if (copy_to_user(optval, &uf, len))
/* proto_ops vtable for HCI sockets: datagram-style, no listen/connect/
 * accept/mmap (stubbed with sock_no_*). */
1417 static const struct proto_ops hci_sock_ops = {
1418 	.family		= PF_BLUETOOTH,
1419 	.owner		= THIS_MODULE,
1420 	.release	= hci_sock_release,
1421 	.bind		= hci_sock_bind,
1422 	.getname	= hci_sock_getname,
1423 	.sendmsg	= hci_sock_sendmsg,
1424 	.recvmsg	= hci_sock_recvmsg,
1425 	.ioctl		= hci_sock_ioctl,
1426 	.poll		= datagram_poll,
1427 	.listen		= sock_no_listen,
1428 	.shutdown	= sock_no_shutdown,
1429 	.setsockopt	= hci_sock_setsockopt,
1430 	.getsockopt	= hci_sock_getsockopt,
1431 	.connect	= sock_no_connect,
1432 	.socketpair	= sock_no_socketpair,
1433 	.accept		= sock_no_accept,
1434 	.mmap		= sock_no_mmap
/* struct proto for HCI sockets; obj_size reserves room for hci_pinfo. */
1437 static struct proto hci_sk_proto = {
1439 	.owner		= THIS_MODULE,
1440 	.obj_size	= sizeof(struct hci_pinfo)
/* socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI) creation: only SOCK_RAW
 * is supported; allocate the sock, initialize it unzapped in BT_OPEN
 * state and link it into hci_sk_list.
 * NOTE(review): fragment — the sk_alloc failure return and the final
 * return 0 are not visible here. */
1443 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1448 	BT_DBG("sock %p", sock);
1450 	if (sock->type != SOCK_RAW)
1451 		return -ESOCKTNOSUPPORT;
1453 	sock->ops = &hci_sock_ops;
1455 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1459 	sock_init_data(sock, sk);
1461 	sock_reset_flag(sk, SOCK_ZAPPED);
1463 	sk->sk_protocol = protocol;
1465 	sock->state = SS_UNCONNECTED;
1466 	sk->sk_state = BT_OPEN;
1468 	bt_sock_link(&hci_sk_list, sk);
/* PF_BLUETOOTH family hook that routes creation to hci_sock_create(). */
1472 static const struct net_proto_family hci_sock_family_ops = {
1473 	.family	= PF_BLUETOOTH,
1474 	.owner	= THIS_MODULE,
1475 	.create	= hci_sock_create,
/* Module init: register the proto, the BTPROTO_HCI socket family and
 * the procfs entry, unwinding in reverse on failure (the intermediate
 * goto labels are not visible in this fragment). The BUILD_BUG_ON
 * guards that sockaddr_hci fits in a generic sockaddr. */
1478 int __init hci_sock_init(void)
1482 	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1484 	err = proto_register(&hci_sk_proto, 0);
1488 	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1490 		BT_ERR("HCI socket registration failed");
1494 	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1496 		BT_ERR("Failed to create HCI proc file");
1497 		bt_sock_unregister(BTPROTO_HCI);
1501 	BT_INFO("HCI socket layer initialized");
1506 	proto_unregister(&hci_sk_proto);
/* Module exit: tear down procfs, socket family and proto registration
 * in reverse order of hci_sock_init(). */
1510 void hci_sock_cleanup(void)
1512 	bt_procfs_cleanup(&init_net, "hci");
1513 	bt_sock_unregister(BTPROTO_HCI);
1514 	proto_unregister(&hci_sk_proto);