2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
/* Count of sockets bound to the HCI monitor channel. When zero,
 * hci_send_to_monitor() returns early without cloning any skbs. */
34 static atomic_t monitor_promisc = ATOMIC_INIT(0);
36 /* ----- HCI socket interface ----- */
/* Test bit 'nr' in the bitmap at 'addr'.
 * The bitmap is treated as an array of __u32 words: word nr >> 5,
 * bit nr & 31. Used against the event/OCF filter masks below, which
 * may be wider than the single word that test_bit()-style helpers
 * would cover. Returns non-zero when the bit is set. */
38 static inline int hci_test_bit(int nr, void *addr)
40 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
/* Default security filter applied to RAW-channel sockets that lack
 * CAP_NET_RAW (see the uf.type_mask/event_mask clamping in
 * hci_sock_setsockopt() and the OCF check in hci_sock_sendmsg()).
 * NOTE(review): field names are declared in hci_sec_filter elsewhere;
 * the groupings below are inferred from the visible OGF comment and
 * should be confirmed against the struct definition. */
44 static struct hci_sec_filter hci_sec_filter = {
/* Packet type mask / event mask words (presumably; confirm order) */
48 { 0x1000d9fe, 0x0000b00c },
/* Per-OGF bitmaps of OCFs an unprivileged socket may send */
53 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
55 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
57 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
59 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
60 /* OGF_STATUS_PARAM */
61 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all open HCI sockets, protected by an rwlock.
 * Readers (the various hci_send_to_* broadcast paths) take
 * read_lock(); bt_sock_link()/bt_sock_unlink() modify it. */
65 static struct bt_sock_list hci_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Decide whether 'skb' should be withheld from RAW socket 'sk',
 * based on the socket's per-socket hci_filter.
 * NOTE(review): the bodies of the taken branches (the actual
 * return statements) are not visible in this chunk; presumably
 * a failed mask test returns true ("filter it") and vendor
 * packets bypass filtering — confirm against the full source. */
69 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
71 struct hci_filter *flt;
72 int flt_type, flt_event;
75 flt = &hci_pi(sk)->filter;
/* Vendor packets are handled specially (branch body not visible) */
77 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
80 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
/* Packet-type mask: one bit per HCI packet type */
82 if (!test_bit(flt_type, &flt->type_mask))
85 /* Extra filter for event packets only */
86 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
/* First byte of an event packet is the event code */
89 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS)
91 if (!hci_test_bit(flt_event, &flt->event_mask))
94 /* Check filter only when opcode is set */
/* Command Complete carries the opcode at data+3 (evt hdr + num_cmd) */
98 if (flt_event == HCI_EV_CMD_COMPLETE &&
99 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
/* Command Status carries the opcode at data+4 (status byte first) */
102 if (flt_event == HCI_EV_CMD_STATUS &&
103 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
109 /* Send frame to RAW socket */
/* Broadcast 'skb' to every RAW-channel socket bound to 'hdev',
 * except the socket the frame originated from (skip check body is
 * outside the visible lines). A single private copy with one byte
 * of headroom is made lazily, the packet-type byte is pushed in
 * front of the data, and each recipient gets a clone of that copy. */
110 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
113 struct sk_buff *skb_copy = NULL;
115 BT_DBG("hdev %p len %d", hdev, skb->len);
117 read_lock(&hci_sk_list.lock);
119 sk_for_each(sk, &hci_sk_list.head) {
120 struct sk_buff *nskb;
/* Only BT_BOUND sockets attached to this device are candidates */
122 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
125 /* Don't send frame to the socket it came from */
129 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
/* Honor the per-socket event/type filter */
132 if (is_filtered_packet(sk, skb))
136 /* Create a private copy with headroom */
137 skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
141 /* Put type byte before the data */
142 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
/* Clone per recipient; a failed queue drops the clone
 * (kfree_skb presumably follows in the non-visible lines) */
145 nskb = skb_clone(skb_copy, GFP_ATOMIC);
149 if (sock_queue_rcv_skb(sk, nskb))
153 read_unlock(&hci_sk_list.lock);
158 /* Send frame to control socket */
/* Broadcast 'skb' to every bound CONTROL-channel (mgmt) socket,
 * skipping 'skip_sk' (the originator). Each recipient gets its own
 * clone; failure to queue drops that clone only. */
159 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
163 BT_DBG("len %d", skb->len);
165 read_lock(&hci_sk_list.lock);
167 sk_for_each(sk, &hci_sk_list.head) {
168 struct sk_buff *nskb;
170 /* Skip the original socket */
174 if (sk->sk_state != BT_BOUND)
177 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
180 nskb = skb_clone(skb, GFP_ATOMIC);
184 if (sock_queue_rcv_skb(sk, nskb))
188 read_unlock(&hci_sk_list.lock);
191 /* Send frame to monitor socket */
/* Mirror 'skb' to every MONITOR-channel socket, prefixed with an
 * hci_mon_hdr whose opcode encodes packet type and direction.
 * Fast-path exit when no monitor socket exists (monitor_promisc == 0).
 * NOTE(review): the default: case of the switch (unknown packet
 * types, presumably a plain return) is not visible here. */
192 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
195 struct sk_buff *skb_copy = NULL;
198 if (!atomic_read(&monitor_promisc))
201 BT_DBG("hdev %p len %d", hdev, skb->len);
/* Map HCI packet type (+ direction for data) to a monitor opcode */
203 switch (bt_cb(skb)->pkt_type) {
204 case HCI_COMMAND_PKT:
205 opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
208 opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
210 case HCI_ACLDATA_PKT:
211 if (bt_cb(skb)->incoming)
212 opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
214 opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
216 case HCI_SCODATA_PKT:
217 if (bt_cb(skb)->incoming)
218 opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
220 opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
226 read_lock(&hci_sk_list.lock);
228 sk_for_each(sk, &hci_sk_list.head) {
229 struct sk_buff *nskb;
231 if (sk->sk_state != BT_BOUND)
234 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
238 struct hci_mon_hdr *hdr;
240 /* Create a private copy with headroom */
/* Copy is made at most once; header room for the monitor header */
241 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
246 /* Put header before the data */
247 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
248 hdr->opcode = opcode;
249 hdr->index = cpu_to_le16(hdev->id);
250 hdr->len = cpu_to_le16(skb->len);
253 nskb = skb_clone(skb_copy, GFP_ATOMIC);
257 if (sock_queue_rcv_skb(sk, nskb))
261 read_unlock(&hci_sk_list.lock);
/* Deliver an already-built monitor message (from
 * create_monitor_event()) to every bound MONITOR-channel socket.
 * Each recipient gets a clone; a failed queue drops only that clone. */
266 static void send_monitor_event(struct sk_buff *skb)
270 BT_DBG("len %d", skb->len);
272 read_lock(&hci_sk_list.lock);
274 sk_for_each(sk, &hci_sk_list.head) {
275 struct sk_buff *nskb;
277 if (sk->sk_state != BT_BOUND)
280 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
283 nskb = skb_clone(skb, GFP_ATOMIC);
287 if (sock_queue_rcv_skb(sk, nskb))
291 read_unlock(&hci_sk_list.lock);
/* Build a monitor-channel message for a device lifecycle 'event'.
 * HCI_DEV_REG produces a NEW_INDEX record carrying type, bus
 * (presumably — the line setting it is not visible), bdaddr and the
 * first 8 bytes of the device name; HCI_DEV_UNREG produces an empty
 * DEL_INDEX record. The hci_mon_hdr is pushed in front, timestamped,
 * and the skb returned (NULL on allocation failure, presumably). */
294 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
296 struct hci_mon_hdr *hdr;
297 struct hci_mon_new_index *ni;
303 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
307 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
308 ni->type = hdev->dev_type;
310 bacpy(&ni->bdaddr, &hdev->bdaddr);
/* Name is copied fixed-length; hdev->name is assumed to fit/pad */
311 memcpy(ni->name, hdev->name, 8);
313 opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
/* DEL_INDEX carries no payload */
317 skb = bt_skb_alloc(0, GFP_ATOMIC);
321 opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
328 __net_timestamp(skb);
330 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
331 hdr->opcode = opcode;
332 hdr->index = cpu_to_le16(hdev->id);
/* Payload length excludes the monitor header itself */
333 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay a HCI_DEV_REG (NEW_INDEX) record for every currently
 * registered device to a freshly bound monitor socket 'sk', so the
 * monitor starts with a complete view of existing controllers.
 * Called from hci_sock_bind() under the MONITOR channel. */
338 static void send_monitor_replay(struct sock *sk)
340 struct hci_dev *hdev;
342 read_lock(&hci_dev_list_lock);
344 list_for_each_entry(hdev, &hci_dev_list, list) {
347 skb = create_monitor_event(hdev, HCI_DEV_REG);
/* On queue failure the skb is dropped (free presumably in the
 * non-visible branch body) */
351 if (sock_queue_rcv_skb(sk, skb))
355 read_unlock(&hci_dev_list_lock);
358 /* Generate internal stack event */
/* Fabricate an HCI_EV_STACK_INTERNAL event carrying 'type' plus
 * 'dlen' bytes of 'data', mark it incoming, timestamp it, and feed
 * it through hci_send_to_sock() so RAW sockets observe stack-internal
 * happenings (e.g. device add/remove) as if they were HCI events. */
359 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
361 struct hci_event_hdr *hdr;
362 struct hci_ev_stack_internal *ev;
365 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
369 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
370 hdr->evt = HCI_EV_STACK_INTERNAL;
371 hdr->plen = sizeof(*ev) + dlen;
373 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
375 memcpy(ev->data, data, dlen);
/* Present as an incoming event so direction-based filters match */
377 bt_cb(skb)->incoming = 1;
378 __net_timestamp(skb);
380 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
381 skb->dev = (void *) hdev;
382 hci_send_to_sock(hdev, skb);
/* Notify sockets about a device lifecycle 'event' on 'hdev':
 * first mirror it to monitor sockets (if any), then synthesize an
 * HCI_EV_SI_DEVICE stack event for RAW sockets. On HCI_DEV_UNREG,
 * additionally detach every socket still bound to the dying device
 * and wake it via sk_state_change().
 * NOTE(review): the hdev refcount drop for detached sockets
 * (hci_dev_put, presumably) is in the non-visible lines. */
386 void hci_sock_dev_event(struct hci_dev *hdev, int event)
388 struct hci_ev_si_device ev;
390 BT_DBG("hdev %s event %d", hdev->name, event);
392 /* Send event to monitor */
393 if (atomic_read(&monitor_promisc)) {
396 skb = create_monitor_event(hdev, event);
398 send_monitor_event(skb);
403 /* Send event to sockets */
405 ev.dev_id = hdev->id;
406 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
408 if (event == HCI_DEV_UNREG) {
411 /* Detach sockets from device */
412 read_lock(&hci_sk_list.lock);
413 sk_for_each(sk, &hci_sk_list.head) {
/* Per-socket lock while mutating its hdev binding */
414 bh_lock_sock_nested(sk);
415 if (hci_pi(sk)->hdev == hdev) {
416 hci_pi(sk)->hdev = NULL;
418 sk->sk_state = BT_OPEN;
419 sk->sk_state_change(sk);
425 read_unlock(&hci_sk_list.lock);
/* Release an HCI socket: drop the monitor-promisc count for monitor
 * sockets, unlink from hci_sk_list, drop the device promisc count if
 * bound to a device, and purge both queues.
 * NOTE(review): hci_dev_put() and sock_put()/sock_orphan() for the
 * final teardown are presumably in the non-visible lines. */
429 static int hci_sock_release(struct socket *sock)
431 struct sock *sk = sock->sk;
432 struct hci_dev *hdev;
434 BT_DBG("sock %p sk %p", sock, sk);
439 hdev = hci_pi(sk)->hdev;
441 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
442 atomic_dec(&monitor_promisc);
444 bt_sock_unlink(&hci_sk_list, sk);
/* Balances the atomic_inc(&hdev->promisc) done at bind time */
447 atomic_dec(&hdev->promisc);
453 skb_queue_purge(&sk->sk_receive_queue);
454 skb_queue_purge(&sk->sk_write_queue);
/* ioctl helper: copy a bdaddr_t from userspace and add it to the
 * device blacklist (addr type 0 = BR/EDR) under hci_dev_lock.
 * Returns -EFAULT on bad copy (branch body not visible), otherwise
 * the hci_blacklist_add() result. */
460 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
465 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
470 err = hci_blacklist_add(hdev, &bdaddr, 0);
472 hci_dev_unlock(hdev);
/* ioctl helper: copy a bdaddr_t from userspace and remove it from
 * the device blacklist (addr type 0 = BR/EDR) under hci_dev_lock.
 * Mirrors hci_sock_blacklist_add(). */
477 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
482 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
487 err = hci_blacklist_del(hdev, &bdaddr, 0);
489 hci_dev_unlock(hdev);
494 /* Ioctls that require bound socket */
/* Dispatch ioctls that need a device-bound socket. Rejects devices
 * claimed by a user channel; privileged commands (raw-mode toggle,
 * blacklist add/del) require CAP_NET_ADMIN. Falls through to the
 * driver's own hdev->ioctl for unknown commands.
 * NOTE(review): the case labels (HCISETRAW, HCIGETCONNINFO, ...) sit
 * on the non-visible lines; the pairing below is inferred from the
 * call order and should be confirmed against the full source. */
495 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
498 struct hci_dev *hdev = hci_pi(sk)->hdev;
/* Device handed to a user channel: no concurrent ioctl control */
503 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
508 if (!capable(CAP_NET_ADMIN))
/* Drivers flagged RAW-only cannot have raw mode toggled */
511 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
515 set_bit(HCI_RAW, &hdev->flags);
517 clear_bit(HCI_RAW, &hdev->flags);
522 return hci_get_conn_info(hdev, (void __user *) arg);
525 return hci_get_auth_info(hdev, (void __user *) arg);
528 if (!capable(CAP_NET_ADMIN))
530 return hci_sock_blacklist_add(hdev, (void __user *) arg);
533 if (!capable(CAP_NET_ADMIN))
535 return hci_sock_blacklist_del(hdev, (void __user *) arg);
/* Unknown cmd: let the driver handle it if it provides an ioctl */
539 return hdev->ioctl(hdev, cmd, arg);
/* Top-level ioctl entry point. Non-RAW channels are rejected
 * (branch body not visible). Global queries need no privilege;
 * device open/close/reset/cmd require CAP_NET_ADMIN. Anything not
 * handled here is forwarded to hci_sock_bound_ioctl() under the
 * socket lock (lock/release calls are on non-visible lines,
 * presumably lock_sock/release_sock). */
544 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
547 void __user *argp = (void __user *) arg;
548 struct sock *sk = sock->sk;
551 BT_DBG("cmd %x arg %lx", cmd, arg);
555 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
564 return hci_get_dev_list(argp);
567 return hci_get_dev_info(argp);
570 return hci_get_conn_list(argp);
573 if (!capable(CAP_NET_ADMIN))
575 return hci_dev_open(arg);
578 if (!capable(CAP_NET_ADMIN))
580 return hci_dev_close(arg);
583 if (!capable(CAP_NET_ADMIN))
585 return hci_dev_reset(arg);
588 if (!capable(CAP_NET_ADMIN))
590 return hci_dev_reset_stat(arg);
600 if (!capable(CAP_NET_ADMIN))
602 return hci_dev_cmd(cmd, argp);
605 return hci_inquiry(argp);
610 err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a channel (and, for RAW, optionally to a
 * device). The user address is copied into a zeroed local so short
 * addr_len values leave trailing fields zero. Per channel:
 *  RAW     - may attach to a specific device, bumping hdev->promisc;
 *  CONTROL - no device allowed, needs CAP_NET_ADMIN;
 *  MONITOR - no device allowed, needs CAP_NET_RAW; existing devices
 *            are replayed and monitor_promisc incremented.
 * On success the channel is recorded and state moves to BT_BOUND.
 * NOTE(review): error paths (err = -E...; goto done) live on the
 * non-visible lines. */
617 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
620 struct sockaddr_hci haddr;
621 struct sock *sk = sock->sk;
622 struct hci_dev *hdev = NULL;
625 BT_DBG("sock %p sk %p", sock, sk);
/* Copy at most sizeof(haddr); rest stays zero from memset */
630 memset(&haddr, 0, sizeof(haddr));
631 len = min_t(unsigned int, sizeof(haddr), addr_len);
632 memcpy(&haddr, addr, len);
634 if (haddr.hci_family != AF_BLUETOOTH)
/* Re-binding an already-bound socket is rejected */
639 if (sk->sk_state == BT_BOUND) {
644 switch (haddr.hci_channel) {
645 case HCI_CHANNEL_RAW:
646 if (hci_pi(sk)->hdev) {
651 if (haddr.hci_dev != HCI_DEV_NONE) {
652 hdev = hci_dev_get(haddr.hci_dev);
/* Balanced by atomic_dec in hci_sock_release() */
658 atomic_inc(&hdev->promisc);
661 hci_pi(sk)->hdev = hdev;
664 case HCI_CHANNEL_CONTROL:
665 if (haddr.hci_dev != HCI_DEV_NONE) {
670 if (!capable(CAP_NET_ADMIN)) {
677 case HCI_CHANNEL_MONITOR:
678 if (haddr.hci_dev != HCI_DEV_NONE) {
683 if (!capable(CAP_NET_RAW)) {
/* Give the new monitor a snapshot of registered devices */
688 send_monitor_replay(sk);
690 atomic_inc(&monitor_promisc);
699 hci_pi(sk)->channel = haddr.hci_channel;
700 sk->sk_state = BT_BOUND;
/* Report the socket's bound address: family, device id and channel.
 * NOTE(review): the non-visible lines 723-727 presumably guard
 * against a NULL hdev (socket not bound to a device) before
 * hdev->id is dereferenced below — confirm against the full source. */
707 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
708 int *addr_len, int peer)
710 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
711 struct sock *sk = sock->sk;
712 struct hci_dev *hdev;
715 BT_DBG("sock %p sk %p", sock, sk);
722 hdev = hci_pi(sk)->hdev;
728 *addr_len = sizeof(*haddr);
729 haddr->hci_family = AF_BLUETOOTH;
730 haddr->hci_dev = hdev->id;
731 haddr->hci_channel= hci_pi(sk)->channel;
/* Attach ancillary data (cmsgs) to a received message according to
 * the socket's cmsg_mask: HCI_CMSG_DIR adds the packet direction,
 * HCI_CMSG_TSTAMP adds the skb timestamp, converted to a compat
 * timeval for 32-bit userspace when needed.
 * NOTE(review): the declarations of 'data'/'len' and the non-compat
 * assignment of them are on non-visible lines. */
738 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
741 __u32 mask = hci_pi(sk)->cmsg_mask;
743 if (mask & HCI_CMSG_DIR) {
744 int incoming = bt_cb(skb)->incoming;
745 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
749 if (mask & HCI_CMSG_TSTAMP) {
751 struct compat_timeval ctv;
757 skb_get_timestamp(skb, &tv);
/* 32-bit compat tasks get a 32-bit timeval layout */
762 if (!COMPAT_USE_64BIT_TIME &&
763 (msg->msg_flags & MSG_CMSG_COMPAT)) {
764 ctv.tv_sec = tv.tv_sec;
765 ctv.tv_usec = tv.tv_usec;
771 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* Receive one queued frame. MSG_OOB is unsupported; closed sockets
 * return early (branch bodies not visible). Oversized frames are
 * truncated and flagged MSG_TRUNC. RAW sockets get per-socket cmsgs
 * (direction/timestamp); CONTROL and MONITOR get plain timestamps.
 * Returns the copied byte count, or the copy error. */
775 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
776 struct msghdr *msg, size_t len, int flags)
778 int noblock = flags & MSG_DONTWAIT;
779 struct sock *sk = sock->sk;
783 BT_DBG("sock %p, sk %p", sock, sk);
785 if (flags & (MSG_OOB))
788 if (sk->sk_state == BT_CLOSED)
791 skb = skb_recv_datagram(sk, flags, noblock, &err);
795 msg->msg_namelen = 0;
/* Frame longer than the user buffer: copy 'len' and flag it */
799 msg->msg_flags |= MSG_TRUNC;
803 skb_reset_transport_header(skb);
804 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
806 switch (hci_pi(sk)->channel) {
807 case HCI_CHANNEL_RAW:
808 hci_sock_cmsg(sk, msg, skb);
810 case HCI_CHANNEL_CONTROL:
811 case HCI_CHANNEL_MONITOR:
812 sock_recv_timestamp(msg, sk, skb);
816 skb_free_datagram(sk, skb);
818 return err ? : copied;
/* Send a frame from userspace. CONTROL goes to mgmt_control();
 * MONITOR is write-rejected (case body not visible); RAW frames are
 * copied into an skb whose first byte selects the packet type.
 * Commands from unprivileged sockets are checked against
 * hci_sec_filter's per-OGF OCF bitmap; vendor-group (0x3f) or
 * raw-mode commands bypass the command queue and go straight to
 * raw_q/tx_work, everything else is queued as a single-command
 * request on cmd_q. Non-command packets require CAP_NET_RAW.
 * NOTE(review): error/cleanup paths (goto drop etc.) are on
 * non-visible lines. */
821 static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
822 struct msghdr *msg, size_t len)
824 struct sock *sk = sock->sk;
825 struct hci_dev *hdev;
829 BT_DBG("sock %p sk %p", sock, sk);
831 if (msg->msg_flags & MSG_OOB)
834 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
/* Minimum 4 bytes: type byte + smallest HCI header */
837 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
842 switch (hci_pi(sk)->channel) {
843 case HCI_CHANNEL_RAW:
845 case HCI_CHANNEL_CONTROL:
846 err = mgmt_control(sk, msg, len);
848 case HCI_CHANNEL_MONITOR:
856 hdev = hci_pi(sk)->hdev;
862 if (!test_bit(HCI_UP, &hdev->flags)) {
867 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
871 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
/* First user byte is the HCI packet type indicator */
876 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
878 skb->dev = (void *) hdev;
880 if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
881 u16 opcode = get_unaligned_le16(skb->data);
882 u16 ogf = hci_opcode_ogf(opcode);
883 u16 ocf = hci_opcode_ocf(opcode);
/* Unprivileged senders are limited to the security filter's
 * whitelist of OCFs per OGF */
885 if (((ogf > HCI_SFLT_MAX_OGF) ||
886 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
887 &hci_sec_filter.ocf_mask[ogf])) &&
888 !capable(CAP_NET_RAW)) {
/* 0x3f = vendor-specific OGF: bypass the command scheduler */
893 if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
894 skb_queue_tail(&hdev->raw_q, skb);
895 queue_work(hdev->workqueue, &hdev->tx_work);
897 /* Stand-alone HCI commands must be flaged as
898 * single-command requests.
900 bt_cb(skb)->req.start = true;
902 skb_queue_tail(&hdev->cmd_q, skb);
903 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Non-command traffic (ACL/SCO) needs full raw capability */
906 if (!capable(CAP_NET_RAW)) {
911 skb_queue_tail(&hdev->raw_q, skb);
912 queue_work(hdev->workqueue, &hdev->tx_work);
/* SOL_HCI setsockopt for RAW sockets only (other channels are
 * rejected; branch body not visible). HCI_DATA_DIR/HCI_TIME_STAMP
 * toggle cmsg flags; HCI_FILTER installs a new hci_ufilter, seeded
 * from the current filter so a short copy keeps existing values,
 * and clamped by hci_sec_filter for callers without CAP_NET_RAW.
 * NOTE(review): case labels and the final release/return are on
 * non-visible lines. */
926 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
927 char __user *optval, unsigned int len)
929 struct hci_ufilter uf = { .opcode = 0 };
930 struct sock *sk = sock->sk;
931 int err = 0, opt = 0;
933 BT_DBG("sk %p, opt %d", sk, optname);
937 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
944 if (get_user(opt, (int __user *)optval)) {
950 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
952 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
956 if (get_user(opt, (int __user *)optval)) {
962 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
964 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
969 struct hci_filter *f = &hci_pi(sk)->filter;
/* Pre-load uf with the current filter; a short user copy then
 * only overwrites the leading fields */
971 uf.type_mask = f->type_mask;
972 uf.opcode = f->opcode;
973 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
974 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
977 len = min_t(unsigned int, len, sizeof(uf));
978 if (copy_from_user(&uf, optval, len)) {
/* Unprivileged callers cannot widen past the security filter */
983 if (!capable(CAP_NET_RAW)) {
984 uf.type_mask &= hci_sec_filter.type_mask;
985 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
986 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
990 struct hci_filter *f = &hci_pi(sk)->filter;
992 f->type_mask = uf.type_mask;
993 f->opcode = uf.opcode;
994 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
995 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* SOL_HCI getsockopt, RAW sockets only. HCI_DATA_DIR and
 * HCI_TIME_STAMP report whether the corresponding cmsg flag is set
 * (the opt = 1 / opt = 0 assignments sit on non-visible lines);
 * HCI_FILTER copies out the current filter, truncated to the
 * caller-supplied length. */
1009 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1010 char __user *optval, int __user *optlen)
1012 struct hci_ufilter uf;
1013 struct sock *sk = sock->sk;
1014 int len, opt, err = 0;
1016 BT_DBG("sk %p, opt %d", sk, optname);
1018 if (get_user(len, optlen))
1023 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1030 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1035 if (put_user(opt, optval))
1039 case HCI_TIME_STAMP:
1040 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1045 if (put_user(opt, optval))
1051 struct hci_filter *f = &hci_pi(sk)->filter;
/* Fully zero uf so padding never leaks kernel stack to userspace */
1053 memset(&uf, 0, sizeof(uf));
1054 uf.type_mask = f->type_mask;
1055 uf.opcode = f->opcode;
1056 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1057 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1060 len = min_t(unsigned int, len, sizeof(uf));
1061 if (copy_to_user(optval, &uf, len))
/* proto_ops vtable for PF_BLUETOOTH/BTPROTO_HCI sockets. HCI sockets
 * are datagram-like and connectionless: listen/connect/accept/
 * socketpair/shutdown/mmap are all sock_no_* stubs, and poll is the
 * generic datagram_poll. */
1075 static const struct proto_ops hci_sock_ops = {
1076 .family = PF_BLUETOOTH,
1077 .owner = THIS_MODULE,
1078 .release = hci_sock_release,
1079 .bind = hci_sock_bind,
1080 .getname = hci_sock_getname,
1081 .sendmsg = hci_sock_sendmsg,
1082 .recvmsg = hci_sock_recvmsg,
1083 .ioctl = hci_sock_ioctl,
1084 .poll = datagram_poll,
1085 .listen = sock_no_listen,
1086 .shutdown = sock_no_shutdown,
1087 .setsockopt = hci_sock_setsockopt,
1088 .getsockopt = hci_sock_getsockopt,
1089 .connect = sock_no_connect,
1090 .socketpair = sock_no_socketpair,
1091 .accept = sock_no_accept,
1092 .mmap = sock_no_mmap
/* struct proto registered for HCI sockets; obj_size makes sk_alloc
 * reserve room for the per-socket hci_pinfo (accessed via hci_pi()). */
1095 static struct proto hci_sk_proto = {
1097 .owner = THIS_MODULE,
1098 .obj_size = sizeof(struct hci_pinfo)
/* socket(2) backend for BTPROTO_HCI: only SOCK_RAW is supported.
 * Allocates the sock with hci_sk_proto (reserving hci_pinfo space),
 * initializes it into BT_OPEN/SS_UNCONNECTED and links it onto the
 * global hci_sk_list. The NULL check after sk_alloc is on a
 * non-visible line. */
1101 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1106 BT_DBG("sock %p", sock);
1108 if (sock->type != SOCK_RAW)
1109 return -ESOCKTNOSUPPORT;
1111 sock->ops = &hci_sock_ops;
1113 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1117 sock_init_data(sock, sk);
1119 sock_reset_flag(sk, SOCK_ZAPPED);
1121 sk->sk_protocol = protocol;
1123 sock->state = SS_UNCONNECTED;
1124 sk->sk_state = BT_OPEN;
1126 bt_sock_link(&hci_sk_list, sk);
/* Family registration handed to bt_sock_register() for BTPROTO_HCI. */
1130 static const struct net_proto_family hci_sock_family_ops = {
1131 .family = PF_BLUETOOTH,
1132 .owner = THIS_MODULE,
1133 .create = hci_sock_create,
/* Module init: register the proto, the BTPROTO_HCI socket family and
 * the /proc entry, unwinding in reverse order on failure (the error
 * gotos between steps are on non-visible lines). */
1136 int __init hci_sock_init(void)
1140 err = proto_register(&hci_sk_proto, 0);
1144 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1146 BT_ERR("HCI socket registration failed");
1150 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1152 BT_ERR("Failed to create HCI proc file");
/* procfs failed: undo the socket family registration */
1153 bt_sock_unregister(BTPROTO_HCI);
1157 BT_INFO("HCI socket layer initialized");
/* error label: undo proto_register() */
1162 proto_unregister(&hci_sk_proto);
/* Module exit: tear down in exact reverse order of hci_sock_init()
 * (procfs, socket family, proto). */
1166 void hci_sock_cleanup(void)
1168 bt_procfs_cleanup(&init_net, "hci");
1169 bt_sock_unregister(BTPROTO_HCI);
1170 proto_unregister(&hci_sk_proto);