/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */
#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
34 static atomic_t monitor_promisc = ATOMIC_INIT(0);
36 /* ----- HCI socket interface ----- */
38 static inline int hci_test_bit(int nr, void *addr)
40 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
44 static struct hci_sec_filter hci_sec_filter = {
48 { 0x1000d9fe, 0x0000b00c },
53 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
55 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
57 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
59 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
60 /* OGF_STATUS_PARAM */
61 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
65 static struct bt_sock_list hci_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
69 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
71 struct hci_filter *flt;
72 int flt_type, flt_event;
75 flt = &hci_pi(sk)->filter;
77 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
80 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
82 if (!test_bit(flt_type, &flt->type_mask))
85 /* Extra filter for event packets only */
86 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
89 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
91 if (!hci_test_bit(flt_event, &flt->event_mask))
94 /* Check filter only when opcode is set */
98 if (flt_event == HCI_EV_CMD_COMPLETE &&
99 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
102 if (flt_event == HCI_EV_CMD_STATUS &&
103 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
109 /* Send frame to RAW socket */
110 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
113 struct sk_buff *skb_copy = NULL;
115 BT_DBG("hdev %p len %d", hdev, skb->len);
117 read_lock(&hci_sk_list.lock);
119 sk_for_each(sk, &hci_sk_list.head) {
120 struct sk_buff *nskb;
122 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
125 /* Don't send frame to the socket it came from */
129 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
132 if (is_filtered_packet(sk, skb))
136 /* Create a private copy with headroom */
137 skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
141 /* Put type byte before the data */
142 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
145 nskb = skb_clone(skb_copy, GFP_ATOMIC);
149 if (sock_queue_rcv_skb(sk, nskb))
153 read_unlock(&hci_sk_list.lock);
158 /* Send frame to control socket */
159 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
163 BT_DBG("len %d", skb->len);
165 read_lock(&hci_sk_list.lock);
167 sk_for_each(sk, &hci_sk_list.head) {
168 struct sk_buff *nskb;
170 /* Skip the original socket */
174 if (sk->sk_state != BT_BOUND)
177 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
180 nskb = skb_clone(skb, GFP_ATOMIC);
184 if (sock_queue_rcv_skb(sk, nskb))
188 read_unlock(&hci_sk_list.lock);
191 /* Send frame to monitor socket */
192 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
195 struct sk_buff *skb_copy = NULL;
198 if (!atomic_read(&monitor_promisc))
201 BT_DBG("hdev %p len %d", hdev, skb->len);
203 switch (bt_cb(skb)->pkt_type) {
204 case HCI_COMMAND_PKT:
205 opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
208 opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
210 case HCI_ACLDATA_PKT:
211 if (bt_cb(skb)->incoming)
212 opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
214 opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
216 case HCI_SCODATA_PKT:
217 if (bt_cb(skb)->incoming)
218 opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
220 opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
226 read_lock(&hci_sk_list.lock);
228 sk_for_each(sk, &hci_sk_list.head) {
229 struct sk_buff *nskb;
231 if (sk->sk_state != BT_BOUND)
234 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
238 struct hci_mon_hdr *hdr;
240 /* Create a private copy with headroom */
241 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
246 /* Put header before the data */
247 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
248 hdr->opcode = opcode;
249 hdr->index = cpu_to_le16(hdev->id);
250 hdr->len = cpu_to_le16(skb->len);
253 nskb = skb_clone(skb_copy, GFP_ATOMIC);
257 if (sock_queue_rcv_skb(sk, nskb))
261 read_unlock(&hci_sk_list.lock);
266 static void send_monitor_event(struct sk_buff *skb)
270 BT_DBG("len %d", skb->len);
272 read_lock(&hci_sk_list.lock);
274 sk_for_each(sk, &hci_sk_list.head) {
275 struct sk_buff *nskb;
277 if (sk->sk_state != BT_BOUND)
280 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
283 nskb = skb_clone(skb, GFP_ATOMIC);
287 if (sock_queue_rcv_skb(sk, nskb))
291 read_unlock(&hci_sk_list.lock);
294 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
296 struct hci_mon_hdr *hdr;
297 struct hci_mon_new_index *ni;
303 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
307 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
308 ni->type = hdev->dev_type;
310 bacpy(&ni->bdaddr, &hdev->bdaddr);
311 memcpy(ni->name, hdev->name, 8);
313 opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
317 skb = bt_skb_alloc(0, GFP_ATOMIC);
321 opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
328 __net_timestamp(skb);
330 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
331 hdr->opcode = opcode;
332 hdr->index = cpu_to_le16(hdev->id);
333 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
338 static void send_monitor_replay(struct sock *sk)
340 struct hci_dev *hdev;
342 read_lock(&hci_dev_list_lock);
344 list_for_each_entry(hdev, &hci_dev_list, list) {
347 skb = create_monitor_event(hdev, HCI_DEV_REG);
351 if (sock_queue_rcv_skb(sk, skb))
355 read_unlock(&hci_dev_list_lock);
358 /* Generate internal stack event */
359 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
361 struct hci_event_hdr *hdr;
362 struct hci_ev_stack_internal *ev;
365 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
369 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
370 hdr->evt = HCI_EV_STACK_INTERNAL;
371 hdr->plen = sizeof(*ev) + dlen;
373 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
375 memcpy(ev->data, data, dlen);
377 bt_cb(skb)->incoming = 1;
378 __net_timestamp(skb);
380 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
381 skb->dev = (void *) hdev;
382 hci_send_to_sock(hdev, skb);
386 void hci_sock_dev_event(struct hci_dev *hdev, int event)
388 struct hci_ev_si_device ev;
390 BT_DBG("hdev %s event %d", hdev->name, event);
392 /* Send event to monitor */
393 if (atomic_read(&monitor_promisc)) {
396 skb = create_monitor_event(hdev, event);
398 send_monitor_event(skb);
403 /* Send event to sockets */
405 ev.dev_id = hdev->id;
406 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
408 if (event == HCI_DEV_UNREG) {
411 /* Detach sockets from device */
412 read_lock(&hci_sk_list.lock);
413 sk_for_each(sk, &hci_sk_list.head) {
414 bh_lock_sock_nested(sk);
415 if (hci_pi(sk)->hdev == hdev) {
416 hci_pi(sk)->hdev = NULL;
418 sk->sk_state = BT_OPEN;
419 sk->sk_state_change(sk);
425 read_unlock(&hci_sk_list.lock);
429 static int hci_sock_release(struct socket *sock)
431 struct sock *sk = sock->sk;
432 struct hci_dev *hdev;
434 BT_DBG("sock %p sk %p", sock, sk);
439 hdev = hci_pi(sk)->hdev;
441 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
442 atomic_dec(&monitor_promisc);
444 bt_sock_unlink(&hci_sk_list, sk);
447 atomic_dec(&hdev->promisc);
453 skb_queue_purge(&sk->sk_receive_queue);
454 skb_queue_purge(&sk->sk_write_queue);
460 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
465 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
470 err = hci_blacklist_add(hdev, &bdaddr, 0);
472 hci_dev_unlock(hdev);
477 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
482 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
487 err = hci_blacklist_del(hdev, &bdaddr, 0);
489 hci_dev_unlock(hdev);
494 /* Ioctls that require bound socket */
495 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
498 struct hci_dev *hdev = hci_pi(sk)->hdev;
505 if (!capable(CAP_NET_ADMIN))
508 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
512 set_bit(HCI_RAW, &hdev->flags);
514 clear_bit(HCI_RAW, &hdev->flags);
519 return hci_get_conn_info(hdev, (void __user *) arg);
522 return hci_get_auth_info(hdev, (void __user *) arg);
525 if (!capable(CAP_NET_ADMIN))
527 return hci_sock_blacklist_add(hdev, (void __user *) arg);
530 if (!capable(CAP_NET_ADMIN))
532 return hci_sock_blacklist_del(hdev, (void __user *) arg);
536 return hdev->ioctl(hdev, cmd, arg);
541 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
544 struct sock *sk = sock->sk;
545 void __user *argp = (void __user *) arg;
548 BT_DBG("cmd %x arg %lx", cmd, arg);
552 return hci_get_dev_list(argp);
555 return hci_get_dev_info(argp);
558 return hci_get_conn_list(argp);
561 if (!capable(CAP_NET_ADMIN))
563 return hci_dev_open(arg);
566 if (!capable(CAP_NET_ADMIN))
568 return hci_dev_close(arg);
571 if (!capable(CAP_NET_ADMIN))
573 return hci_dev_reset(arg);
576 if (!capable(CAP_NET_ADMIN))
578 return hci_dev_reset_stat(arg);
588 if (!capable(CAP_NET_ADMIN))
590 return hci_dev_cmd(cmd, argp);
593 return hci_inquiry(argp);
597 err = hci_sock_bound_ioctl(sk, cmd, arg);
603 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
606 struct sockaddr_hci haddr;
607 struct sock *sk = sock->sk;
608 struct hci_dev *hdev = NULL;
611 BT_DBG("sock %p sk %p", sock, sk);
616 memset(&haddr, 0, sizeof(haddr));
617 len = min_t(unsigned int, sizeof(haddr), addr_len);
618 memcpy(&haddr, addr, len);
620 if (haddr.hci_family != AF_BLUETOOTH)
625 if (sk->sk_state == BT_BOUND) {
630 switch (haddr.hci_channel) {
631 case HCI_CHANNEL_RAW:
632 if (hci_pi(sk)->hdev) {
637 if (haddr.hci_dev != HCI_DEV_NONE) {
638 hdev = hci_dev_get(haddr.hci_dev);
644 atomic_inc(&hdev->promisc);
647 hci_pi(sk)->hdev = hdev;
650 case HCI_CHANNEL_CONTROL:
651 if (haddr.hci_dev != HCI_DEV_NONE) {
656 if (!capable(CAP_NET_ADMIN)) {
663 case HCI_CHANNEL_MONITOR:
664 if (haddr.hci_dev != HCI_DEV_NONE) {
669 if (!capable(CAP_NET_RAW)) {
674 send_monitor_replay(sk);
676 atomic_inc(&monitor_promisc);
685 hci_pi(sk)->channel = haddr.hci_channel;
686 sk->sk_state = BT_BOUND;
693 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
694 int *addr_len, int peer)
696 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
697 struct sock *sk = sock->sk;
698 struct hci_dev *hdev;
701 BT_DBG("sock %p sk %p", sock, sk);
708 hdev = hci_pi(sk)->hdev;
714 *addr_len = sizeof(*haddr);
715 haddr->hci_family = AF_BLUETOOTH;
716 haddr->hci_dev = hdev->id;
717 haddr->hci_channel= hci_pi(sk)->channel;
724 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
727 __u32 mask = hci_pi(sk)->cmsg_mask;
729 if (mask & HCI_CMSG_DIR) {
730 int incoming = bt_cb(skb)->incoming;
731 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
735 if (mask & HCI_CMSG_TSTAMP) {
737 struct compat_timeval ctv;
743 skb_get_timestamp(skb, &tv);
748 if (!COMPAT_USE_64BIT_TIME &&
749 (msg->msg_flags & MSG_CMSG_COMPAT)) {
750 ctv.tv_sec = tv.tv_sec;
751 ctv.tv_usec = tv.tv_usec;
757 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
761 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
762 struct msghdr *msg, size_t len, int flags)
764 int noblock = flags & MSG_DONTWAIT;
765 struct sock *sk = sock->sk;
769 BT_DBG("sock %p, sk %p", sock, sk);
771 if (flags & (MSG_OOB))
774 if (sk->sk_state == BT_CLOSED)
777 skb = skb_recv_datagram(sk, flags, noblock, &err);
781 msg->msg_namelen = 0;
785 msg->msg_flags |= MSG_TRUNC;
789 skb_reset_transport_header(skb);
790 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
792 switch (hci_pi(sk)->channel) {
793 case HCI_CHANNEL_RAW:
794 hci_sock_cmsg(sk, msg, skb);
796 case HCI_CHANNEL_CONTROL:
797 case HCI_CHANNEL_MONITOR:
798 sock_recv_timestamp(msg, sk, skb);
802 skb_free_datagram(sk, skb);
804 return err ? : copied;
807 static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
808 struct msghdr *msg, size_t len)
810 struct sock *sk = sock->sk;
811 struct hci_dev *hdev;
815 BT_DBG("sock %p sk %p", sock, sk);
817 if (msg->msg_flags & MSG_OOB)
820 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
823 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
828 switch (hci_pi(sk)->channel) {
829 case HCI_CHANNEL_RAW:
831 case HCI_CHANNEL_CONTROL:
832 err = mgmt_control(sk, msg, len);
834 case HCI_CHANNEL_MONITOR:
842 hdev = hci_pi(sk)->hdev;
848 if (!test_bit(HCI_UP, &hdev->flags)) {
853 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
857 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
862 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
864 skb->dev = (void *) hdev;
866 if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
867 u16 opcode = get_unaligned_le16(skb->data);
868 u16 ogf = hci_opcode_ogf(opcode);
869 u16 ocf = hci_opcode_ocf(opcode);
871 if (((ogf > HCI_SFLT_MAX_OGF) ||
872 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
873 &hci_sec_filter.ocf_mask[ogf])) &&
874 !capable(CAP_NET_RAW)) {
879 if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
880 skb_queue_tail(&hdev->raw_q, skb);
881 queue_work(hdev->workqueue, &hdev->tx_work);
883 /* Stand-alone HCI commands must be flaged as
884 * single-command requests.
886 bt_cb(skb)->req.start = true;
888 skb_queue_tail(&hdev->cmd_q, skb);
889 queue_work(hdev->workqueue, &hdev->cmd_work);
892 if (!capable(CAP_NET_RAW)) {
897 skb_queue_tail(&hdev->raw_q, skb);
898 queue_work(hdev->workqueue, &hdev->tx_work);
912 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
913 char __user *optval, unsigned int len)
915 struct hci_ufilter uf = { .opcode = 0 };
916 struct sock *sk = sock->sk;
917 int err = 0, opt = 0;
919 BT_DBG("sk %p, opt %d", sk, optname);
923 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
930 if (get_user(opt, (int __user *)optval)) {
936 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
938 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
942 if (get_user(opt, (int __user *)optval)) {
948 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
950 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
955 struct hci_filter *f = &hci_pi(sk)->filter;
957 uf.type_mask = f->type_mask;
958 uf.opcode = f->opcode;
959 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
960 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
963 len = min_t(unsigned int, len, sizeof(uf));
964 if (copy_from_user(&uf, optval, len)) {
969 if (!capable(CAP_NET_RAW)) {
970 uf.type_mask &= hci_sec_filter.type_mask;
971 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
972 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
976 struct hci_filter *f = &hci_pi(sk)->filter;
978 f->type_mask = uf.type_mask;
979 f->opcode = uf.opcode;
980 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
981 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
995 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
996 char __user *optval, int __user *optlen)
998 struct hci_ufilter uf;
999 struct sock *sk = sock->sk;
1000 int len, opt, err = 0;
1002 BT_DBG("sk %p, opt %d", sk, optname);
1004 if (get_user(len, optlen))
1009 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1016 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1021 if (put_user(opt, optval))
1025 case HCI_TIME_STAMP:
1026 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1031 if (put_user(opt, optval))
1037 struct hci_filter *f = &hci_pi(sk)->filter;
1039 memset(&uf, 0, sizeof(uf));
1040 uf.type_mask = f->type_mask;
1041 uf.opcode = f->opcode;
1042 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1043 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1046 len = min_t(unsigned int, len, sizeof(uf));
1047 if (copy_to_user(optval, &uf, len))
1061 static const struct proto_ops hci_sock_ops = {
1062 .family = PF_BLUETOOTH,
1063 .owner = THIS_MODULE,
1064 .release = hci_sock_release,
1065 .bind = hci_sock_bind,
1066 .getname = hci_sock_getname,
1067 .sendmsg = hci_sock_sendmsg,
1068 .recvmsg = hci_sock_recvmsg,
1069 .ioctl = hci_sock_ioctl,
1070 .poll = datagram_poll,
1071 .listen = sock_no_listen,
1072 .shutdown = sock_no_shutdown,
1073 .setsockopt = hci_sock_setsockopt,
1074 .getsockopt = hci_sock_getsockopt,
1075 .connect = sock_no_connect,
1076 .socketpair = sock_no_socketpair,
1077 .accept = sock_no_accept,
1078 .mmap = sock_no_mmap
1081 static struct proto hci_sk_proto = {
1083 .owner = THIS_MODULE,
1084 .obj_size = sizeof(struct hci_pinfo)
1087 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1092 BT_DBG("sock %p", sock);
1094 if (sock->type != SOCK_RAW)
1095 return -ESOCKTNOSUPPORT;
1097 sock->ops = &hci_sock_ops;
1099 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1103 sock_init_data(sock, sk);
1105 sock_reset_flag(sk, SOCK_ZAPPED);
1107 sk->sk_protocol = protocol;
1109 sock->state = SS_UNCONNECTED;
1110 sk->sk_state = BT_OPEN;
1112 bt_sock_link(&hci_sk_list, sk);
1116 static const struct net_proto_family hci_sock_family_ops = {
1117 .family = PF_BLUETOOTH,
1118 .owner = THIS_MODULE,
1119 .create = hci_sock_create,
1122 int __init hci_sock_init(void)
1126 err = proto_register(&hci_sk_proto, 0);
1130 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1132 BT_ERR("HCI socket registration failed");
1136 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1138 BT_ERR("Failed to create HCI proc file");
1139 bt_sock_unregister(BTPROTO_HCI);
1143 BT_INFO("HCI socket layer initialized");
1148 proto_unregister(&hci_sk_proto);
1152 void hci_sock_cleanup(void)
1154 bt_procfs_cleanup(&init_net, "hci");
1155 bt_sock_unregister(BTPROTO_HCI);
1156 proto_unregister(&hci_sk_proto);