net/bluetooth/hci_sock.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

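/* Test bit 'nr' in an array of 32-bit words (bit 0 is the least significant
 * bit of the first word). Used for the event and command bitmaps of the
 * HCI filters below.
 */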
static inline int hci_test_bit(int nr, void *addr)
{
        return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter: bitmaps of the HCI packet types, events and (per OGF)
 * commands that sockets without CAP_NET_RAW are allowed to see and send.
 */
static struct hci_sec_filter hci_sec_filter = {
        /* Packet types */
        0x10,
        /* Events */
        { 0x1000d9fe, 0x0000b00c },
        /* Commands */
        {
                { 0x0 },
                /* OGF_LINK_CTL */
                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
                /* OGF_LINK_POLICY */
                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
                /* OGF_HOST_CTL */
                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
                /* OGF_INFO_PARAM */
                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
                /* OGF_STATUS_PARAM */
                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
        }
};

static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

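/* Return true if the socket's HCI filter rejects this frame, i.e. the frame
 * must not be queued to that socket.
 */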
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
        struct hci_filter *flt;
        int flt_type, flt_event;

        /* Apply filter */
        flt = &hci_pi(sk)->filter;

        if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
                flt_type = 0;
        else
                flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

        if (!test_bit(flt_type, &flt->type_mask))
                return true;

        /* Extra filter for event packets only */
        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
                return false;

        flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

        if (!hci_test_bit(flt_event, &flt->event_mask))
                return true;

        /* Check filter only when opcode is set */
        if (!flt->opcode)
                return false;

        if (flt_event == HCI_EV_CMD_COMPLETE &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
                return true;

        if (flt_event == HCI_EV_CMD_STATUS &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
                return true;

        return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;

                /* Don't send frame to the socket it came from */
                if (skb->sk == sk)
                        continue;

                if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
                        if (is_filtered_packet(sk, skb))
                                continue;
                } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        if (!bt_cb(skb)->incoming)
                                continue;
                        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
                            bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                            bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
                                continue;
                } else {
                        /* Don't send frame to other channel types */
                        continue;
                }

                if (!skb_copy) {
                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
                        if (!skb_copy)
                                continue;

                        /* Put type byte before the data */
                        memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                /* Skip the original socket */
                if (sk == skip_sk)
                        continue;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;
        __le16 opcode;

        if (!atomic_read(&monitor_promisc))
                return;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
                opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        default:
                return;
        }

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                if (!skb_copy) {
                        struct hci_mon_hdr *hdr;

                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
                                               GFP_ATOMIC);
                        if (!skb_copy)
                                continue;

                        /* Put header before the data */
                        hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
                        hdr->opcode = opcode;
                        hdr->index = cpu_to_le16(hdev->id);
                        hdr->len = cpu_to_le16(skb->len);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

static void send_monitor_event(struct sk_buff *skb)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

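/* Build the HCI_MON_NEW_INDEX or HCI_MON_DEL_INDEX record that announces a
 * controller to the monitor channel.
 */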
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
        struct hci_mon_hdr *hdr;
        struct hci_mon_new_index *ni;
        struct sk_buff *skb;
        __le16 opcode;

        switch (event) {
        case HCI_DEV_REG:
                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                memcpy(ni->name, hdev->name, 8);

                opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
                break;

        case HCI_DEV_UNREG:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
                break;

        default:
                return NULL;
        }

        __net_timestamp(skb);

        hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

        return skb;
}

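/* Replay an HCI_MON_NEW_INDEX record for every registered controller so that
 * a freshly bound monitor socket learns about already existing devices.
 */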
static void send_monitor_replay(struct sock *sk)
{
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, HCI_DEV_REG);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);
        }

        read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
        struct hci_event_hdr *hdr;
        struct hci_ev_stack_internal *ev;
        struct sk_buff *skb;

        skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
        if (!skb)
                return;

        hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
        hdr->evt  = HCI_EV_STACK_INTERNAL;
        hdr->plen = sizeof(*ev) + dlen;

        ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
        ev->type = type;
        memcpy(ev->data, data, dlen);

        bt_cb(skb)->incoming = 1;
        __net_timestamp(skb);

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        hci_send_to_sock(hdev, skb);
        kfree_skb(skb);
}

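/* Forward a device event to the monitor channel and to all HCI sockets as a
 * stack-internal event. On HCI_DEV_UNREG, also detach any socket that is
 * still bound to the disappearing device.
 */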
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
        struct hci_ev_si_device ev;

        BT_DBG("hdev %s event %d", hdev->name, event);

        /* Send event to monitor */
        if (atomic_read(&monitor_promisc)) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, event);
                if (skb) {
                        send_monitor_event(skb);
                        kfree_skb(skb);
                }
        }

        /* Send event to sockets */
        ev.event  = event;
        ev.dev_id = hdev->id;
        hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

        if (event == HCI_DEV_UNREG) {
                struct sock *sk;

                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
                        bh_lock_sock_nested(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
                                sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);

                                hci_dev_put(hdev);
                        }
                        bh_unlock_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
}

static int hci_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!sk)
                return 0;

        hdev = hci_pi(sk)->hdev;

        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
                atomic_dec(&monitor_promisc);

        bt_sock_unlink(&hci_sk_list, sk);

        if (hdev) {
                if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        mgmt_index_added(hdev);
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        hci_dev_close(hdev->id);
                }

                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }

        sock_orphan(sk);

        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);

        sock_put(sk);
        return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
{
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        if (!hdev)
                return -EBADFD;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                return -EBUSY;

        if (hdev->dev_type != HCI_BREDR)
                return -EOPNOTSUPP;

        switch (cmd) {
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return -EOPNOTSUPP;

        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);

        case HCIGETAUTHINFO:
                return hci_get_auth_info(hdev, (void __user *) arg);

        case HCIBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_add(hdev, (void __user *) arg);

        case HCIUNBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);
        }

        return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
{
        void __user *argp = (void __user *) arg;
        struct sock *sk = sock->sk;
        int err;

        BT_DBG("cmd %x arg %lx", cmd, arg);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        release_sock(sk);

        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);

        case HCIGETDEVINFO:
                return hci_get_dev_info(argp);

        case HCIGETCONNLIST:
                return hci_get_conn_list(argp);

        case HCIDEVUP:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_open(arg);

        case HCIDEVDOWN:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_close(arg);

        case HCIDEVRESET:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset(arg);

        case HCIDEVRESTAT:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset_stat(arg);

        case HCISETSCAN:
        case HCISETAUTH:
        case HCISETENCRYPT:
        case HCISETPTYPE:
        case HCISETLINKPOL:
        case HCISETLINKMODE:
        case HCISETACLMTU:
        case HCISETSCOMTU:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_cmd(cmd, argp);

        case HCIINQUIRY:
                return hci_inquiry(argp);
        }

        lock_sock(sk);

        err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
        release_sock(sk);
        return err;
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
{
        struct sockaddr_hci haddr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = NULL;
        int len, err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!addr)
                return -EINVAL;

        memset(&haddr, 0, sizeof(haddr));
        len = min_t(unsigned int, sizeof(haddr), addr_len);
        memcpy(&haddr, addr, len);

        if (haddr.hci_family != AF_BLUETOOTH)
                return -EINVAL;

        lock_sock(sk);

        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
        }

        switch (haddr.hci_channel) {
        case HCI_CHANNEL_RAW:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        hdev = hci_dev_get(haddr.hci_dev);
                        if (!hdev) {
                                err = -ENODEV;
                                goto done;
                        }

                        atomic_inc(&hdev->promisc);
                }

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_USER:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev == HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                hdev = hci_dev_get(haddr.hci_dev);
                if (!hdev) {
                        err = -ENODEV;
                        goto done;
                }

                if (test_bit(HCI_UP, &hdev->flags) ||
                    test_bit(HCI_INIT, &hdev->flags) ||
                    test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
                }

                if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        err = -EUSERS;
                        hci_dev_put(hdev);
                        goto done;
                }

                mgmt_index_removed(hdev);

                err = hci_dev_open(hdev->id);
                if (err) {
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        mgmt_index_added(hdev);
                        hci_dev_put(hdev);
                        goto done;
                }

                atomic_inc(&hdev->promisc);

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_CONTROL:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                break;

        case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }

                send_monitor_replay(sk);

                atomic_inc(&monitor_promisc);
                break;

        default:
                err = -EINVAL;
                goto done;
        }

        hci_pi(sk)->channel = haddr.hci_channel;
        sk->sk_state = BT_BOUND;

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
                            int *addr_len, int peer)
{
        struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        int err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (peer)
                return -EOPNOTSUPP;

        lock_sock(sk);

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        *addr_len = sizeof(*haddr);
        haddr->hci_family = AF_BLUETOOTH;
        haddr->hci_dev    = hdev->id;
        haddr->hci_channel = hci_pi(sk)->channel;

done:
        release_sock(sk);
        return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
                          struct sk_buff *skb)
{
        __u32 mask = hci_pi(sk)->cmsg_mask;

        if (mask & HCI_CMSG_DIR) {
                int incoming = bt_cb(skb)->incoming;
                put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
                         &incoming);
        }

        if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
                struct compat_timeval ctv;
#endif
                struct timeval tv;
                void *data;
                int len;

                skb_get_timestamp(skb, &tv);

                data = &tv;
                len = sizeof(tv);
#ifdef CONFIG_COMPAT
                if (!COMPAT_USE_64BIT_TIME &&
                    (msg->msg_flags & MSG_CMSG_COMPAT)) {
                        ctv.tv_sec = tv.tv_sec;
                        ctv.tv_usec = tv.tv_usec;
                        data = &ctv;
                        len = sizeof(ctv);
                }
#endif

                put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
        }
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, err;

        BT_DBG("sock %p, sk %p", sock, sk);

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        if (sk->sk_state == BT_CLOSED)
                return 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(skb);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                hci_sock_cmsg(sk, msg, skb);
                break;
        case HCI_CHANNEL_USER:
        case HCI_CHANNEL_CONTROL:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
                break;
        }

        skb_free_datagram(sk, skb);

        return err ? : copied;
}

static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        struct sk_buff *skb;
        int err;

        BT_DBG("sock %p sk %p", sock, sk);

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
                return -EINVAL;

        if (len < 4 || len > HCI_MAX_FRAME_SIZE)
                return -EINVAL;

        lock_sock(sk);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
        case HCI_CHANNEL_USER:
                break;
        case HCI_CHANNEL_CONTROL:
                err = mgmt_control(sk, msg, len);
                goto done;
        case HCI_CHANNEL_MONITOR:
                err = -EOPNOTSUPP;
                goto done;
        default:
                err = -EINVAL;
                goto done;
        }

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto done;

        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                err = -EFAULT;
                goto drop;
        }

        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);

        if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                /* No permission check is needed for the user channel
                 * since that gets enforced when binding the socket.
                 *
                 * However, check that the packet type is valid.
                 */
                if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
                    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
                        err = -EINVAL;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);

                /* Commands outside the security filter need CAP_NET_RAW */
                if (((ogf > HCI_SFLT_MAX_OGF) ||
                     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                                   &hci_sec_filter.ocf_mask[ogf])) &&
                    !capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
                        skb_queue_tail(&hdev->raw_q, skb);
                        queue_work(hdev->workqueue, &hdev->tx_work);
                } else {
                        /* Stand-alone HCI commands must be flagged as
                         * single-command requests.
                         */
                        bt_cb(skb)->req.start = true;

                        skb_queue_tail(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        } else {
                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }

        err = len;

done:
        release_sock(sk);
        return err;

drop:
        kfree_skb(skb);
        goto done;
}

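/* Socket options for raw HCI sockets. As a rough userspace sketch (relying
 * on the BlueZ libbluetooth <bluetooth/hci.h> helpers, which are not part of
 * this file), a raw socket would typically narrow what it receives with
 * HCI_FILTER:
 *
 *   struct hci_filter flt;
 *
 *   hci_filter_clear(&flt);
 *   hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *   hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *   setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * For callers without CAP_NET_RAW the requested filter is intersected with
 * hci_sec_filter above before it takes effect.
 */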
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int len)
{
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
                break;

        case HCI_TIME_STAMP:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }

                if (!capable(CAP_NET_RAW)) {
                        uf.type_mask &= hci_sec_filter.type_mask;
                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
                }

                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        f->type_mask = uf.type_mask;
                        f->opcode    = uf.opcode;
                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
                }
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        struct hci_ufilter uf;
        struct sock *sk = sock->sk;
        int len, opt, err = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        if (get_user(len, optlen))
                return -EFAULT;

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_TIME_STAMP:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        memset(&uf, 0, sizeof(uf));
                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_to_user(optval, &uf, len))
                        err = -EFAULT;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static const struct proto_ops hci_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = hci_sock_release,
        .bind           = hci_sock_bind,
        .getname        = hci_sock_getname,
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
        .getsockopt     = hci_sock_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .mmap           = sock_no_mmap
};

static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
                           int kern)
{
        struct sock *sk;

        BT_DBG("sock %p", sock);

        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        sock->ops = &hci_sock_ops;

        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = protocol;

        sock->state = SS_UNCONNECTED;
        sk->sk_state = BT_OPEN;

        bt_sock_link(&hci_sk_list, sk);
        return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};

int __init hci_sock_init(void)
{
        int err;

        err = proto_register(&hci_sk_proto, 0);
        if (err < 0)
                return err;

        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
        if (err < 0) {
                BT_ERR("HCI socket registration failed");
                goto error;
        }

        err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
        if (err < 0) {
                BT_ERR("Failed to create HCI proc file");
                bt_sock_unregister(BTPROTO_HCI);
                goto error;
        }

        BT_INFO("HCI socket layer initialized");

        return 0;

error:
        proto_unregister(&hci_sk_proto);
        return err;
}

void hci_sock_cleanup(void)
{
        bt_procfs_cleanup(&init_net, "hci");
        bt_sock_unregister(BTPROTO_HCI);
        proto_unregister(&hci_sk_proto);
}