/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

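/* Called from the event processing path when a command completes;
 * marks the current synchronous request as done and wakes up the
 * task sleeping in __hci_request(). During init only the last
 * queued init command may complete the request.
 */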
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase, check whether the completed command
	 * matches the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

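/* Abort the current synchronous request with the given error, e.g.
 * when the device is being closed while a request is still pending.
 */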
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

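/* Execute a request under the request lock and wait for completion.
 * A typical call, following the pattern of the ioctl helpers below
 * (the SCAN_* value here is only an illustrative choice), would be:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */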
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

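/* Send the mandatory and optional initialization commands for a
 * BR/EDR controller. The commands are queued asynchronously; the
 * surrounding __hci_request() completes when the last of them does.
 */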
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

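/* Minimal initialization for AMP controllers, which use block-based
 * rather than packet-based flow control.
 */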
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

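/* Transition the discovery state machine and let the management
 * interface know when discovery effectively starts or stops.
 */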
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		hdev->discovery.type = 0;
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
	cache->state = DISCOVERY_STOPPED;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
							bdaddr_t *bdaddr,
							int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

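/* Re-insert an entry into the name resolve list, keeping the list
 * ordered by ascending abs(rssi) (i.e. strongest signal first) while
 * leaving entries with a pending name request in place.
 */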
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
						struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

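/* Add a new inquiry cache entry or refresh an existing one. Returns
 * false if the entry could not be stored or its remote name still
 * needs to be resolved, true otherwise.
 */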
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
							bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

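/* Power on an HCI device: open the driver, run the HCI init sequence
 * (unless the device is raw) and notify listeners. Fails with a
 * negative errno if the device is rfkill-blocked, already up or
 * cannot be initialized.
 */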
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non-BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

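/* Common power-down path: cancel pending work and requests, flush
 * queues and connections, optionally reset the controller and call
 * the driver's close routine.
 */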
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
				test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

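/* Decide whether a new link key should be stored persistently, based
 * on the key type and the bonding requirements of both sides.
 */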
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
				memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
								u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
					bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk_by_addr);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}

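/* Store or update an SMP Long Term Key (or Short Term Key) for the
 * given address. Only new LTKs are reported to the management
 * interface.
 */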
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

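/* Add an address to the blacklist. BDADDR_ANY is not a valid entry
 * and duplicates are rejected with -EEXIST.
 */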
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}

static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

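/* Start an LE scan synchronously: set the scan parameters, enable
 * scanning, and schedule the delayed work that disables it again
 * after the requested timeout.
 */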
1613 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1614                                                 u16 window, int timeout)
1615 {
1616         long timeo = msecs_to_jiffies(3000);
1617         struct le_scan_params param;
1618         int err;
1619
1620         BT_DBG("%s", hdev->name);
1621
1622         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1623                 return -EINPROGRESS;
1624
1625         param.type = type;
1626         param.interval = interval;
1627         param.window = window;
1628
1629         hci_req_lock(hdev);
1630
1631         err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1632                                                                         timeo);
1633         if (!err)
1634                 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1635
1636         hci_req_unlock(hdev);
1637
1638         if (err < 0)
1639                 return err;
1640
1641         schedule_delayed_work(&hdev->le_scan_disable,
1642                                                 msecs_to_jiffies(timeout));
1643
1644         return 0;
1645 }
1646
1647 static void le_scan_disable_work(struct work_struct *work)
1648 {
1649         struct hci_dev *hdev = container_of(work, struct hci_dev,
1650                                                 le_scan_disable.work);
1651         struct hci_cp_le_set_scan_enable cp;
1652
1653         BT_DBG("%s", hdev->name);
1654
1655         memset(&cp, 0, sizeof(cp));
1656
1657         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1658 }
1659
1660 static void le_scan_work(struct work_struct *work)
1661 {
1662         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1663         struct le_scan_params *param = &hdev->le_scan_params;
1664
1665         BT_DBG("%s", hdev->name);
1666
1667         hci_do_le_scan(hdev, param->type, param->interval,
1668                                         param->window, param->timeout);
1669 }
1670
1671 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1672                                                                 int timeout)
1673 {
1674         struct le_scan_params *param = &hdev->le_scan_params;
1675
1676         BT_DBG("%s", hdev->name);
1677
1678         if (work_busy(&hdev->le_scan))
1679                 return -EINPROGRESS;
1680
1681         param->type = type;
1682         param->interval = interval;
1683         param->window = window;
1684         param->timeout = timeout;
1685
1686         queue_work(system_long_wq, &hdev->le_scan);
1687
1688         return 0;
1689 }
1690
1691 /* Register HCI device */
1692 int hci_register_dev(struct hci_dev *hdev)
1693 {
1694         struct list_head *head = &hci_dev_list, *p;
1695         int i, id, error;
1696
1697         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1698
1699         if (!hdev->open || !hdev->close)
1700                 return -EINVAL;
1701
1702         /* Do not allow HCI_AMP devices to register at index 0,
1703          * so the index can be used as the AMP controller ID.
1704          */
1705         id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1706
1707         write_lock(&hci_dev_list_lock);
1708
1709         /* Find first available device id */
1710         list_for_each(p, &hci_dev_list) {
1711                 if (list_entry(p, struct hci_dev, list)->id != id)
1712                         break;
1713                 head = p; id++;
1714         }
1715
1716         sprintf(hdev->name, "hci%d", id);
1717         hdev->id = id;
1718         list_add_tail(&hdev->list, head);
1719
1720         mutex_init(&hdev->lock);
1721
1722         hdev->flags = 0;
1723         hdev->dev_flags = 0;
1724         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1725         hdev->esco_type = (ESCO_HV1);
1726         hdev->link_mode = (HCI_LM_ACCEPT);
1727         hdev->io_capability = 0x03; /* No Input No Output */
1728
1729         hdev->idle_timeout = 0;
1730         hdev->sniff_max_interval = 800;
1731         hdev->sniff_min_interval = 80;
1732
1733         INIT_WORK(&hdev->rx_work, hci_rx_work);
1734         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1735         INIT_WORK(&hdev->tx_work, hci_tx_work);
1736
1737
1738         skb_queue_head_init(&hdev->rx_q);
1739         skb_queue_head_init(&hdev->cmd_q);
1740         skb_queue_head_init(&hdev->raw_q);
1741
1742         setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1743
1744         for (i = 0; i < NUM_REASSEMBLY; i++)
1745                 hdev->reassembly[i] = NULL;
1746
1747         init_waitqueue_head(&hdev->req_wait_q);
1748         mutex_init(&hdev->req_lock);
1749
1750         discovery_init(hdev);
1751
1752         hci_conn_hash_init(hdev);
1753
1754         INIT_LIST_HEAD(&hdev->mgmt_pending);
1755
1756         INIT_LIST_HEAD(&hdev->blacklist);
1757
1758         INIT_LIST_HEAD(&hdev->uuids);
1759
1760         INIT_LIST_HEAD(&hdev->link_keys);
1761         INIT_LIST_HEAD(&hdev->long_term_keys);
1762
1763         INIT_LIST_HEAD(&hdev->remote_oob_data);
1764
1765         INIT_LIST_HEAD(&hdev->adv_entries);
1766
1767         INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1768         INIT_WORK(&hdev->power_on, hci_power_on);
1769         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1770
1771         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1772
1773         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1774
1775         atomic_set(&hdev->promisc, 0);
1776
1777         INIT_WORK(&hdev->le_scan, le_scan_work);
1778
1779         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1780
1781         write_unlock(&hci_dev_list_lock);
1782
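        /* Per-device workqueue: WQ_UNBOUND with max_active = 1 makes it
         * ordered, so rx_work, tx_work and cmd_work for this controller run
         * one at a time in queueing order; WQ_MEM_RECLAIM keeps it usable
         * under memory pressure.
         */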
1783         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1784                                                         WQ_MEM_RECLAIM, 1);
1785         if (!hdev->workqueue) {
1786                 error = -ENOMEM;
1787                 goto err;
1788         }
1789
1790         error = hci_add_sysfs(hdev);
1791         if (error < 0)
1792                 goto err_wqueue;
1793
1794         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1795                                 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1796         if (hdev->rfkill) {
1797                 if (rfkill_register(hdev->rfkill) < 0) {
1798                         rfkill_destroy(hdev->rfkill);
1799                         hdev->rfkill = NULL;
1800                 }
1801         }
1802
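        /* New controllers start in SETUP with AUTO_OFF set: power_on brings
         * the device up so its features can be read, and unless userspace
         * enables it through mgmt within AUTO_OFF_TIMEOUT, power_off shuts
         * it down again.
         */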
1803         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1804         set_bit(HCI_SETUP, &hdev->dev_flags);
1805         schedule_work(&hdev->power_on);
1806
1807         hci_notify(hdev, HCI_DEV_REG);
1808         hci_dev_hold(hdev);
1809
1810         return id;
1811
1812 err_wqueue:
1813         destroy_workqueue(hdev->workqueue);
1814 err:
1815         write_lock(&hci_dev_list_lock);
1816         list_del(&hdev->list);
1817         write_unlock(&hci_dev_list_lock);
1818
1819         return error;
1820 }
1821 EXPORT_SYMBOL(hci_register_dev);
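
/* A hedged sketch of the driver side (modeled on drivers/bluetooth/hci_vhci.c;
 * the hdev fields and helpers are real, the callbacks are hypothetical):
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *      hdev->bus   = HCI_VIRTUAL;
 *      hdev->open  = my_open;
 *      hdev->close = my_close;
 *      hdev->send  = my_send;
 *      if (hci_register_dev(hdev) < 0)
 *              hci_free_dev(hdev);
 */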
1822
1823 /* Unregister HCI device */
1824 void hci_unregister_dev(struct hci_dev *hdev)
1825 {
1826         int i;
1827
1828         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1829
1830         write_lock(&hci_dev_list_lock);
1831         list_del(&hdev->list);
1832         write_unlock(&hci_dev_list_lock);
1833
1834         hci_dev_do_close(hdev);
1835
1836         for (i = 0; i < NUM_REASSEMBLY; i++)
1837                 kfree_skb(hdev->reassembly[i]);
1838
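        /* Only tell mgmt about the removal if mgmt ever saw this index:
         * a device still in SETUP (or torn down while initializing) was
         * never announced with mgmt_index_added.
         */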
1839         if (!test_bit(HCI_INIT, &hdev->flags) &&
1840                                 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1841                 hci_dev_lock(hdev);
1842                 mgmt_index_removed(hdev);
1843                 hci_dev_unlock(hdev);
1844         }
1845
1846         /* mgmt_index_removed should take care of emptying the
1847          * pending list */
1848         BUG_ON(!list_empty(&hdev->mgmt_pending));
1849
1850         hci_notify(hdev, HCI_DEV_UNREG);
1851
1852         if (hdev->rfkill) {
1853                 rfkill_unregister(hdev->rfkill);
1854                 rfkill_destroy(hdev->rfkill);
1855         }
1856
1857         hci_del_sysfs(hdev);
1858
1859         cancel_delayed_work_sync(&hdev->adv_work);
1860
1861         destroy_workqueue(hdev->workqueue);
1862
1863         hci_dev_lock(hdev);
1864         hci_blacklist_clear(hdev);
1865         hci_uuids_clear(hdev);
1866         hci_link_keys_clear(hdev);
1867         hci_smp_ltks_clear(hdev);
1868         hci_remote_oob_data_clear(hdev);
1869         hci_adv_entries_clear(hdev);
1870         hci_dev_unlock(hdev);
1871
1872         hci_dev_put(hdev);
1873 }
1874 EXPORT_SYMBOL(hci_unregister_dev);
1875
1876 /* Suspend HCI device */
1877 int hci_suspend_dev(struct hci_dev *hdev)
1878 {
1879         hci_notify(hdev, HCI_DEV_SUSPEND);
1880         return 0;
1881 }
1882 EXPORT_SYMBOL(hci_suspend_dev);
1883
1884 /* Resume HCI device */
1885 int hci_resume_dev(struct hci_dev *hdev)
1886 {
1887         hci_notify(hdev, HCI_DEV_RESUME);
1888         return 0;
1889 }
1890 EXPORT_SYMBOL(hci_resume_dev);
1891
1892 /* Receive frame from HCI drivers */
1893 int hci_recv_frame(struct sk_buff *skb)
1894 {
1895         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1896         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1897                                 && !test_bit(HCI_INIT, &hdev->flags))) {
1898                 kfree_skb(skb);
1899                 return -ENXIO;
1900         }
1901
1902         /* Incoming skb */
1903         bt_cb(skb)->incoming = 1;
1904
1905         /* Time stamp */
1906         __net_timestamp(skb);
1907
1908         skb_queue_tail(&hdev->rx_q, skb);
1909         queue_work(hdev->workqueue, &hdev->rx_work);
1910
1911         return 0;
1912 }
1913 EXPORT_SYMBOL(hci_recv_frame);
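
/* Hedged driver-side sketch: hand one complete HCI event frame to the core
 * (bt_skb_alloc and bt_cb are real helpers; buf and count are hypothetical):
 *
 *      struct sk_buff *skb = bt_skb_alloc(count, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      memcpy(skb_put(skb, count), buf, count);
 *      skb->dev = (void *) hdev;
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      hci_recv_frame(skb);
 */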
1914
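/* Incremental reassembly: bytes are appended to the per-slot skb until
 * scb->expect drops to zero.  The first pass collects only the packet
 * header; once that is complete, expect is re-loaded with the payload
 * length taken from the header.  Returns how many input bytes were left
 * unconsumed, or a negative errno.
 */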
1915 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1916                                                   int count, __u8 index)
1917 {
1918         int len = 0;
1919         int hlen = 0;
1920         int remain = count;
1921         struct sk_buff *skb;
1922         struct bt_skb_cb *scb;
1923
1924         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1925                                 index >= NUM_REASSEMBLY)
1926                 return -EILSEQ;
1927
1928         skb = hdev->reassembly[index];
1929
1930         if (!skb) {
1931                 switch (type) {
1932                 case HCI_ACLDATA_PKT:
1933                         len = HCI_MAX_FRAME_SIZE;
1934                         hlen = HCI_ACL_HDR_SIZE;
1935                         break;
1936                 case HCI_EVENT_PKT:
1937                         len = HCI_MAX_EVENT_SIZE;
1938                         hlen = HCI_EVENT_HDR_SIZE;
1939                         break;
1940                 case HCI_SCODATA_PKT:
1941                         len = HCI_MAX_SCO_SIZE;
1942                         hlen = HCI_SCO_HDR_SIZE;
1943                         break;
1944                 }
1945
1946                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1947                 if (!skb)
1948                         return -ENOMEM;
1949
1950                 scb = (void *) skb->cb;
1951                 scb->expect = hlen;
1952                 scb->pkt_type = type;
1953
1954                 skb->dev = (void *) hdev;
1955                 hdev->reassembly[index] = skb;
1956         }
1957
1958         while (count) {
1959                 scb = (void *) skb->cb;
1960                 len = min(scb->expect, (__u16)count);
1961
1962                 memcpy(skb_put(skb, len), data, len);
1963
1964                 count -= len;
1965                 data += len;
1966                 scb->expect -= len;
1967                 remain = count;
1968
1969                 switch (type) {
1970                 case HCI_EVENT_PKT:
1971                         if (skb->len == HCI_EVENT_HDR_SIZE) {
1972                                 struct hci_event_hdr *h = hci_event_hdr(skb);
1973                                 scb->expect = h->plen;
1974
1975                                 if (skb_tailroom(skb) < scb->expect) {
1976                                         kfree_skb(skb);
1977                                         hdev->reassembly[index] = NULL;
1978                                         return -ENOMEM;
1979                                 }
1980                         }
1981                         break;
1982
1983                 case HCI_ACLDATA_PKT:
1984                         if (skb->len  == HCI_ACL_HDR_SIZE) {
1985                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1986                                 scb->expect = __le16_to_cpu(h->dlen);
1987
1988                                 if (skb_tailroom(skb) < scb->expect) {
1989                                         kfree_skb(skb);
1990                                         hdev->reassembly[index] = NULL;
1991                                         return -ENOMEM;
1992                                 }
1993                         }
1994                         break;
1995
1996                 case HCI_SCODATA_PKT:
1997                         if (skb->len == HCI_SCO_HDR_SIZE) {
1998                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1999                                 scb->expect = h->dlen;
2000
2001                                 if (skb_tailroom(skb) < scb->expect) {
2002                                         kfree_skb(skb);
2003                                         hdev->reassembly[index] = NULL;
2004                                         return -ENOMEM;
2005                                 }
2006                         }
2007                         break;
2008                 }
2009
2010                 if (scb->expect == 0) {
2011                         /* Complete frame */
2012
2013                         bt_cb(skb)->pkt_type = type;
2014                         hci_recv_frame(skb);
2015
2016                         hdev->reassembly[index] = NULL;
2017                         return remain;
2018                 }
2019         }
2020
2021         return remain;
2022 }
2023
2024 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2025 {
2026         int rem = 0;
2027
2028         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2029                 return -EILSEQ;
2030
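        /* Per-type slots: (type - 1) maps ACL, SCO and event packets to
         * slots 1..3 of hdev->reassembly[]; slot 0 is reserved for the
         * stream transport below (STREAM_REASSEMBLY).
         */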
2031         while (count) {
2032                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2033                 if (rem < 0)
2034                         return rem;
2035
2036                 data += (count - rem);
2037                 count = rem;
2038         }
2039
2040         return rem;
2041 }
2042 EXPORT_SYMBOL(hci_recv_fragment);
2043
2044 #define STREAM_REASSEMBLY 0
2045
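/* Byte-stream variant for UART-style (H4) transports: when no frame is in
 * progress, the first byte of the stream is consumed as the packet type
 * indicator, after which reassembly proceeds as above in shared slot 0.
 */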
2046 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2047 {
2048         int type;
2049         int rem = 0;
2050
2051         while (count) {
2052                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2053
2054                 if (!skb) {
2055                         struct { char type; } *pkt;
2056
2057                         /* Start of the frame */
2058                         pkt = data;
2059                         type = pkt->type;
2060
2061                         data++;
2062                         count--;
2063                 } else
2064                         type = bt_cb(skb)->pkt_type;
2065
2066                 rem = hci_reassembly(hdev, type, data, count,
2067                                                         STREAM_REASSEMBLY);
2068                 if (rem < 0)
2069                         return rem;
2070
2071                 data += (count - rem);
2072                 count = rem;
2073         }
2074
2075         return rem;
2076 }
2077 EXPORT_SYMBOL(hci_recv_stream_fragment);
2078
2079 /* ---- Interface to upper protocols ---- */
2080
2081 int hci_register_cb(struct hci_cb *cb)
2082 {
2083         BT_DBG("%p name %s", cb, cb->name);
2084
2085         write_lock(&hci_cb_list_lock);
2086         list_add(&cb->list, &hci_cb_list);
2087         write_unlock(&hci_cb_list_lock);
2088
2089         return 0;
2090 }
2091 EXPORT_SYMBOL(hci_register_cb);
2092
2093 int hci_unregister_cb(struct hci_cb *cb)
2094 {
2095         BT_DBG("%p name %s", cb, cb->name);
2096
2097         write_lock(&hci_cb_list_lock);
2098         list_del(&cb->list);
2099         write_unlock(&hci_cb_list_lock);
2100
2101         return 0;
2102 }
2103 EXPORT_SYMBOL(hci_unregister_cb);
2104
2105 static int hci_send_frame(struct sk_buff *skb)
2106 {
2107         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2108
2109         if (!hdev) {
2110                 kfree_skb(skb);
2111                 return -ENODEV;
2112         }
2113
2114         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2115
2116         /* Time stamp */
2117         __net_timestamp(skb);
2118
2119         /* Send copy to monitor */
2120         hci_send_to_monitor(hdev, skb);
2121
2122         if (atomic_read(&hdev->promisc)) {
2123                 /* Send copy to the sockets */
2124                 hci_send_to_sock(hdev, skb);
2125         }
2126
2127         /* Get rid of skb owner, prior to sending to the driver. */
2128         skb_orphan(skb);
2129
2130         return hdev->send(skb);
2131 }
2132
2133 /* Send HCI command */
2134 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2135 {
2136         int len = HCI_COMMAND_HDR_SIZE + plen;
2137         struct hci_command_hdr *hdr;
2138         struct sk_buff *skb;
2139
2140         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2141
2142         skb = bt_skb_alloc(len, GFP_ATOMIC);
2143         if (!skb) {
2144                 BT_ERR("%s no memory for command", hdev->name);
2145                 return -ENOMEM;
2146         }
2147
2148         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2149         hdr->opcode = cpu_to_le16(opcode);
2150         hdr->plen   = plen;
2151
2152         if (plen)
2153                 memcpy(skb_put(skb, plen), param, plen);
2154
2155         BT_DBG("skb len %d", skb->len);
2156
2157         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2158         skb->dev = (void *) hdev;
2159
2160         if (test_bit(HCI_INIT, &hdev->flags))
2161                 hdev->init_last_cmd = opcode;
2162
2163         skb_queue_tail(&hdev->cmd_q, skb);
2164         queue_work(hdev->workqueue, &hdev->cmd_work);
2165
2166         return 0;
2167 }
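
/* Hedged usage sketch: queue a Write Scan Enable command (the opcode and
 * scan constants are from include/net/bluetooth/hci.h):
 *
 *      __u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *      hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */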
2168
2169 /* Get data from the previously sent command */
2170 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2171 {
2172         struct hci_command_hdr *hdr;
2173
2174         if (!hdev->sent_cmd)
2175                 return NULL;
2176
2177         hdr = (void *) hdev->sent_cmd->data;
2178
2179         if (hdr->opcode != cpu_to_le16(opcode))
2180                 return NULL;
2181
2182         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2183
2184         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2185 }
2186
2187 /* Send ACL data */
2188 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2189 {
2190         struct hci_acl_hdr *hdr;
2191         int len = skb->len;
2192
2193         skb_push(skb, HCI_ACL_HDR_SIZE);
2194         skb_reset_transport_header(skb);
2195         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
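        /* Low 12 bits carry the connection handle, top 4 the PB/BC flags */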
2196         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2197         hdr->dlen   = cpu_to_le16(len);
2198 }
2199
2200 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2201                                 struct sk_buff *skb, __u16 flags)
2202 {
2203         struct hci_dev *hdev = conn->hdev;
2204         struct sk_buff *list;
2205
2206         list = skb_shinfo(skb)->frag_list;
2207         if (!list) {
2208                 /* Non-fragmented */
2209                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2210
2211                 skb_queue_tail(queue, skb);
2212         } else {
2213                 /* Fragmented */
2214                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2215
2216                 skb_shinfo(skb)->frag_list = NULL;
2217
2218                 /* Queue all fragments atomically */
2219                 spin_lock(&queue->lock);
2220
2221                 __skb_queue_tail(queue, skb);
2222
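                /* The first fragment keeps the caller's boundary flags;
                 * every following fragment is marked as a continuation.
                 */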
2223                 flags &= ~ACL_START;
2224                 flags |= ACL_CONT;
2225                 do {
2226                         skb = list; list = list->next;
2227
2228                         skb->dev = (void *) hdev;
2229                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2230                         hci_add_acl_hdr(skb, conn->handle, flags);
2231
2232                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2233
2234                         __skb_queue_tail(queue, skb);
2235                 } while (list);
2236
2237                 spin_unlock(&queue->lock);
2238         }
2239 }
2240
2241 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2242 {
2243         struct hci_conn *conn = chan->conn;
2244         struct hci_dev *hdev = conn->hdev;
2245
2246         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2247
2248         skb->dev = (void *) hdev;
2249         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2250         hci_add_acl_hdr(skb, conn->handle, flags);
2251
2252         hci_queue_acl(conn, &chan->data_q, skb, flags);
2253
2254         queue_work(hdev->workqueue, &hdev->tx_work);
2255 }
2256 EXPORT_SYMBOL(hci_send_acl);
2257
2258 /* Send SCO data */
2259 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2260 {
2261         struct hci_dev *hdev = conn->hdev;
2262         struct hci_sco_hdr hdr;
2263
2264         BT_DBG("%s len %d", hdev->name, skb->len);
2265
2266         hdr.handle = cpu_to_le16(conn->handle);
2267         hdr.dlen   = skb->len;
2268
2269         skb_push(skb, HCI_SCO_HDR_SIZE);
2270         skb_reset_transport_header(skb);
2271         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2272
2273         skb->dev = (void *) hdev;
2274         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2275
2276         skb_queue_tail(&conn->data_q, skb);
2277         queue_work(hdev->workqueue, &hdev->tx_work);
2278 }
2279 EXPORT_SYMBOL(hci_send_sco);
2280
2281 /* ---- HCI TX task (outgoing data) ---- */
2282
2283 /* HCI Connection scheduler */
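/* Pick the connection of the given link type with the fewest unacked
 * packets and grant it a fair share of the controller's free buffers:
 * quote = free buffers / number of connections with queued data, with a
 * minimum of one.
 */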
2284 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2285 {
2286         struct hci_conn_hash *h = &hdev->conn_hash;
2287         struct hci_conn *conn = NULL, *c;
2288         int num = 0, min = ~0;
2289
2290         /* We don't have to lock device here. Connections are always
2291          * added and removed with TX task disabled. */
2292
2293         rcu_read_lock();
2294
2295         list_for_each_entry_rcu(c, &h->list, list) {
2296                 if (c->type != type || skb_queue_empty(&c->data_q))
2297                         continue;
2298
2299                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2300                         continue;
2301
2302                 num++;
2303
2304                 if (c->sent < min) {
2305                         min  = c->sent;
2306                         conn = c;
2307                 }
2308
2309                 if (hci_conn_num(hdev, type) == num)
2310                         break;
2311         }
2312
2313         rcu_read_unlock();
2314
2315         if (conn) {
2316                 int cnt, q;
2317
2318                 switch (conn->type) {
2319                 case ACL_LINK:
2320                         cnt = hdev->acl_cnt;
2321                         break;
2322                 case SCO_LINK:
2323                 case ESCO_LINK:
2324                         cnt = hdev->sco_cnt;
2325                         break;
2326                 case LE_LINK:
2327                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2328                         break;
2329                 default:
2330                         cnt = 0;
2331                         BT_ERR("Unknown link type");
2332                 }
2333
2334                 q = cnt / num;
2335                 *quote = q ? q : 1;
2336         } else
2337                 *quote = 0;
2338
2339         BT_DBG("conn %p quote %d", conn, *quote);
2340         return conn;
2341 }
2342
2343 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2344 {
2345         struct hci_conn_hash *h = &hdev->conn_hash;
2346         struct hci_conn *c;
2347
2348         BT_ERR("%s link tx timeout", hdev->name);
2349
2350         rcu_read_lock();
2351
2352         /* Kill stalled connections */
2353         list_for_each_entry_rcu(c, &h->list, list) {
2354                 if (c->type == type && c->sent) {
2355                         BT_ERR("%s killing stalled connection %s",
2356                                 hdev->name, batostr(&c->dst));
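                        /* 0x13: HCI "Remote User Terminated Connection" */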
2357                         hci_acl_disconn(c, 0x13);
2358                 }
2359         }
2360
2361         rcu_read_unlock();
2362 }
2363
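/* Priority-aware variant of hci_low_sent() that schedules HCI channels:
 * only channels whose head skb carries the current highest priority
 * compete, and among those the channel of the least-sent connection wins.
 */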
2364 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2365                                                 int *quote)
2366 {
2367         struct hci_conn_hash *h = &hdev->conn_hash;
2368         struct hci_chan *chan = NULL;
2369         int num = 0, min = ~0, cur_prio = 0;
2370         struct hci_conn *conn;
2371         int cnt, q, conn_num = 0;
2372
2373         BT_DBG("%s", hdev->name);
2374
2375         rcu_read_lock();
2376
2377         list_for_each_entry_rcu(conn, &h->list, list) {
2378                 struct hci_chan *tmp;
2379
2380                 if (conn->type != type)
2381                         continue;
2382
2383                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2384                         continue;
2385
2386                 conn_num++;
2387
2388                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2389                         struct sk_buff *skb;
2390
2391                         if (skb_queue_empty(&tmp->data_q))
2392                                 continue;
2393
2394                         skb = skb_peek(&tmp->data_q);
2395                         if (skb->priority < cur_prio)
2396                                 continue;
2397
2398                         if (skb->priority > cur_prio) {
2399                                 num = 0;
2400                                 min = ~0;
2401                                 cur_prio = skb->priority;
2402                         }
2403
2404                         num++;
2405
2406                         if (conn->sent < min) {
2407                                 min  = conn->sent;
2408                                 chan = tmp;
2409                         }
2410                 }
2411
2412                 if (hci_conn_num(hdev, type) == conn_num)
2413                         break;
2414         }
2415
2416         rcu_read_unlock();
2417
2418         if (!chan)
2419                 return NULL;
2420
2421         switch (chan->conn->type) {
2422         case ACL_LINK:
2423                 cnt = hdev->acl_cnt;
2424                 break;
2425         case SCO_LINK:
2426         case ESCO_LINK:
2427                 cnt = hdev->sco_cnt;
2428                 break;
2429         case LE_LINK:
2430                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2431                 break;
2432         default:
2433                 cnt = 0;
2434                 BT_ERR("Unknown link type");
2435         }
2436
2437         q = cnt / num;
2438         *quote = q ? q : 1;
2439         BT_DBG("chan %p quote %d", chan, *quote);
2440         return chan;
2441 }
2442
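/* Anti-starvation pass run after a TX round: any channel that sent nothing
 * gets the head of its queue promoted towards HCI_PRIO_MAX - 1, so
 * low-priority traffic cannot be starved indefinitely.
 */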
2443 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2444 {
2445         struct hci_conn_hash *h = &hdev->conn_hash;
2446         struct hci_conn *conn;
2447         int num = 0;
2448
2449         BT_DBG("%s", hdev->name);
2450
2451         rcu_read_lock();
2452
2453         list_for_each_entry_rcu(conn, &h->list, list) {
2454                 struct hci_chan *chan;
2455
2456                 if (conn->type != type)
2457                         continue;
2458
2459                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2460                         continue;
2461
2462                 num++;
2463
2464                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2465                         struct sk_buff *skb;
2466
2467                         if (chan->sent) {
2468                                 chan->sent = 0;
2469                                 continue;
2470                         }
2471
2472                         if (skb_queue_empty(&chan->data_q))
2473                                 continue;
2474
2475                         skb = skb_peek(&chan->data_q);
2476                         if (skb->priority >= HCI_PRIO_MAX - 1)
2477                                 continue;
2478
2479                         skb->priority = HCI_PRIO_MAX - 1;
2480
2481                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2482                                                                 skb->priority);
2483                 }
2484
2485                 if (hci_conn_num(hdev, type) == num)
2486                         break;
2487         }
2488
2489         rcu_read_unlock();
2491 }
2492
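/* Block-based flow control (HCI_FLOW_CTL_MODE_BLOCK_BASED, used by AMP
 * controllers): buffer space is accounted in fixed-size data blocks of
 * hdev->block_len bytes rather than in whole packets.
 */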
2493 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2494 {
2495         /* Calculate count of blocks used by this packet */
2496         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2497 }
2498
2499 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2500 {
2501         if (!test_bit(HCI_RAW, &hdev->flags)) {
2502                 /* ACL tx timeout must be longer than maximum
2503                  * link supervision timeout (40.9 seconds) */
2504                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2505                                         msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2506                         hci_link_tx_to(hdev, ACL_LINK);
2507         }
2508 }
2509
2510 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2511 {
2512         unsigned int cnt = hdev->acl_cnt;
2513         struct hci_chan *chan;
2514         struct sk_buff *skb;
2515         int quote;
2516
2517         __check_timeout(hdev, cnt);
2518
2519         while (hdev->acl_cnt &&
2520                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2521                 u32 priority = (skb_peek(&chan->data_q))->priority;
2522                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2523                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2524                                         skb->len, skb->priority);
2525
2526                         /* Stop if priority has changed */
2527                         if (skb->priority < priority)
2528                                 break;
2529
2530                         skb = skb_dequeue(&chan->data_q);
2531
2532                         hci_conn_enter_active_mode(chan->conn,
2533                                                 bt_cb(skb)->force_active);
2534
2535                         hci_send_frame(skb);
2536                         hdev->acl_last_tx = jiffies;
2537
2538                         hdev->acl_cnt--;
2539                         chan->sent++;
2540                         chan->conn->sent++;
2541                 }
2542         }
2543
2544         if (cnt != hdev->acl_cnt)
2545                 hci_prio_recalculate(hdev, ACL_LINK);
2546 }
2547
2548 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2549 {
2550         unsigned int cnt = hdev->block_cnt;
2551         struct hci_chan *chan;
2552         struct sk_buff *skb;
2553         int quote;
2554
2555         __check_timeout(hdev, cnt);
2556
2557         while (hdev->block_cnt > 0 &&
2558                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2559                 u32 priority = (skb_peek(&chan->data_q))->priority;
2560                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2561                         int blocks;
2562
2563                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2564                                                 skb->len, skb->priority);
2565
2566                         /* Stop if priority has changed */
2567                         if (skb->priority < priority)
2568                                 break;
2569
2570                         skb = skb_dequeue(&chan->data_q);
2571
2572                         blocks = __get_blocks(hdev, skb);
2573                         if (blocks > hdev->block_cnt)
2574                                 return;
2575
2576                         hci_conn_enter_active_mode(chan->conn,
2577                                                 bt_cb(skb)->force_active);
2578
2579                         hci_send_frame(skb);
2580                         hdev->acl_last_tx = jiffies;
2581
2582                         hdev->block_cnt -= blocks;
2583                         quote -= blocks;
2584
2585                         chan->sent += blocks;
2586                         chan->conn->sent += blocks;
2587                 }
2588         }
2589
2590         if (cnt != hdev->block_cnt)
2591                 hci_prio_recalculate(hdev, ACL_LINK);
2592 }
2593
2594 static inline void hci_sched_acl(struct hci_dev *hdev)
2595 {
2596         BT_DBG("%s", hdev->name);
2597
2598         if (!hci_conn_num(hdev, ACL_LINK))
2599                 return;
2600
2601         switch (hdev->flow_ctl_mode) {
2602         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2603                 hci_sched_acl_pkt(hdev);
2604                 break;
2605
2606         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2607                 hci_sched_acl_blk(hdev);
2608                 break;
2609         }
2610 }
2611
2612 /* Schedule SCO */
2613 static inline void hci_sched_sco(struct hci_dev *hdev)
2614 {
2615         struct hci_conn *conn;
2616         struct sk_buff *skb;
2617         int quote;
2618
2619         BT_DBG("%s", hdev->name);
2620
2621         if (!hci_conn_num(hdev, SCO_LINK))
2622                 return;
2623
2624         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2625                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2626                         BT_DBG("skb %p len %d", skb, skb->len);
2627                         hci_send_frame(skb);
2628
2629                         conn->sent++;
2630                         if (conn->sent == ~0)
2631                                 conn->sent = 0;
2632                 }
2633         }
2634 }
2635
2636 static inline void hci_sched_esco(struct hci_dev *hdev)
2637 {
2638         struct hci_conn *conn;
2639         struct sk_buff *skb;
2640         int quote;
2641
2642         BT_DBG("%s", hdev->name);
2643
2644         if (!hci_conn_num(hdev, ESCO_LINK))
2645                 return;
2646
2647         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2648                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2649                         BT_DBG("skb %p len %d", skb, skb->len);
2650                         hci_send_frame(skb);
2651
2652                         conn->sent++;
2653                         if (conn->sent == ~0)
2654                                 conn->sent = 0;
2655                 }
2656         }
2657 }
2658
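/* LE scheduling: controllers without a dedicated LE buffer pool report
 * zero le_pkts/le_mtu, in which case LE traffic is accounted against the
 * shared ACL buffer count instead.
 */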
2659 static inline void hci_sched_le(struct hci_dev *hdev)
2660 {
2661         struct hci_chan *chan;
2662         struct sk_buff *skb;
2663         int quote, cnt, tmp;
2664
2665         BT_DBG("%s", hdev->name);
2666
2667         if (!hci_conn_num(hdev, LE_LINK))
2668                 return;
2669
2670         if (!test_bit(HCI_RAW, &hdev->flags)) {
2671                 /* LE tx timeout must be longer than maximum
2672                  * link supervision timeout (40.9 seconds) */
2673                 if (!hdev->le_cnt && hdev->le_pkts &&
2674                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2675                         hci_link_tx_to(hdev, LE_LINK);
2676         }
2677
2678         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2679         tmp = cnt;
2680         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2681                 u32 priority = (skb_peek(&chan->data_q))->priority;
2682                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2683                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2684                                         skb->len, skb->priority);
2685
2686                         /* Stop if priority has changed */
2687                         if (skb->priority < priority)
2688                                 break;
2689
2690                         skb = skb_dequeue(&chan->data_q);
2691
2692                         hci_send_frame(skb);
2693                         hdev->le_last_tx = jiffies;
2694
2695                         cnt--;
2696                         chan->sent++;
2697                         chan->conn->sent++;
2698                 }
2699         }
2700
2701         if (hdev->le_pkts)
2702                 hdev->le_cnt = cnt;
2703         else
2704                 hdev->acl_cnt = cnt;
2705
2706         if (cnt != tmp)
2707                 hci_prio_recalculate(hdev, LE_LINK);
2708 }
2709
2710 static void hci_tx_work(struct work_struct *work)
2711 {
2712         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2713         struct sk_buff *skb;
2714
2715         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2716                 hdev->sco_cnt, hdev->le_cnt);
2717
2718         /* Schedule queues and send stuff to HCI driver */
2719
2720         hci_sched_acl(hdev);
2721
2722         hci_sched_sco(hdev);
2723
2724         hci_sched_esco(hdev);
2725
2726         hci_sched_le(hdev);
2727
2728         /* Send next queued raw (unknown type) packet */
2729         while ((skb = skb_dequeue(&hdev->raw_q)))
2730                 hci_send_frame(skb);
2731 }
2732
2733 /* ----- HCI RX task (incoming data processing) ----- */
2734
2735 /* ACL data packet */
2736 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2737 {
2738         struct hci_acl_hdr *hdr = (void *) skb->data;
2739         struct hci_conn *conn;
2740         __u16 handle, flags;
2741
2742         skb_pull(skb, HCI_ACL_HDR_SIZE);
2743
2744         handle = __le16_to_cpu(hdr->handle);
2745         flags  = hci_flags(handle);
2746         handle = hci_handle(handle);
2747
2748         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2749
2750         hdev->stat.acl_rx++;
2751
2752         hci_dev_lock(hdev);
2753         conn = hci_conn_hash_lookup_handle(hdev, handle);
2754         hci_dev_unlock(hdev);
2755
2756         if (conn) {
2757                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2758
2759                 /* Send to upper protocol */
2760                 l2cap_recv_acldata(conn, skb, flags);
2761                 return;
2762         } else {
2763                 BT_ERR("%s ACL packet for unknown connection handle %d",
2764                         hdev->name, handle);
2765         }
2766
2767         kfree_skb(skb);
2768 }
2769
2770 /* SCO data packet */
2771 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2772 {
2773         struct hci_sco_hdr *hdr = (void *) skb->data;
2774         struct hci_conn *conn;
2775         __u16 handle;
2776
2777         skb_pull(skb, HCI_SCO_HDR_SIZE);
2778
2779         handle = __le16_to_cpu(hdr->handle);
2780
2781         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2782
2783         hdev->stat.sco_rx++;
2784
2785         hci_dev_lock(hdev);
2786         conn = hci_conn_hash_lookup_handle(hdev, handle);
2787         hci_dev_unlock(hdev);
2788
2789         if (conn) {
2790                 /* Send to upper protocol */
2791                 sco_recv_scodata(conn, skb);
2792                 return;
2793         } else {
2794                 BT_ERR("%s SCO packet for unknown connection handle %d",
2795                         hdev->name, handle);
2796         }
2797
2798         kfree_skb(skb);
2799 }
2800
2801 static void hci_rx_work(struct work_struct *work)
2802 {
2803         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2804         struct sk_buff *skb;
2805
2806         BT_DBG("%s", hdev->name);
2807
2808         while ((skb = skb_dequeue(&hdev->rx_q))) {
2809                 /* Send copy to monitor */
2810                 hci_send_to_monitor(hdev, skb);
2811
2812                 if (atomic_read(&hdev->promisc)) {
2813                         /* Send copy to the sockets */
2814                         hci_send_to_sock(hdev, skb);
2815                 }
2816
2817                 if (test_bit(HCI_RAW, &hdev->flags)) {
2818                         kfree_skb(skb);
2819                         continue;
2820                 }
2821
2822                 if (test_bit(HCI_INIT, &hdev->flags)) {
2823                         /* Don't process data packets in this state. */
2824                         switch (bt_cb(skb)->pkt_type) {
2825                         case HCI_ACLDATA_PKT:
2826                         case HCI_SCODATA_PKT:
2827                                 kfree_skb(skb);
2828                                 continue;
2829                         }
2830                 }
2831
2832                 /* Process frame */
2833                 switch (bt_cb(skb)->pkt_type) {
2834                 case HCI_EVENT_PKT:
2835                         BT_DBG("%s Event packet", hdev->name);
2836                         hci_event_packet(hdev, skb);
2837                         break;
2838
2839                 case HCI_ACLDATA_PKT:
2840                         BT_DBG("%s ACL data packet", hdev->name);
2841                         hci_acldata_packet(hdev, skb);
2842                         break;
2843
2844                 case HCI_SCODATA_PKT:
2845                         BT_DBG("%s SCO data packet", hdev->name);
2846                         hci_scodata_packet(hdev, skb);
2847                         break;
2848
2849                 default:
2850                         kfree_skb(skb);
2851                         break;
2852                 }
2853         }
2854 }
2855
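/* Command flow control: cmd_cnt mirrors the controller's
 * Num_HCI_Command_Packets credit (normally one outstanding command).
 * cmd_timer is re-armed on every send so a lost Command Complete or
 * Command Status event shows up as a timeout.
 */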
2856 static void hci_cmd_work(struct work_struct *work)
2857 {
2858         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2859         struct sk_buff *skb;
2860
2861         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2862
2863         /* Send queued commands */
2864         if (atomic_read(&hdev->cmd_cnt)) {
2865                 skb = skb_dequeue(&hdev->cmd_q);
2866                 if (!skb)
2867                         return;
2868
2869                 kfree_skb(hdev->sent_cmd);
2870
2871                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2872                 if (hdev->sent_cmd) {
2873                         atomic_dec(&hdev->cmd_cnt);
2874                         hci_send_frame(skb);
2875                         if (test_bit(HCI_RESET, &hdev->flags))
2876                                 del_timer(&hdev->cmd_timer);
2877                         else
2878                                 mod_timer(&hdev->cmd_timer,
2879                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2880                 } else {
2881                         skb_queue_head(&hdev->cmd_q, skb);
2882                         queue_work(hdev->workqueue, &hdev->cmd_work);
2883                 }
2884         }
2885 }
2886
2887 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2888 {
2889         /* General inquiry access code (GIAC): 0x9e8b33, little-endian */
2890         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2891         struct hci_cp_inquiry cp;
2892
2893         BT_DBG("%s", hdev->name);
2894
2895         if (test_bit(HCI_INQUIRY, &hdev->flags))
2896                 return -EINPROGRESS;
2897
2898         inquiry_cache_flush(hdev);
2899
2900         memset(&cp, 0, sizeof(cp));
2901         memcpy(&cp.lap, lap, sizeof(cp.lap));
2902         cp.length  = length;
2903
2904         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2905 }
2906
2907 int hci_cancel_inquiry(struct hci_dev *hdev)
2908 {
2909         BT_DBG("%s", hdev->name);
2910
2911         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2912                 return -EPERM;
2913
2914         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2915 }