net/bluetooth/hci_core.c ("Bluetooth: Add support for setting SSP debug mode")
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

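/* The read-only entries below all follow the same seq_file pattern: a
 * *_show() callback prints the current state under hci_dev_lock(), and
 * the matching *_open() wraps it with single_open(), so a plain read(2)
 * of the debugfs file (typically under /sys/kernel/debug/bluetooth/hciN,
 * path assumed) returns the formatted text. Writable tunables further
 * down use DEFINE_SIMPLE_ATTRIBUTE() getters and setters instead.
 */
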
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u32 data0, data5;
                u16 data1, data2, data3, data4;

                /* Decode the 128-bit UUID value itself (stored little
                 * endian), not the struct bt_uuid that wraps it.
                 */
                data5 = get_unaligned_le32(uuid->uuid);
                data4 = get_unaligned_le16(uuid->uuid + 4);
                data3 = get_unaligned_le16(uuid->uuid + 6);
                data2 = get_unaligned_le16(uuid->uuid + 8);
                data1 = get_unaligned_le16(uuid->uuid + 10);
                data0 = get_unaligned_le32(uuid->uuid + 12);

                seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
                           data0, data1, data2, data3, data4, data5);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
        .open           = simple_open,
        .read           = use_debug_keys_read,
        .llseek         = default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

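/* Writing to the ssp_debug_mode entry sends the Write Simple Pairing
 * Debug Mode command synchronously to the controller. Only 0 and 1 are
 * accepted, the device must be up, and the cached hdev->ssp_debug_mode
 * value is only updated after the command status reports success.
 */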
static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int own_address_type_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && val != 1)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->own_addr_type = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->own_addr_type;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
                        own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        /* Walk the long term key list, not the BR/EDR link key list */
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           8, ltk->rand, 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

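/* Take the last received event (hdev->recv_evt) and verify that it is
 * what the synchronous command sender was waiting for: either a specific
 * event code, or a Command Complete event carrying the expected opcode.
 * On any mismatch the skb is freed and -ENODATA returned.
 */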
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

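/* Send a single HCI command and sleep until the controller answers with
 * the requested event (Command Complete when @event is 0), a signal
 * arrives, or @timeout expires. Returns the response skb on success or
 * an ERR_PTR. Callers are expected to hold hdev->req_lock, as
 * ssp_debug_mode_set() above does.
 */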
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
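/* Note that callers must already hold hdev->req_lock; hci_req_sync()
 * below acquires it before calling this helper.
 */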
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

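/* Third stage init: stored link key cleanup, default link policy, LE
 * address type selection and feature pages beyond page 1.
 */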
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                /* If the controller has a public BD_ADDR, then by
                 * default use that one. If this is a LE only
                 * controller without one, default to the random
                 * address.
                 */
                if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                        hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
                else
                        hdev->own_addr_type = ADDR_LE_DEV_RANDOM;

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

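/* Controller bring-up is staged: init1 resets the controller and reads
 * basic information, init2 performs BR/EDR and/or LE setup including the
 * event mask, init3 covers link policy and the LE address type, and
 * init4 sets event mask page 2 and reads synchronization train
 * parameters where supported. The debugfs entries are created only once,
 * while HCI_SETUP is still set.
 */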
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
                                    hdev, &use_debug_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
                                    hdev, &ssp_debug_mode_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                   hdev, &static_address_fops);
                debugfs_create_file("own_address_type", 0644, hdev->debugfs,
                                    hdev, &own_address_type_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
        }

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

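/* Re-insert @ie so the resolve list stays ordered by signal strength:
 * entries with a smaller abs(rssi), i.e. a stronger signal, come first
 * and therefore get their remote names resolved first.
 */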
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

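/* Add or refresh an inquiry cache entry for @data; *ssp is set when the
 * device (or an earlier entry for it) indicates SSP support. Returns
 * false only while the entry's remote name is still unknown, signalling
 * that name resolution may be needed.
 */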
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

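/* HCIINQUIRY ioctl handler. Copies the request from user space, flushes
 * the inquiry cache and runs a fresh inquiry if the cached results are
 * stale (or a flush was explicitly requested), and then copies up to
 * max_rsp cached entries back to user space.
 */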
1579 int hci_inquiry(void __user *arg)
1580 {
1581         __u8 __user *ptr = arg;
1582         struct hci_inquiry_req ir;
1583         struct hci_dev *hdev;
1584         int err = 0, do_inquiry = 0, max_rsp;
1585         long timeo;
1586         __u8 *buf;
1587
1588         if (copy_from_user(&ir, ptr, sizeof(ir)))
1589                 return -EFAULT;
1590
1591         hdev = hci_dev_get(ir.dev_id);
1592         if (!hdev)
1593                 return -ENODEV;
1594
1595         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1596                 err = -EBUSY;
1597                 goto done;
1598         }
1599
1600         if (hdev->dev_type != HCI_BREDR) {
1601                 err = -EOPNOTSUPP;
1602                 goto done;
1603         }
1604
1605         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1606                 err = -EOPNOTSUPP;
1607                 goto done;
1608         }
1609
1610         hci_dev_lock(hdev);
1611         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1612             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1613                 hci_inquiry_cache_flush(hdev);
1614                 do_inquiry = 1;
1615         }
1616         hci_dev_unlock(hdev);
1617
1618         timeo = ir.length * msecs_to_jiffies(2000);
1619
1620         if (do_inquiry) {
1621                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1622                                    timeo);
1623                 if (err < 0)
1624                         goto done;
1625
1626                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1627                  * cleared). If it is interrupted by a signal, return -EINTR.
1628                  */
1629                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1630                                 TASK_INTERRUPTIBLE)) {
1631                         err = -EINTR;
                              goto done;
                      }
1632         }
1633
1634         /* For an unlimited number of responses (num_rsp == 0), use a
1635          * buffer with 255 entries.
1636          */
1637         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1638
1639         /* cache_dump can't sleep, so allocate a temporary buffer here and
1640          * copy it to user space afterwards.
1641          */
1642         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1643         if (!buf) {
1644                 err = -ENOMEM;
1645                 goto done;
1646         }
1647
1648         hci_dev_lock(hdev);
1649         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1650         hci_dev_unlock(hdev);
1651
1652         BT_DBG("num_rsp %d", ir.num_rsp);
1653
1654         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1655                 ptr += sizeof(ir);
1656                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1657                                  ir.num_rsp))
1658                         err = -EFAULT;
1659         } else
1660                 err = -EFAULT;
1661
1662         kfree(buf);
1663
1664 done:
1665         hci_dev_put(hdev);
1666         return err;
1667 }
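
/* Example (editor's sketch, not part of this file): the HCIINQUIRY
 * ioctl handled above is typically driven from userspace roughly as
 * follows. This assumes the BlueZ-style definitions from
 * <bluetooth/hci.h> and an already-open raw HCI socket 'dd'; names
 * are illustrative and error handling is trimmed.
 *
 *	char buf[sizeof(struct hci_inquiry_req) +
 *		 255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *) buf;
 *
 *	memset(buf, 0, sizeof(buf));
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;
 *	ir->num_rsp = 255;
 *
 *	if (ioctl(dd, HCIINQUIRY, (unsigned long) buf) < 0)
 *		perror("HCIINQUIRY");
 *
 * The LAP above is the General Inquiry Access Code 0x9e8b33 and the
 * length is in units of 1.28 s. On success, ir->num_rsp records of
 * struct inquiry_info follow the request header in buf, exactly as
 * written by inquiry_cache_dump() above.
 */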
1668
1669 static int hci_dev_do_open(struct hci_dev *hdev)
1670 {
1671         int ret = 0;
1672
1673         BT_DBG("%s %p", hdev->name, hdev);
1674
1675         hci_req_lock(hdev);
1676
1677         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1678                 ret = -ENODEV;
1679                 goto done;
1680         }
1681
1682         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1683                 /* Check for rfkill but allow the HCI setup stage to
1684                  * proceed (which in itself doesn't cause any RF activity).
1685                  */
1686                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1687                         ret = -ERFKILL;
1688                         goto done;
1689                 }
1690
1691                 /* Check for valid public address or a configured static
1692          * random address, but let the HCI setup proceed to
1693                  * be able to determine if there is a public address
1694                  * or not.
1695                  *
1696                  * This check is only valid for BR/EDR controllers
1697                  * since AMP controllers do not have an address.
1698                  */
1699                 if (hdev->dev_type == HCI_BREDR &&
1700                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1701                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1702                         ret = -EADDRNOTAVAIL;
1703                         goto done;
1704                 }
1705         }
1706
1707         if (test_bit(HCI_UP, &hdev->flags)) {
1708                 ret = -EALREADY;
1709                 goto done;
1710         }
1711
1712         if (hdev->open(hdev)) {
1713                 ret = -EIO;
1714                 goto done;
1715         }
1716
1717         atomic_set(&hdev->cmd_cnt, 1);
1718         set_bit(HCI_INIT, &hdev->flags);
1719
1720         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1721                 ret = hdev->setup(hdev);
1722
1723         if (!ret) {
1724                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1725                         set_bit(HCI_RAW, &hdev->flags);
1726
1727                 if (!test_bit(HCI_RAW, &hdev->flags) &&
1728                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1729                         ret = __hci_init(hdev);
1730         }
1731
1732         clear_bit(HCI_INIT, &hdev->flags);
1733
1734         if (!ret) {
1735                 hci_dev_hold(hdev);
1736                 set_bit(HCI_UP, &hdev->flags);
1737                 hci_notify(hdev, HCI_DEV_UP);
1738                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1739                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1740                     hdev->dev_type == HCI_BREDR) {
1741                         hci_dev_lock(hdev);
1742                         mgmt_powered(hdev, 1);
1743                         hci_dev_unlock(hdev);
1744                 }
1745         } else {
1746                 /* Init failed, cleanup */
1747                 flush_work(&hdev->tx_work);
1748                 flush_work(&hdev->cmd_work);
1749                 flush_work(&hdev->rx_work);
1750
1751                 skb_queue_purge(&hdev->cmd_q);
1752                 skb_queue_purge(&hdev->rx_q);
1753
1754                 if (hdev->flush)
1755                         hdev->flush(hdev);
1756
1757                 if (hdev->sent_cmd) {
1758                         kfree_skb(hdev->sent_cmd);
1759                         hdev->sent_cmd = NULL;
1760                 }
1761
1762                 hdev->close(hdev);
1763                 hdev->flags = 0;
1764         }
1765
1766 done:
1767         hci_req_unlock(hdev);
1768         return ret;
1769 }
1770
1771 /* ---- HCI ioctl helpers ---- */
1772
1773 int hci_dev_open(__u16 dev)
1774 {
1775         struct hci_dev *hdev;
1776         int err;
1777
1778         hdev = hci_dev_get(dev);
1779         if (!hdev)
1780                 return -ENODEV;
1781
1782         /* We need to ensure that no other power on/off work is pending
1783          * before proceeding to call hci_dev_do_open. This is
1784          * particularly important if the setup procedure has not yet
1785          * completed.
1786          */
1787         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1788                 cancel_delayed_work(&hdev->power_off);
1789
1790         /* After this call it is guaranteed that the setup procedure
1791          * has finished. This means that error conditions like RFKILL
1792          * or no valid public or static random address apply.
1793          */
1794         flush_workqueue(hdev->req_workqueue);
1795
1796         err = hci_dev_do_open(hdev);
1797
1798         hci_dev_put(hdev);
1799
1800         return err;
1801 }
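
/* Example (editor's sketch): hci_dev_open() is reached from userspace
 * via the HCIDEVUP ioctl on an HCI control socket, which is what
 * "hciconfig hci0 up" issues. Illustration only:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP hci0");
 *
 * The matching HCIDEVDOWN ioctl ends up in hci_dev_close() below.
 */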
1802
1803 static int hci_dev_do_close(struct hci_dev *hdev)
1804 {
1805         BT_DBG("%s %p", hdev->name, hdev);
1806
1807         cancel_delayed_work(&hdev->power_off);
1808
1809         hci_req_cancel(hdev, ENODEV);
1810         hci_req_lock(hdev);
1811
1812         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1813                 del_timer_sync(&hdev->cmd_timer);
1814                 hci_req_unlock(hdev);
1815                 return 0;
1816         }
1817
1818         /* Flush RX and TX works */
1819         flush_work(&hdev->tx_work);
1820         flush_work(&hdev->rx_work);
1821
1822         if (hdev->discov_timeout > 0) {
1823                 cancel_delayed_work(&hdev->discov_off);
1824                 hdev->discov_timeout = 0;
1825                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1826                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1827         }
1828
1829         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1830                 cancel_delayed_work(&hdev->service_cache);
1831
1832         cancel_delayed_work_sync(&hdev->le_scan_disable);
1833
1834         hci_dev_lock(hdev);
1835         hci_inquiry_cache_flush(hdev);
1836         hci_conn_hash_flush(hdev);
1837         hci_dev_unlock(hdev);
1838
1839         hci_notify(hdev, HCI_DEV_DOWN);
1840
1841         if (hdev->flush)
1842                 hdev->flush(hdev);
1843
1844         /* Reset device */
1845         skb_queue_purge(&hdev->cmd_q);
1846         atomic_set(&hdev->cmd_cnt, 1);
1847         if (!test_bit(HCI_RAW, &hdev->flags) &&
1848             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1849             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1850                 set_bit(HCI_INIT, &hdev->flags);
1851                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1852                 clear_bit(HCI_INIT, &hdev->flags);
1853         }
1854
1855         /* Flush cmd work */
1856         flush_work(&hdev->cmd_work);
1857
1858         /* Drop queues */
1859         skb_queue_purge(&hdev->rx_q);
1860         skb_queue_purge(&hdev->cmd_q);
1861         skb_queue_purge(&hdev->raw_q);
1862
1863         /* Drop last sent command */
1864         if (hdev->sent_cmd) {
1865                 del_timer_sync(&hdev->cmd_timer);
1866                 kfree_skb(hdev->sent_cmd);
1867                 hdev->sent_cmd = NULL;
1868         }
1869
1870         kfree_skb(hdev->recv_evt);
1871         hdev->recv_evt = NULL;
1872
1873         /* After this point our queues are empty
1874          * and no tasks are scheduled. */
1875         hdev->close(hdev);
1876
1877         /* Clear flags */
1878         hdev->flags = 0;
1879         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1880
1881         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1882                 if (hdev->dev_type == HCI_BREDR) {
1883                         hci_dev_lock(hdev);
1884                         mgmt_powered(hdev, 0);
1885                         hci_dev_unlock(hdev);
1886                 }
1887         }
1888
1889         /* Controller radio is available but is currently powered down */
1890         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1891
1892         memset(hdev->eir, 0, sizeof(hdev->eir));
1893         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1894
1895         hci_req_unlock(hdev);
1896
1897         hci_dev_put(hdev);
1898         return 0;
1899 }
1900
1901 int hci_dev_close(__u16 dev)
1902 {
1903         struct hci_dev *hdev;
1904         int err;
1905
1906         hdev = hci_dev_get(dev);
1907         if (!hdev)
1908                 return -ENODEV;
1909
1910         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1911                 err = -EBUSY;
1912                 goto done;
1913         }
1914
1915         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1916                 cancel_delayed_work(&hdev->power_off);
1917
1918         err = hci_dev_do_close(hdev);
1919
1920 done:
1921         hci_dev_put(hdev);
1922         return err;
1923 }
1924
1925 int hci_dev_reset(__u16 dev)
1926 {
1927         struct hci_dev *hdev;
1928         int ret = 0;
1929
1930         hdev = hci_dev_get(dev);
1931         if (!hdev)
1932                 return -ENODEV;
1933
1934         hci_req_lock(hdev);
1935
1936         if (!test_bit(HCI_UP, &hdev->flags)) {
1937                 ret = -ENETDOWN;
1938                 goto done;
1939         }
1940
1941         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1942                 ret = -EBUSY;
1943                 goto done;
1944         }
1945
1946         /* Drop queues */
1947         skb_queue_purge(&hdev->rx_q);
1948         skb_queue_purge(&hdev->cmd_q);
1949
1950         hci_dev_lock(hdev);
1951         hci_inquiry_cache_flush(hdev);
1952         hci_conn_hash_flush(hdev);
1953         hci_dev_unlock(hdev);
1954
1955         if (hdev->flush)
1956                 hdev->flush(hdev);
1957
1958         atomic_set(&hdev->cmd_cnt, 1);
1959         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1960
1961         if (!test_bit(HCI_RAW, &hdev->flags))
1962                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1963
1964 done:
1965         hci_req_unlock(hdev);
1966         hci_dev_put(hdev);
1967         return ret;
1968 }
1969
1970 int hci_dev_reset_stat(__u16 dev)
1971 {
1972         struct hci_dev *hdev;
1973         int ret = 0;
1974
1975         hdev = hci_dev_get(dev);
1976         if (!hdev)
1977                 return -ENODEV;
1978
1979         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1980                 ret = -EBUSY;
1981                 goto done;
1982         }
1983
1984         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1985
1986 done:
1987         hci_dev_put(hdev);
1988         return ret;
1989 }
1990
1991 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1992 {
1993         struct hci_dev *hdev;
1994         struct hci_dev_req dr;
1995         int err = 0;
1996
1997         if (copy_from_user(&dr, arg, sizeof(dr)))
1998                 return -EFAULT;
1999
2000         hdev = hci_dev_get(dr.dev_id);
2001         if (!hdev)
2002                 return -ENODEV;
2003
2004         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2005                 err = -EBUSY;
2006                 goto done;
2007         }
2008
2009         if (hdev->dev_type != HCI_BREDR) {
2010                 err = -EOPNOTSUPP;
2011                 goto done;
2012         }
2013
2014         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2015                 err = -EOPNOTSUPP;
2016                 goto done;
2017         }
2018
2019         switch (cmd) {
2020         case HCISETAUTH:
2021                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2022                                    HCI_INIT_TIMEOUT);
2023                 break;
2024
2025         case HCISETENCRYPT:
2026                 if (!lmp_encrypt_capable(hdev)) {
2027                         err = -EOPNOTSUPP;
2028                         break;
2029                 }
2030
2031                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2032                         /* Auth must be enabled first */
2033                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2034                                            HCI_INIT_TIMEOUT);
2035                         if (err)
2036                                 break;
2037                 }
2038
2039                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2040                                    HCI_INIT_TIMEOUT);
2041                 break;
2042
2043         case HCISETSCAN:
2044                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2045                                    HCI_INIT_TIMEOUT);
2046                 break;
2047
2048         case HCISETLINKPOL:
2049                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2050                                    HCI_INIT_TIMEOUT);
2051                 break;
2052
2053         case HCISETLINKMODE:
2054                 hdev->link_mode = ((__u16) dr.dev_opt) &
2055                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2056                 break;
2057
2058         case HCISETPTYPE:
2059                 hdev->pkt_type = (__u16) dr.dev_opt;
2060                 break;
2061
2062         case HCISETACLMTU:
2063                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2064                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2065                 break;
2066
2067         case HCISETSCOMTU:
2068                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2069                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2070                 break;
2071
2072         default:
2073                 err = -EINVAL;
2074                 break;
2075         }
2076
2077 done:
2078         hci_dev_put(hdev);
2079         return err;
2080 }
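
/* Example (editor's sketch): each ioctl handled above takes a
 * struct hci_dev_req with the setting packed into dev_opt. Roughly
 * what "hciconfig hci0 piscan" sends (constants from
 * <bluetooth/hci.h>, 'ctl' an HCI control socket):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */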
2081
2082 int hci_get_dev_list(void __user *arg)
2083 {
2084         struct hci_dev *hdev;
2085         struct hci_dev_list_req *dl;
2086         struct hci_dev_req *dr;
2087         int n = 0, size, err;
2088         __u16 dev_num;
2089
2090         if (get_user(dev_num, (__u16 __user *) arg))
2091                 return -EFAULT;
2092
2093         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2094                 return -EINVAL;
2095
2096         size = sizeof(*dl) + dev_num * sizeof(*dr);
2097
2098         dl = kzalloc(size, GFP_KERNEL);
2099         if (!dl)
2100                 return -ENOMEM;
2101
2102         dr = dl->dev_req;
2103
2104         read_lock(&hci_dev_list_lock);
2105         list_for_each_entry(hdev, &hci_dev_list, list) {
2106                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2107                         cancel_delayed_work(&hdev->power_off);
2108
2109                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2110                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2111
2112                 (dr + n)->dev_id  = hdev->id;
2113                 (dr + n)->dev_opt = hdev->flags;
2114
2115                 if (++n >= dev_num)
2116                         break;
2117         }
2118         read_unlock(&hci_dev_list_lock);
2119
2120         dl->dev_num = n;
2121         size = sizeof(*dl) + n * sizeof(*dr);
2122
2123         err = copy_to_user(arg, dl, size);
2124         kfree(dl);
2125
2126         return err ? -EFAULT : 0;
2127 }
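
/* Example (editor's sketch): the userspace side of HCIGETDEVLIST
 * mirrors the flexible-array layout allocated above. Assumes
 * HCI_MAX_DEV and the request structs from <bluetooth/hci.h>:
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = calloc(1, sizeof(*dl) +
 *		    HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *
 *	if (ioctl(ctl, HCIGETDEVLIST, (void *) dl) < 0)
 *		perror("HCIGETDEVLIST");
 *
 * On return, dl->dev_num holds the number of entries actually filled
 * in, just as the kernel side sets it above.
 */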
2128
2129 int hci_get_dev_info(void __user *arg)
2130 {
2131         struct hci_dev *hdev;
2132         struct hci_dev_info di;
2133         int err = 0;
2134
2135         if (copy_from_user(&di, arg, sizeof(di)))
2136                 return -EFAULT;
2137
2138         hdev = hci_dev_get(di.dev_id);
2139         if (!hdev)
2140                 return -ENODEV;
2141
2142         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2143                 cancel_delayed_work_sync(&hdev->power_off);
2144
2145         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2146                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2147
2148         strcpy(di.name, hdev->name);
2149         di.bdaddr   = hdev->bdaddr;
2150         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2151         di.flags    = hdev->flags;
2152         di.pkt_type = hdev->pkt_type;
2153         if (lmp_bredr_capable(hdev)) {
2154                 di.acl_mtu  = hdev->acl_mtu;
2155                 di.acl_pkts = hdev->acl_pkts;
2156                 di.sco_mtu  = hdev->sco_mtu;
2157                 di.sco_pkts = hdev->sco_pkts;
2158         } else {
2159                 di.acl_mtu  = hdev->le_mtu;
2160                 di.acl_pkts = hdev->le_pkts;
2161                 di.sco_mtu  = 0;
2162                 di.sco_pkts = 0;
2163         }
2164         di.link_policy = hdev->link_policy;
2165         di.link_mode   = hdev->link_mode;
2166
2167         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2168         memcpy(&di.features, &hdev->features, sizeof(di.features));
2169
2170         if (copy_to_user(arg, &di, sizeof(di)))
2171                 err = -EFAULT;
2172
2173         hci_dev_put(hdev);
2174
2175         return err;
2176 }
2177
2178 /* ---- Interface to HCI drivers ---- */
2179
2180 static int hci_rfkill_set_block(void *data, bool blocked)
2181 {
2182         struct hci_dev *hdev = data;
2183
2184         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2185
2186         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2187                 return -EBUSY;
2188
2189         if (blocked) {
2190                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2191                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2192                         hci_dev_do_close(hdev);
2193         } else {
2194                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2195         }
2196
2197         return 0;
2198 }
2199
2200 static const struct rfkill_ops hci_rfkill_ops = {
2201         .set_block = hci_rfkill_set_block,
2202 };
2203
2204 static void hci_power_on(struct work_struct *work)
2205 {
2206         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2207         int err;
2208
2209         BT_DBG("%s", hdev->name);
2210
2211         err = hci_dev_do_open(hdev);
2212         if (err < 0) {
2213                 mgmt_set_powered_failed(hdev, err);
2214                 return;
2215         }
2216
2217         /* During the HCI setup phase, a few error conditions are
2218          * ignored and they need to be checked now. If they are still
2219          * valid, it is important to turn the device back off.
2220          */
2221         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2222             (hdev->dev_type == HCI_BREDR &&
2223              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2224              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2225                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2226                 hci_dev_do_close(hdev);
2227         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2228                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2229                                    HCI_AUTO_OFF_TIMEOUT);
2230         }
2231
2232         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2233                 mgmt_index_added(hdev);
2234 }
2235
2236 static void hci_power_off(struct work_struct *work)
2237 {
2238         struct hci_dev *hdev = container_of(work, struct hci_dev,
2239                                             power_off.work);
2240
2241         BT_DBG("%s", hdev->name);
2242
2243         hci_dev_do_close(hdev);
2244 }
2245
2246 static void hci_discov_off(struct work_struct *work)
2247 {
2248         struct hci_dev *hdev;
2249
2250         hdev = container_of(work, struct hci_dev, discov_off.work);
2251
2252         BT_DBG("%s", hdev->name);
2253
2254         mgmt_discoverable_timeout(hdev);
2255 }
2256
2257 int hci_uuids_clear(struct hci_dev *hdev)
2258 {
2259         struct bt_uuid *uuid, *tmp;
2260
2261         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2262                 list_del(&uuid->list);
2263                 kfree(uuid);
2264         }
2265
2266         return 0;
2267 }
2268
2269 int hci_link_keys_clear(struct hci_dev *hdev)
2270 {
2271         struct list_head *p, *n;
2272
2273         list_for_each_safe(p, n, &hdev->link_keys) {
2274                 struct link_key *key;
2275
2276                 key = list_entry(p, struct link_key, list);
2277
2278                 list_del(p);
2279                 kfree(key);
2280         }
2281
2282         return 0;
2283 }
2284
2285 int hci_smp_ltks_clear(struct hci_dev *hdev)
2286 {
2287         struct smp_ltk *k, *tmp;
2288
2289         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2290                 list_del(&k->list);
2291                 kfree(k);
2292         }
2293
2294         return 0;
2295 }
2296
2297 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2298 {
2299         struct link_key *k;
2300
2301         list_for_each_entry(k, &hdev->link_keys, list)
2302                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2303                         return k;
2304
2305         return NULL;
2306 }
2307
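/* Editor's note on the magic numbers below: key_type values under
 * 0x03 are the legacy-pairing keys (combination, local unit, remote
 * unit), while auth_type/remote_auth follow the HCI authentication
 * requirements encoding: 0x00/0x01 no bonding, 0x02/0x03 dedicated
 * bonding, 0x04/0x05 general bonding, with odd values additionally
 * requesting MITM protection. So "> 0x01" reads as "some form of
 * bonding was requested" and "== 0x02 || == 0x03" as "dedicated
 * bonding was requested".
 */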
2308 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2309                                u8 key_type, u8 old_key_type)
2310 {
2311         /* Legacy key */
2312         if (key_type < 0x03)
2313                 return true;
2314
2315         /* Debug keys are insecure so don't store them persistently */
2316         if (key_type == HCI_LK_DEBUG_COMBINATION)
2317                 return false;
2318
2319         /* Changed combination key and there's no previous one */
2320         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2321                 return false;
2322
2323         /* Security mode 3 case */
2324         if (!conn)
2325                 return true;
2326
2327         /* Neither the local nor the remote side had no-bonding as a requirement */
2328         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2329                 return true;
2330
2331         /* Local side had dedicated bonding as requirement */
2332         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2333                 return true;
2334
2335         /* Remote side had dedicated bonding as requirement */
2336         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2337                 return true;
2338
2339         /* If none of the above criteria match, then don't store the key
2340          * persistently */
2341         return false;
2342 }
2343
2344 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
2345 {
2346         struct smp_ltk *k;
2347
2348         list_for_each_entry(k, &hdev->long_term_keys, list) {
2349                 if (k->ediv != ediv ||
2350                     memcmp(rand, k->rand, sizeof(k->rand)))
2351                         continue;
2352
2353                 return k;
2354         }
2355
2356         return NULL;
2357 }
2358
2359 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2360                                      u8 addr_type)
2361 {
2362         struct smp_ltk *k;
2363
2364         list_for_each_entry(k, &hdev->long_term_keys, list)
2365                 if (addr_type == k->bdaddr_type &&
2366                     bacmp(bdaddr, &k->bdaddr) == 0)
2367                         return k;
2368
2369         return NULL;
2370 }
2371
2372 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2373                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2374 {
2375         struct link_key *key, *old_key;
2376         u8 old_key_type;
2377         bool persistent;
2378
2379         old_key = hci_find_link_key(hdev, bdaddr);
2380         if (old_key) {
2381                 old_key_type = old_key->type;
2382                 key = old_key;
2383         } else {
2384                 old_key_type = conn ? conn->key_type : 0xff;
2385                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2386                 if (!key)
2387                         return -ENOMEM;
2388                 list_add(&key->list, &hdev->link_keys);
2389         }
2390
2391         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2392
2393         /* Some buggy controller combinations generate a changed
2394          * combination key for legacy pairing even when there's no
2395          * previous key */
2396         if (type == HCI_LK_CHANGED_COMBINATION &&
2397             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2398                 type = HCI_LK_COMBINATION;
2399                 if (conn)
2400                         conn->key_type = type;
2401         }
2402
2403         bacpy(&key->bdaddr, bdaddr);
2404         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2405         key->pin_len = pin_len;
2406
2407         if (type == HCI_LK_CHANGED_COMBINATION)
2408                 key->type = old_key_type;
2409         else
2410                 key->type = type;
2411
2412         if (!new_key)
2413                 return 0;
2414
2415         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2416
2417         mgmt_new_link_key(hdev, key, persistent);
2418
2419         if (conn)
2420                 conn->flush_key = !persistent;
2421
2422         return 0;
2423 }
2424
2425 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2426                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2427                 ediv, u8 rand[8])
2428 {
2429         struct smp_ltk *key, *old_key;
2430
2431         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2432                 return 0;
2433
2434         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2435         if (old_key)
2436                 key = old_key;
2437         else {
2438                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2439                 if (!key)
2440                         return -ENOMEM;
2441                 list_add(&key->list, &hdev->long_term_keys);
2442         }
2443
2444         bacpy(&key->bdaddr, bdaddr);
2445         key->bdaddr_type = addr_type;
2446         memcpy(key->val, tk, sizeof(key->val));
2447         key->authenticated = authenticated;
2448         key->ediv = ediv;
2449         key->enc_size = enc_size;
2450         key->type = type;
2451         memcpy(key->rand, rand, sizeof(key->rand));
2452
2453         if (!new_key)
2454                 return 0;
2455
2456         if (type & HCI_SMP_LTK)
2457                 mgmt_new_ltk(hdev, key, 1);
2458
2459         return 0;
2460 }
2461
2462 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2463 {
2464         struct link_key *key;
2465
2466         key = hci_find_link_key(hdev, bdaddr);
2467         if (!key)
2468                 return -ENOENT;
2469
2470         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2471
2472         list_del(&key->list);
2473         kfree(key);
2474
2475         return 0;
2476 }
2477
2478 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2479 {
2480         struct smp_ltk *k, *tmp;
2481
2482         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2483                 if (bacmp(bdaddr, &k->bdaddr))
2484                         continue;
2485
2486                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2487
2488                 list_del(&k->list);
2489                 kfree(k);
2490         }
2491
2492         return 0;
2493 }
2494
2495 /* HCI command timer function */
2496 static void hci_cmd_timeout(unsigned long arg)
2497 {
2498         struct hci_dev *hdev = (void *) arg;
2499
2500         if (hdev->sent_cmd) {
2501                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2502                 u16 opcode = __le16_to_cpu(sent->opcode);
2503
2504                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2505         } else {
2506                 BT_ERR("%s command tx timeout", hdev->name);
2507         }
2508
2509         atomic_set(&hdev->cmd_cnt, 1);
2510         queue_work(hdev->workqueue, &hdev->cmd_work);
2511 }
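
/* Editor's note: restoring the command credit (cmd_cnt = 1) here is
 * what allows hci_cmd_work() to transmit the next queued command;
 * without it the command queue would stall forever behind the command
 * that never received its completion event.
 */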
2512
2513 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2514                                           bdaddr_t *bdaddr)
2515 {
2516         struct oob_data *data;
2517
2518         list_for_each_entry(data, &hdev->remote_oob_data, list)
2519                 if (bacmp(bdaddr, &data->bdaddr) == 0)
2520                         return data;
2521
2522         return NULL;
2523 }
2524
2525 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2526 {
2527         struct oob_data *data;
2528
2529         data = hci_find_remote_oob_data(hdev, bdaddr);
2530         if (!data)
2531                 return -ENOENT;
2532
2533         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2534
2535         list_del(&data->list);
2536         kfree(data);
2537
2538         return 0;
2539 }
2540
2541 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2542 {
2543         struct oob_data *data, *n;
2544
2545         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2546                 list_del(&data->list);
2547                 kfree(data);
2548         }
2549
2550         return 0;
2551 }
2552
2553 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2554                             u8 *randomizer)
2555 {
2556         struct oob_data *data;
2557
2558         data = hci_find_remote_oob_data(hdev, bdaddr);
2559
2560         if (!data) {
2561                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2562                 if (!data)
2563                         return -ENOMEM;
2564
2565                 bacpy(&data->bdaddr, bdaddr);
2566                 list_add(&data->list, &hdev->remote_oob_data);
2567         }
2568
2569         memcpy(data->hash, hash, sizeof(data->hash));
2570         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2571
2572         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2573
2574         return 0;
2575 }
2576
2577 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2578                                          bdaddr_t *bdaddr, u8 type)
2579 {
2580         struct bdaddr_list *b;
2581
2582         list_for_each_entry(b, &hdev->blacklist, list) {
2583                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2584                         return b;
2585         }
2586
2587         return NULL;
2588 }
2589
2590 int hci_blacklist_clear(struct hci_dev *hdev)
2591 {
2592         struct list_head *p, *n;
2593
2594         list_for_each_safe(p, n, &hdev->blacklist) {
2595                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2596
2597                 list_del(p);
2598                 kfree(b);
2599         }
2600
2601         return 0;
2602 }
2603
2604 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2605 {
2606         struct bdaddr_list *entry;
2607
2608         if (!bacmp(bdaddr, BDADDR_ANY))
2609                 return -EBADF;
2610
2611         if (hci_blacklist_lookup(hdev, bdaddr, type))
2612                 return -EEXIST;
2613
2614         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2615         if (!entry)
2616                 return -ENOMEM;
2617
2618         bacpy(&entry->bdaddr, bdaddr);
2619         entry->bdaddr_type = type;
2620
2621         list_add(&entry->list, &hdev->blacklist);
2622
2623         return mgmt_device_blocked(hdev, bdaddr, type);
2624 }
2625
2626 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2627 {
2628         struct bdaddr_list *entry;
2629
2630         if (!bacmp(bdaddr, BDADDR_ANY))
2631                 return hci_blacklist_clear(hdev);
2632
2633         entry = hci_blacklist_lookup(hdev, bdaddr, type);
2634         if (!entry)
2635                 return -ENOENT;
2636
2637         list_del(&entry->list);
2638         kfree(entry);
2639
2640         return mgmt_device_unblocked(hdev, bdaddr, type);
2641 }
2642
2643 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2644 {
2645         if (status) {
2646                 BT_ERR("Failed to start inquiry: status %d", status);
2647
2648                 hci_dev_lock(hdev);
2649                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2650                 hci_dev_unlock(hdev);
2651                 return;
2652         }
2653 }
2654
2655 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2656 {
2657         /* General inquiry access code (GIAC) */
2658         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2659         struct hci_request req;
2660         struct hci_cp_inquiry cp;
2661         int err;
2662
2663         if (status) {
2664                 BT_ERR("Failed to disable LE scanning: status %d", status);
2665                 return;
2666         }
2667
2668         switch (hdev->discovery.type) {
2669         case DISCOV_TYPE_LE:
2670                 hci_dev_lock(hdev);
2671                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2672                 hci_dev_unlock(hdev);
2673                 break;
2674
2675         case DISCOV_TYPE_INTERLEAVED:
2676                 hci_req_init(&req, hdev);
2677
2678                 memset(&cp, 0, sizeof(cp));
2679                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2680                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2681                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2682
2683                 hci_dev_lock(hdev);
2684
2685                 hci_inquiry_cache_flush(hdev);
2686
2687                 err = hci_req_run(&req, inquiry_complete);
2688                 if (err) {
2689                         BT_ERR("Inquiry request failed: err %d", err);
2690                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2691                 }
2692
2693                 hci_dev_unlock(hdev);
2694                 break;
2695         }
2696 }
2697
2698 static void le_scan_disable_work(struct work_struct *work)
2699 {
2700         struct hci_dev *hdev = container_of(work, struct hci_dev,
2701                                             le_scan_disable.work);
2702         struct hci_cp_le_set_scan_enable cp;
2703         struct hci_request req;
2704         int err;
2705
2706         BT_DBG("%s", hdev->name);
2707
2708         hci_req_init(&req, hdev);
2709
2710         memset(&cp, 0, sizeof(cp));
2711         cp.enable = LE_SCAN_DISABLE;
2712         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2713
2714         err = hci_req_run(&req, le_scan_disable_work_complete);
2715         if (err)
2716                 BT_ERR("Disable LE scanning request failed: err %d", err);
2717 }
2718
2719 /* Alloc HCI device */
2720 struct hci_dev *hci_alloc_dev(void)
2721 {
2722         struct hci_dev *hdev;
2723
2724         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2725         if (!hdev)
2726                 return NULL;
2727
2728         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2729         hdev->esco_type = (ESCO_HV1);
2730         hdev->link_mode = (HCI_LM_ACCEPT);
2731         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
2732         hdev->io_capability = 0x03;     /* No Input No Output */
2733         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2734         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2735
2736         hdev->sniff_max_interval = 800;
2737         hdev->sniff_min_interval = 80;
2738
2739         hdev->le_scan_interval = 0x0060;
2740         hdev->le_scan_window = 0x0030;
2741
2742         mutex_init(&hdev->lock);
2743         mutex_init(&hdev->req_lock);
2744
2745         INIT_LIST_HEAD(&hdev->mgmt_pending);
2746         INIT_LIST_HEAD(&hdev->blacklist);
2747         INIT_LIST_HEAD(&hdev->uuids);
2748         INIT_LIST_HEAD(&hdev->link_keys);
2749         INIT_LIST_HEAD(&hdev->long_term_keys);
2750         INIT_LIST_HEAD(&hdev->remote_oob_data);
2751         INIT_LIST_HEAD(&hdev->conn_hash.list);
2752
2753         INIT_WORK(&hdev->rx_work, hci_rx_work);
2754         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2755         INIT_WORK(&hdev->tx_work, hci_tx_work);
2756         INIT_WORK(&hdev->power_on, hci_power_on);
2757
2758         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2759         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2760         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2761
2762         skb_queue_head_init(&hdev->rx_q);
2763         skb_queue_head_init(&hdev->cmd_q);
2764         skb_queue_head_init(&hdev->raw_q);
2765
2766         init_waitqueue_head(&hdev->req_wait_q);
2767
2768         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2769
2770         hci_init_sysfs(hdev);
2771         discovery_init(hdev);
2772
2773         return hdev;
2774 }
2775 EXPORT_SYMBOL(hci_alloc_dev);
2776
2777 /* Free HCI device */
2778 void hci_free_dev(struct hci_dev *hdev)
2779 {
2780         /* will free via device release */
2781         put_device(&hdev->dev);
2782 }
2783 EXPORT_SYMBOL(hci_free_dev);
2784
2785 /* Register HCI device */
2786 int hci_register_dev(struct hci_dev *hdev)
2787 {
2788         int id, error;
2789
2790         if (!hdev->open || !hdev->close)
2791                 return -EINVAL;
2792
2793         /* Do not allow HCI_AMP devices to register at index 0,
2794          * so the index can be used as the AMP controller ID.
2795          */
2796         switch (hdev->dev_type) {
2797         case HCI_BREDR:
2798                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2799                 break;
2800         case HCI_AMP:
2801                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2802                 break;
2803         default:
2804                 return -EINVAL;
2805         }
2806
2807         if (id < 0)
2808                 return id;
2809
2810         sprintf(hdev->name, "hci%d", id);
2811         hdev->id = id;
2812
2813         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2814
2815         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2816                                           WQ_MEM_RECLAIM, 1, hdev->name);
2817         if (!hdev->workqueue) {
2818                 error = -ENOMEM;
2819                 goto err;
2820         }
2821
2822         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2823                                               WQ_MEM_RECLAIM, 1, hdev->name);
2824         if (!hdev->req_workqueue) {
2825                 destroy_workqueue(hdev->workqueue);
2826                 error = -ENOMEM;
2827                 goto err;
2828         }
2829
2830         if (!IS_ERR_OR_NULL(bt_debugfs))
2831                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2832
2833         dev_set_name(&hdev->dev, "%s", hdev->name);
2834
2835         error = device_add(&hdev->dev);
2836         if (error < 0)
2837                 goto err_wqueue;
2838
2839         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2840                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2841                                     hdev);
2842         if (hdev->rfkill) {
2843                 if (rfkill_register(hdev->rfkill) < 0) {
2844                         rfkill_destroy(hdev->rfkill);
2845                         hdev->rfkill = NULL;
2846                 }
2847         }
2848
2849         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2850                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2851
2852         set_bit(HCI_SETUP, &hdev->dev_flags);
2853         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2854
2855         if (hdev->dev_type == HCI_BREDR) {
2856                 /* Assume BR/EDR support until proven otherwise (such as
2857          * through reading supported features during init).
2858                  */
2859                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2860         }
2861
2862         write_lock(&hci_dev_list_lock);
2863         list_add(&hdev->list, &hci_dev_list);
2864         write_unlock(&hci_dev_list_lock);
2865
2866         hci_notify(hdev, HCI_DEV_REG);
2867         hci_dev_hold(hdev);
2868
2869         queue_work(hdev->req_workqueue, &hdev->power_on);
2870
2871         return id;
2872
2873 err_wqueue:
2874         destroy_workqueue(hdev->workqueue);
2875         destroy_workqueue(hdev->req_workqueue);
2876 err:
2877         ida_simple_remove(&hci_index_ida, hdev->id);
2878
2879         return error;
2880 }
2881 EXPORT_SYMBOL(hci_register_dev);
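
/* Example (editor's sketch): the canonical driver-side use of the
 * alloc/register pair above, modelled loosely on drivers such as
 * btusb; the my_* callbacks and 'data' are placeholders:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hci_set_drvdata(hdev, data);
 *	hdev->open  = my_open;		(called from hci_dev_do_open())
 *	hdev->close = my_close;
 *	hdev->send  = my_send;		(called from hci_send_frame())
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */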
2882
2883 /* Unregister HCI device */
2884 void hci_unregister_dev(struct hci_dev *hdev)
2885 {
2886         int i, id;
2887
2888         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2889
2890         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2891
2892         id = hdev->id;
2893
2894         write_lock(&hci_dev_list_lock);
2895         list_del(&hdev->list);
2896         write_unlock(&hci_dev_list_lock);
2897
2898         hci_dev_do_close(hdev);
2899
2900         for (i = 0; i < NUM_REASSEMBLY; i++)
2901                 kfree_skb(hdev->reassembly[i]);
2902
2903         cancel_work_sync(&hdev->power_on);
2904
2905         if (!test_bit(HCI_INIT, &hdev->flags) &&
2906             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2907                 hci_dev_lock(hdev);
2908                 mgmt_index_removed(hdev);
2909                 hci_dev_unlock(hdev);
2910         }
2911
2912         /* mgmt_index_removed should take care of emptying the
2913          * pending list */
2914         BUG_ON(!list_empty(&hdev->mgmt_pending));
2915
2916         hci_notify(hdev, HCI_DEV_UNREG);
2917
2918         if (hdev->rfkill) {
2919                 rfkill_unregister(hdev->rfkill);
2920                 rfkill_destroy(hdev->rfkill);
2921         }
2922
2923         device_del(&hdev->dev);
2924
2925         debugfs_remove_recursive(hdev->debugfs);
2926
2927         destroy_workqueue(hdev->workqueue);
2928         destroy_workqueue(hdev->req_workqueue);
2929
2930         hci_dev_lock(hdev);
2931         hci_blacklist_clear(hdev);
2932         hci_uuids_clear(hdev);
2933         hci_link_keys_clear(hdev);
2934         hci_smp_ltks_clear(hdev);
2935         hci_remote_oob_data_clear(hdev);
2936         hci_dev_unlock(hdev);
2937
2938         hci_dev_put(hdev);
2939
2940         ida_simple_remove(&hci_index_ida, id);
2941 }
2942 EXPORT_SYMBOL(hci_unregister_dev);
2943
2944 /* Suspend HCI device */
2945 int hci_suspend_dev(struct hci_dev *hdev)
2946 {
2947         hci_notify(hdev, HCI_DEV_SUSPEND);
2948         return 0;
2949 }
2950 EXPORT_SYMBOL(hci_suspend_dev);
2951
2952 /* Resume HCI device */
2953 int hci_resume_dev(struct hci_dev *hdev)
2954 {
2955         hci_notify(hdev, HCI_DEV_RESUME);
2956         return 0;
2957 }
2958 EXPORT_SYMBOL(hci_resume_dev);
2959
2960 /* Receive frame from HCI drivers */
2961 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2962 {
2963         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2964                       !test_bit(HCI_INIT, &hdev->flags))) {
2965                 kfree_skb(skb);
2966                 return -ENXIO;
2967         }
2968
2969         /* Incoming skb */
2970         bt_cb(skb)->incoming = 1;
2971
2972         /* Time stamp */
2973         __net_timestamp(skb);
2974
2975         skb_queue_tail(&hdev->rx_q, skb);
2976         queue_work(hdev->workqueue, &hdev->rx_work);
2977
2978         return 0;
2979 }
2980 EXPORT_SYMBOL(hci_recv_frame);
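
/* Example (editor's sketch): a driver's receive path feeds complete
 * frames in through hci_recv_frame() after tagging the packet type,
 * along these lines (buf/len are whatever the transport delivered):
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	return hci_recv_frame(hdev, skb);
 *
 * Transports that only see an untyped byte stream use
 * hci_recv_stream_fragment() below instead.
 */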
2981
2982 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2983                           int count, __u8 index)
2984 {
2985         int len = 0;
2986         int hlen = 0;
2987         int remain = count;
2988         struct sk_buff *skb;
2989         struct bt_skb_cb *scb;
2990
2991         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2992             index >= NUM_REASSEMBLY)
2993                 return -EILSEQ;
2994
2995         skb = hdev->reassembly[index];
2996
2997         if (!skb) {
2998                 switch (type) {
2999                 case HCI_ACLDATA_PKT:
3000                         len = HCI_MAX_FRAME_SIZE;
3001                         hlen = HCI_ACL_HDR_SIZE;
3002                         break;
3003                 case HCI_EVENT_PKT:
3004                         len = HCI_MAX_EVENT_SIZE;
3005                         hlen = HCI_EVENT_HDR_SIZE;
3006                         break;
3007                 case HCI_SCODATA_PKT:
3008                         len = HCI_MAX_SCO_SIZE;
3009                         hlen = HCI_SCO_HDR_SIZE;
3010                         break;
3011                 }
3012
3013                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3014                 if (!skb)
3015                         return -ENOMEM;
3016
3017                 scb = (void *) skb->cb;
3018                 scb->expect = hlen;
3019                 scb->pkt_type = type;
3020
3021                 hdev->reassembly[index] = skb;
3022         }
3023
3024         while (count) {
3025                 scb = (void *) skb->cb;
3026                 len = min_t(uint, scb->expect, count);
3027
3028                 memcpy(skb_put(skb, len), data, len);
3029
3030                 count -= len;
3031                 data += len;
3032                 scb->expect -= len;
3033                 remain = count;
3034
3035                 switch (type) {
3036                 case HCI_EVENT_PKT:
3037                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3038                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3039                                 scb->expect = h->plen;
3040
3041                                 if (skb_tailroom(skb) < scb->expect) {
3042                                         kfree_skb(skb);
3043                                         hdev->reassembly[index] = NULL;
3044                                         return -ENOMEM;
3045                                 }
3046                         }
3047                         break;
3048
3049                 case HCI_ACLDATA_PKT:
3050                         if (skb->len  == HCI_ACL_HDR_SIZE) {
3051                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3052                                 scb->expect = __le16_to_cpu(h->dlen);
3053
3054                                 if (skb_tailroom(skb) < scb->expect) {
3055                                         kfree_skb(skb);
3056                                         hdev->reassembly[index] = NULL;
3057                                         return -ENOMEM;
3058                                 }
3059                         }
3060                         break;
3061
3062                 case HCI_SCODATA_PKT:
3063                         if (skb->len == HCI_SCO_HDR_SIZE) {
3064                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3065                                 scb->expect = h->dlen;
3066
3067                                 if (skb_tailroom(skb) < scb->expect) {
3068                                         kfree_skb(skb);
3069                                         hdev->reassembly[index] = NULL;
3070                                         return -ENOMEM;
3071                                 }
3072                         }
3073                         break;
3074                 }
3075
3076                 if (scb->expect == 0) {
3077                         /* Complete frame */
3078
3079                         bt_cb(skb)->pkt_type = type;
3080                         hci_recv_frame(hdev, skb);
3081
3082                         hdev->reassembly[index] = NULL;
3083                         return remain;
3084                 }
3085         }
3086
3087         return remain;
3088 }
3089
3090 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3091 {
3092         int rem = 0;
3093
3094         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3095                 return -EILSEQ;
3096
3097         while (count) {
3098                 rem = hci_reassembly(hdev, type, data, count, type - 1);
3099                 if (rem < 0)
3100                         return rem;
3101
3102                 data += (count - rem);
3103                 count = rem;
3104         }
3105
3106         return rem;
3107 }
3108 EXPORT_SYMBOL(hci_recv_fragment);
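
/* Editor's note: hdev->reassembly[] holds one partially assembled skb
 * per slot. hci_recv_fragment() indexes it with (type - 1), so ACL,
 * SCO and event packets occupy slots 1..3, leaving slot 0 free for
 * the byte-stream case below (STREAM_REASSEMBLY).
 */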
3109
3110 #define STREAM_REASSEMBLY 0
3111
3112 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3113 {
3114         int type;
3115         int rem = 0;
3116
3117         while (count) {
3118                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3119
3120                 if (!skb) {
3121                         struct { char type; } *pkt;
3122
3123                         /* Start of the frame */
3124                         pkt = data;
3125                         type = pkt->type;
3126
3127                         data++;
3128                         count--;
3129                 } else
3130                         type = bt_cb(skb)->pkt_type;
3131
3132                 rem = hci_reassembly(hdev, type, data, count,
3133                                      STREAM_REASSEMBLY);
3134                 if (rem < 0)
3135                         return rem;
3136
3137                 data += (count - rem);
3138                 count = rem;
3139         }
3140
3141         return rem;
3142 }
3143 EXPORT_SYMBOL(hci_recv_stream_fragment);
3144
3145 /* ---- Interface to upper protocols ---- */
3146
3147 int hci_register_cb(struct hci_cb *cb)
3148 {
3149         BT_DBG("%p name %s", cb, cb->name);
3150
3151         write_lock(&hci_cb_list_lock);
3152         list_add(&cb->list, &hci_cb_list);
3153         write_unlock(&hci_cb_list_lock);
3154
3155         return 0;
3156 }
3157 EXPORT_SYMBOL(hci_register_cb);
3158
3159 int hci_unregister_cb(struct hci_cb *cb)
3160 {
3161         BT_DBG("%p name %s", cb, cb->name);
3162
3163         write_lock(&hci_cb_list_lock);
3164         list_del(&cb->list);
3165         write_unlock(&hci_cb_list_lock);
3166
3167         return 0;
3168 }
3169 EXPORT_SYMBOL(hci_unregister_cb);
3170
3171 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3172 {
3173         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3174
3175         /* Time stamp */
3176         __net_timestamp(skb);
3177
3178         /* Send copy to monitor */
3179         hci_send_to_monitor(hdev, skb);
3180
3181         if (atomic_read(&hdev->promisc)) {
3182                 /* Send copy to the sockets */
3183                 hci_send_to_sock(hdev, skb);
3184         }
3185
3186         /* Get rid of skb owner, prior to sending to the driver. */
3187         skb_orphan(skb);
3188
3189         if (hdev->send(hdev, skb) < 0)
3190                 BT_ERR("%s sending frame failed", hdev->name);
3191 }
3192
3193 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3194 {
3195         skb_queue_head_init(&req->cmd_q);
3196         req->hdev = hdev;
3197         req->err = 0;
3198 }
3199
3200 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3201 {
3202         struct hci_dev *hdev = req->hdev;
3203         struct sk_buff *skb;
3204         unsigned long flags;
3205
3206         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3207
3208         /* If an error occurred during request building, remove all HCI
3209          * commands queued on the HCI request queue.
3210          */
3211         if (req->err) {
3212                 skb_queue_purge(&req->cmd_q);
3213                 return req->err;
3214         }
3215
3216         /* Do not allow empty requests */
3217         if (skb_queue_empty(&req->cmd_q))
3218                 return -ENODATA;
3219
3220         skb = skb_peek_tail(&req->cmd_q);
3221         bt_cb(skb)->req.complete = complete;
3222
3223         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3224         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3225         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3226
3227         queue_work(hdev->workqueue, &hdev->cmd_work);
3228
3229         return 0;
3230 }
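
/* Example (editor's sketch): a typical build-and-run cycle of the
 * request API above, in the same shape as le_scan_disable_work()
 * earlier in this file ('cp' is the command parameter struct and
 * my_complete a hci_req_complete_t callback):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 *	if (err)
 *		BT_ERR("request failed: err %d", err);
 */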
3231
3232 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3233                                        u32 plen, const void *param)
3234 {
3235         int len = HCI_COMMAND_HDR_SIZE + plen;
3236         struct hci_command_hdr *hdr;
3237         struct sk_buff *skb;
3238
3239         skb = bt_skb_alloc(len, GFP_ATOMIC);
3240         if (!skb)
3241                 return NULL;
3242
3243         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3244         hdr->opcode = cpu_to_le16(opcode);
3245         hdr->plen   = plen;
3246
3247         if (plen)
3248                 memcpy(skb_put(skb, plen), param, plen);
3249
3250         BT_DBG("skb len %d", skb->len);
3251
3252         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3253
3254         return skb;
3255 }
3256
3257 /* Send HCI command */
3258 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3259                  const void *param)
3260 {
3261         struct sk_buff *skb;
3262
3263         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3264
3265         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3266         if (!skb) {
3267                 BT_ERR("%s no memory for command", hdev->name);
3268                 return -ENOMEM;
3269         }
3270
3271         /* Stand-alone HCI commands must be flagged as
3272          * single-command requests.
3273          */
3274         bt_cb(skb)->req.start = true;
3275
3276         skb_queue_tail(&hdev->cmd_q, skb);
3277         queue_work(hdev->workqueue, &hdev->cmd_work);
3278
3279         return 0;
3280 }
3281
3282 /* Queue a command to an asynchronous HCI request */
3283 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3284                     const void *param, u8 event)
3285 {
3286         struct hci_dev *hdev = req->hdev;
3287         struct sk_buff *skb;
3288
3289         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3290
3291         /* If an error occurred during request building, there is no point in
3292          * queueing the HCI command. We can simply return.
3293          */
3294         if (req->err)
3295                 return;
3296
3297         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3298         if (!skb) {
3299                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3300                        hdev->name, opcode);
3301                 req->err = -ENOMEM;
3302                 return;
3303         }
3304
3305         if (skb_queue_empty(&req->cmd_q))
3306                 bt_cb(skb)->req.start = true;
3307
3308         bt_cb(skb)->req.event = event;
3309
3310         skb_queue_tail(&req->cmd_q, skb);
3311 }
3312
3313 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3314                  const void *param)
3315 {
3316         hci_req_add_ev(req, opcode, plen, param, 0);
3317 }
3318
3319 /* Get data from the previously sent command */
3320 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3321 {
3322         struct hci_command_hdr *hdr;
3323
3324         if (!hdev->sent_cmd)
3325                 return NULL;
3326
3327         hdr = (void *) hdev->sent_cmd->data;
3328
3329         if (hdr->opcode != cpu_to_le16(opcode))
3330                 return NULL;
3331
3332         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3333
3334         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3335 }
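
/* Example (illustrative): an event handler recovering the parameters
 * of the command it completes.  The opcode is a real one; the pointer
 * is NULL when the last sent command does not match:
 *
 *	__u8 *mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	if (mode)
 *		BT_DBG("requested scan mode was 0x%2.2x", *mode);
 */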
3336
3337 /* Send ACL data */
3338 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3339 {
3340         struct hci_acl_hdr *hdr;
3341         int len = skb->len;
3342
3343         skb_push(skb, HCI_ACL_HDR_SIZE);
3344         skb_reset_transport_header(skb);
3345         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3346         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3347         hdr->dlen   = cpu_to_le16(len);
3348 }
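
/* The packed handle above keeps the 12-bit connection handle in bits
 * 0-11 and the 4-bit boundary/broadcast flags in bits 12-15.  Worked
 * example (illustrative): handle 0x002a with ACL_START (0x02) packs to
 * 0x202a, and the receive path splits it apart with the matching
 * macros:
 *
 *	hci_handle(0x202a) == 0x002a
 *	hci_flags(0x202a)  == 0x02
 */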
3349
3350 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3351                           struct sk_buff *skb, __u16 flags)
3352 {
3353         struct hci_conn *conn = chan->conn;
3354         struct hci_dev *hdev = conn->hdev;
3355         struct sk_buff *list;
3356
3357         skb->len = skb_headlen(skb);
3358         skb->data_len = 0;
3359
3360         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3361
3362         switch (hdev->dev_type) {
3363         case HCI_BREDR:
3364                 hci_add_acl_hdr(skb, conn->handle, flags);
3365                 break;
3366         case HCI_AMP:
3367                 hci_add_acl_hdr(skb, chan->handle, flags);
3368                 break;
3369         default:
3370                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3371                 return;
3372         }
3373
3374         list = skb_shinfo(skb)->frag_list;
3375         if (!list) {
3376                 /* Non-fragmented */
3377                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3378
3379                 skb_queue_tail(queue, skb);
3380         } else {
3381                 /* Fragmented */
3382                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3383
3384                 skb_shinfo(skb)->frag_list = NULL;
3385
3386                 /* Queue all fragments atomically */
3387                 spin_lock(&queue->lock);
3388
3389                 __skb_queue_tail(queue, skb);
3390
3391                 flags &= ~ACL_START;
3392                 flags |= ACL_CONT;
3393                 do {
3394                         skb = list; list = list->next;
3395
3396                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3397                         hci_add_acl_hdr(skb, conn->handle, flags);
3398
3399                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3400
3401                         __skb_queue_tail(queue, skb);
3402                 } while (list);
3403
3404                 spin_unlock(&queue->lock);
3405         }
3406 }
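
/* Illustrative queueing order for a fragmented skb: the head fragment
 * keeps the caller's flags (typically ACL_START) and every fragment on
 * the frag_list is re-flagged ACL_CONT, so the stream on the wire is
 *
 *	[ACL_START][ACL_CONT][ACL_CONT]...
 *
 * which lets the receiving side delimit the higher-layer PDU.
 */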
3407
3408 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3409 {
3410         struct hci_dev *hdev = chan->conn->hdev;
3411
3412         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3413
3414         hci_queue_acl(chan, &chan->data_q, skb, flags);
3415
3416         queue_work(hdev->workqueue, &hdev->tx_work);
3417 }
3418
3419 /* Send SCO data */
3420 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3421 {
3422         struct hci_dev *hdev = conn->hdev;
3423         struct hci_sco_hdr hdr;
3424
3425         BT_DBG("%s len %d", hdev->name, skb->len);
3426
3427         hdr.handle = cpu_to_le16(conn->handle);
3428         hdr.dlen   = skb->len;
3429
3430         skb_push(skb, HCI_SCO_HDR_SIZE);
3431         skb_reset_transport_header(skb);
3432         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3433
3434         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3435
3436         skb_queue_tail(&conn->data_q, skb);
3437         queue_work(hdev->workqueue, &hdev->tx_work);
3438 }
3439
3440 /* ---- HCI TX task (outgoing data) ---- */
3441
3442 /* HCI Connection scheduler */
3443 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3444                                      int *quote)
3445 {
3446         struct hci_conn_hash *h = &hdev->conn_hash;
3447         struct hci_conn *conn = NULL, *c;
3448         unsigned int num = 0, min = ~0;
3449
3450         /* We don't have to lock the device here. Connections are always
3451          * added and removed with the TX task disabled. */
3452
3453         rcu_read_lock();
3454
3455         list_for_each_entry_rcu(c, &h->list, list) {
3456                 if (c->type != type || skb_queue_empty(&c->data_q))
3457                         continue;
3458
3459                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3460                         continue;
3461
3462                 num++;
3463
3464                 if (c->sent < min) {
3465                         min  = c->sent;
3466                         conn = c;
3467                 }
3468
3469                 if (hci_conn_num(hdev, type) == num)
3470                         break;
3471         }
3472
3473         rcu_read_unlock();
3474
3475         if (conn) {
3476                 int cnt, q;
3477
3478                 switch (conn->type) {
3479                 case ACL_LINK:
3480                         cnt = hdev->acl_cnt;
3481                         break;
3482                 case SCO_LINK:
3483                 case ESCO_LINK:
3484                         cnt = hdev->sco_cnt;
3485                         break;
3486                 case LE_LINK:
3487                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3488                         break;
3489                 default:
3490                         cnt = 0;
3491                         BT_ERR("Unknown link type");
3492                 }
3493
3494                 q = cnt / num;
3495                 *quote = q ? q : 1;
3496         } else
3497                 *quote = 0;
3498
3499         BT_DBG("conn %p quote %d", conn, *quote);
3500         return conn;
3501 }
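
/* Illustrative quota arithmetic: with hdev->acl_cnt == 8 free packet
 * slots shared by num == 3 busy ACL connections, the least-served
 * connection is granted quote = 8 / 3 = 2 packets this round; since
 * integer division rounds down, a selected connection always gets a
 * quote of at least one packet.
 */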
3502
3503 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3504 {
3505         struct hci_conn_hash *h = &hdev->conn_hash;
3506         struct hci_conn *c;
3507
3508         BT_ERR("%s link tx timeout", hdev->name);
3509
3510         rcu_read_lock();
3511
3512         /* Kill stalled connections */
3513         list_for_each_entry_rcu(c, &h->list, list) {
3514                 if (c->type == type && c->sent) {
3515                         BT_ERR("%s killing stalled connection %pMR",
3516                                hdev->name, &c->dst);
3517                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3518                 }
3519         }
3520
3521         rcu_read_unlock();
3522 }
3523
3524 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3525                                       int *quote)
3526 {
3527         struct hci_conn_hash *h = &hdev->conn_hash;
3528         struct hci_chan *chan = NULL;
3529         unsigned int num = 0, min = ~0, cur_prio = 0;
3530         struct hci_conn *conn;
3531         int cnt, q, conn_num = 0;
3532
3533         BT_DBG("%s", hdev->name);
3534
3535         rcu_read_lock();
3536
3537         list_for_each_entry_rcu(conn, &h->list, list) {
3538                 struct hci_chan *tmp;
3539
3540                 if (conn->type != type)
3541                         continue;
3542
3543                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3544                         continue;
3545
3546                 conn_num++;
3547
3548                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3549                         struct sk_buff *skb;
3550
3551                         if (skb_queue_empty(&tmp->data_q))
3552                                 continue;
3553
3554                         skb = skb_peek(&tmp->data_q);
3555                         if (skb->priority < cur_prio)
3556                                 continue;
3557
3558                         if (skb->priority > cur_prio) {
3559                                 num = 0;
3560                                 min = ~0;
3561                                 cur_prio = skb->priority;
3562                         }
3563
3564                         num++;
3565
3566                         if (conn->sent < min) {
3567                                 min  = conn->sent;
3568                                 chan = tmp;
3569                         }
3570                 }
3571
3572                 if (hci_conn_num(hdev, type) == conn_num)
3573                         break;
3574         }
3575
3576         rcu_read_unlock();
3577
3578         if (!chan)
3579                 return NULL;
3580
3581         switch (chan->conn->type) {
3582         case ACL_LINK:
3583                 cnt = hdev->acl_cnt;
3584                 break;
3585         case AMP_LINK:
3586                 cnt = hdev->block_cnt;
3587                 break;
3588         case SCO_LINK:
3589         case ESCO_LINK:
3590                 cnt = hdev->sco_cnt;
3591                 break;
3592         case LE_LINK:
3593                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3594                 break;
3595         default:
3596                 cnt = 0;
3597                 BT_ERR("Unknown link type");
3598         }
3599
3600         q = cnt / num;
3601         *quote = q ? q : 1;
3602         BT_DBG("chan %p quote %d", chan, *quote);
3603         return chan;
3604 }
3605
3606 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3607 {
3608         struct hci_conn_hash *h = &hdev->conn_hash;
3609         struct hci_conn *conn;
3610         int num = 0;
3611
3612         BT_DBG("%s", hdev->name);
3613
3614         rcu_read_lock();
3615
3616         list_for_each_entry_rcu(conn, &h->list, list) {
3617                 struct hci_chan *chan;
3618
3619                 if (conn->type != type)
3620                         continue;
3621
3622                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3623                         continue;
3624
3625                 num++;
3626
3627                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3628                         struct sk_buff *skb;
3629
3630                         if (chan->sent) {
3631                                 chan->sent = 0;
3632                                 continue;
3633                         }
3634
3635                         if (skb_queue_empty(&chan->data_q))
3636                                 continue;
3637
3638                         skb = skb_peek(&chan->data_q);
3639                         if (skb->priority >= HCI_PRIO_MAX - 1)
3640                                 continue;
3641
3642                         skb->priority = HCI_PRIO_MAX - 1;
3643
3644                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3645                                skb->priority);
3646                 }
3647
3648                 if (hci_conn_num(hdev, type) == num)
3649                         break;
3650         }
3651
3652         rcu_read_unlock();
3653
3654 }
3655
3656 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3657 {
3658         /* Calculate count of blocks used by this packet */
3659         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3660 }
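
/* Illustrative arithmetic: with hdev->block_len == 256 and a 704-byte
 * skb (4-byte ACL header + 700 bytes of payload), the payload occupies
 * DIV_ROUND_UP(700, 256) == 3 controller buffer blocks.
 */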
3661
3662 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3663 {
3664         if (!test_bit(HCI_RAW, &hdev->flags)) {
3665                 /* ACL tx timeout must be longer than the maximum
3666                  * link supervision timeout (40.9 seconds) */
3667                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3668                                        HCI_ACL_TX_TIMEOUT))
3669                         hci_link_tx_to(hdev, ACL_LINK);
3670         }
3671 }
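
/* The 40.9 seconds quoted above is the largest encodable link
 * supervision timeout: 0xffff slots * 0.625 ms per slot ~= 40.96 s.
 * HCI_ACL_TX_TIMEOUT is defined as 45 seconds to stay above it.
 */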
3672
3673 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3674 {
3675         unsigned int cnt = hdev->acl_cnt;
3676         struct hci_chan *chan;
3677         struct sk_buff *skb;
3678         int quote;
3679
3680         __check_timeout(hdev, cnt);
3681
3682         while (hdev->acl_cnt &&
3683                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3684                 u32 priority = (skb_peek(&chan->data_q))->priority;
3685                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3686                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3687                                skb->len, skb->priority);
3688
3689                         /* Stop if priority has changed */
3690                         if (skb->priority < priority)
3691                                 break;
3692
3693                         skb = skb_dequeue(&chan->data_q);
3694
3695                         hci_conn_enter_active_mode(chan->conn,
3696                                                    bt_cb(skb)->force_active);
3697
3698                         hci_send_frame(hdev, skb);
3699                         hdev->acl_last_tx = jiffies;
3700
3701                         hdev->acl_cnt--;
3702                         chan->sent++;
3703                         chan->conn->sent++;
3704                 }
3705         }
3706
3707         if (cnt != hdev->acl_cnt)
3708                 hci_prio_recalculate(hdev, ACL_LINK);
3709 }
3710
3711 static void hci_sched_acl_blk(struct hci_dev *hdev)
3712 {
3713         unsigned int cnt = hdev->block_cnt;
3714         struct hci_chan *chan;
3715         struct sk_buff *skb;
3716         int quote;
3717         u8 type;
3718
3719         __check_timeout(hdev, cnt);
3720
3721         BT_DBG("%s", hdev->name);
3722
3723         if (hdev->dev_type == HCI_AMP)
3724                 type = AMP_LINK;
3725         else
3726                 type = ACL_LINK;
3727
3728         while (hdev->block_cnt > 0 &&
3729                (chan = hci_chan_sent(hdev, type, &quote))) {
3730                 u32 priority = (skb_peek(&chan->data_q))->priority;
3731                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3732                         int blocks;
3733
3734                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3735                                skb->len, skb->priority);
3736
3737                         /* Stop if priority has changed */
3738                         if (skb->priority < priority)
3739                                 break;
3740
3741                         skb = skb_dequeue(&chan->data_q);
3742
3743                         blocks = __get_blocks(hdev, skb);
3744                         if (blocks > hdev->block_cnt)
3745                                 return;
3746
3747                         hci_conn_enter_active_mode(chan->conn,
3748                                                    bt_cb(skb)->force_active);
3749
3750                         hci_send_frame(hdev, skb);
3751                         hdev->acl_last_tx = jiffies;
3752
3753                         hdev->block_cnt -= blocks;
3754                         quote -= blocks;
3755
3756                         chan->sent += blocks;
3757                         chan->conn->sent += blocks;
3758                 }
3759         }
3760
3761         if (cnt != hdev->block_cnt)
3762                 hci_prio_recalculate(hdev, type);
3763 }
3764
3765 static void hci_sched_acl(struct hci_dev *hdev)
3766 {
3767         BT_DBG("%s", hdev->name);
3768
3769         /* No ACL link over BR/EDR controller */
3770         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3771                 return;
3772
3773         /* No AMP link over AMP controller */
3774         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3775                 return;
3776
3777         switch (hdev->flow_ctl_mode) {
3778         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3779                 hci_sched_acl_pkt(hdev);
3780                 break;
3781
3782         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3783                 hci_sched_acl_blk(hdev);
3784                 break;
3785         }
3786 }
3787
3788 /* Schedule SCO */
3789 static void hci_sched_sco(struct hci_dev *hdev)
3790 {
3791         struct hci_conn *conn;
3792         struct sk_buff *skb;
3793         int quote;
3794
3795         BT_DBG("%s", hdev->name);
3796
3797         if (!hci_conn_num(hdev, SCO_LINK))
3798                 return;
3799
3800         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3801                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3802                         BT_DBG("skb %p len %d", skb, skb->len);
3803                         hci_send_frame(hdev, skb);
3804
3805                         conn->sent++;
3806                         if (conn->sent == ~0)
3807                                 conn->sent = 0;
3808                 }
3809         }
3810 }
3811
3812 static void hci_sched_esco(struct hci_dev *hdev)
3813 {
3814         struct hci_conn *conn;
3815         struct sk_buff *skb;
3816         int quote;
3817
3818         BT_DBG("%s", hdev->name);
3819
3820         if (!hci_conn_num(hdev, ESCO_LINK))
3821                 return;
3822
3823         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3824                                                      &quote))) {
3825                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3826                         BT_DBG("skb %p len %d", skb, skb->len);
3827                         hci_send_frame(hdev, skb);
3828
3829                         conn->sent++;
3830                         if (conn->sent == ~0)
3831                                 conn->sent = 0;
3832                 }
3833         }
3834 }
3835
3836 static void hci_sched_le(struct hci_dev *hdev)
3837 {
3838         struct hci_chan *chan;
3839         struct sk_buff *skb;
3840         int quote, cnt, tmp;
3841
3842         BT_DBG("%s", hdev->name);
3843
3844         if (!hci_conn_num(hdev, LE_LINK))
3845                 return;
3846
3847         if (!test_bit(HCI_RAW, &hdev->flags)) {
3848                 /* LE tx timeout must be longer than the maximum
3849                  * link supervision timeout (40.9 seconds) */
3850                 if (!hdev->le_cnt && hdev->le_pkts &&
3851                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3852                         hci_link_tx_to(hdev, LE_LINK);
3853         }
3854
3855         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3856         tmp = cnt;
3857         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3858                 u32 priority = (skb_peek(&chan->data_q))->priority;
3859                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3860                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3861                                skb->len, skb->priority);
3862
3863                         /* Stop if priority has changed */
3864                         if (skb->priority < priority)
3865                                 break;
3866
3867                         skb = skb_dequeue(&chan->data_q);
3868
3869                         hci_send_frame(hdev, skb);
3870                         hdev->le_last_tx = jiffies;
3871
3872                         cnt--;
3873                         chan->sent++;
3874                         chan->conn->sent++;
3875                 }
3876         }
3877
3878         if (hdev->le_pkts)
3879                 hdev->le_cnt = cnt;
3880         else
3881                 hdev->acl_cnt = cnt;
3882
3883         if (cnt != tmp)
3884                 hci_prio_recalculate(hdev, LE_LINK);
3885 }
3886
3887 static void hci_tx_work(struct work_struct *work)
3888 {
3889         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3890         struct sk_buff *skb;
3891
3892         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3893                hdev->sco_cnt, hdev->le_cnt);
3894
3895         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3896                 /* Schedule queues and send stuff to HCI driver */
3897                 hci_sched_acl(hdev);
3898                 hci_sched_sco(hdev);
3899                 hci_sched_esco(hdev);
3900                 hci_sched_le(hdev);
3901         }
3902
3903         /* Send next queued raw (unknown type) packet */
3904         while ((skb = skb_dequeue(&hdev->raw_q)))
3905                 hci_send_frame(hdev, skb);
3906 }
3907
3908 /* ----- HCI RX task (incoming data processing) ----- */
3909
3910 /* ACL data packet */
3911 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3912 {
3913         struct hci_acl_hdr *hdr = (void *) skb->data;
3914         struct hci_conn *conn;
3915         __u16 handle, flags;
3916
3917         skb_pull(skb, HCI_ACL_HDR_SIZE);
3918
3919         handle = __le16_to_cpu(hdr->handle);
3920         flags  = hci_flags(handle);
3921         handle = hci_handle(handle);
3922
3923         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3924                handle, flags);
3925
3926         hdev->stat.acl_rx++;
3927
3928         hci_dev_lock(hdev);
3929         conn = hci_conn_hash_lookup_handle(hdev, handle);
3930         hci_dev_unlock(hdev);
3931
3932         if (conn) {
3933                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3934
3935                 /* Send to upper protocol */
3936                 l2cap_recv_acldata(conn, skb, flags);
3937                 return;
3938         } else {
3939                 BT_ERR("%s ACL packet for unknown connection handle %d",
3940                        hdev->name, handle);
3941         }
3942
3943         kfree_skb(skb);
3944 }
3945
3946 /* SCO data packet */
3947 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3948 {
3949         struct hci_sco_hdr *hdr = (void *) skb->data;
3950         struct hci_conn *conn;
3951         __u16 handle;
3952
3953         skb_pull(skb, HCI_SCO_HDR_SIZE);
3954
3955         handle = __le16_to_cpu(hdr->handle);
3956
3957         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3958
3959         hdev->stat.sco_rx++;
3960
3961         hci_dev_lock(hdev);
3962         conn = hci_conn_hash_lookup_handle(hdev, handle);
3963         hci_dev_unlock(hdev);
3964
3965         if (conn) {
3966                 /* Send to upper protocol */
3967                 sco_recv_scodata(conn, skb);
3968                 return;
3969         } else {
3970                 BT_ERR("%s SCO packet for unknown connection handle %d",
3971                        hdev->name, handle);
3972         }
3973
3974         kfree_skb(skb);
3975 }
3976
3977 static bool hci_req_is_complete(struct hci_dev *hdev)
3978 {
3979         struct sk_buff *skb;
3980
3981         skb = skb_peek(&hdev->cmd_q);
3982         if (!skb)
3983                 return true;
3984
3985         return bt_cb(skb)->req.start;
3986 }
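
/* Illustrative command queue layout: req.start tags the first command
 * of each request, so with
 *
 *	cmd_q: [start A1][A2][A3][start B1][B2]
 *
 * the A-request is reported complete only once A1-A3 have all been
 * dequeued and the head of cmd_q is the start-tagged B1 (or the queue
 * has drained entirely).
 */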
3987
3988 static void hci_resend_last(struct hci_dev *hdev)
3989 {
3990         struct hci_command_hdr *sent;
3991         struct sk_buff *skb;
3992         u16 opcode;
3993
3994         if (!hdev->sent_cmd)
3995                 return;
3996
3997         sent = (void *) hdev->sent_cmd->data;
3998         opcode = __le16_to_cpu(sent->opcode);
3999         if (opcode == HCI_OP_RESET)
4000                 return;
4001
4002         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4003         if (!skb)
4004                 return;
4005
4006         skb_queue_head(&hdev->cmd_q, skb);
4007         queue_work(hdev->workqueue, &hdev->cmd_work);
4008 }
4009
4010 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4011 {
4012         hci_req_complete_t req_complete = NULL;
4013         struct sk_buff *skb;
4014         unsigned long flags;
4015
4016         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4017
4018         /* If the completed command doesn't match the last one that was
4019          * sent, we need to do special handling of it.
4020          */
4021         if (!hci_sent_cmd_data(hdev, opcode)) {
4022                 /* Some CSR-based controllers generate a spontaneous
4023                  * reset complete event during init and any pending
4024                  * command will never be completed. In such a case we
4025                  * need to resend whatever was the last sent
4026                  * command.
4027                  */
4028                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4029                         hci_resend_last(hdev);
4030
4031                 return;
4032         }
4033
4034         /* If the command succeeded and there are still more commands in
4035          * this request, the request is not yet complete.
4036          */
4037         if (!status && !hci_req_is_complete(hdev))
4038                 return;
4039
4040         /* If this was the last command in a request, the complete
4041          * callback would be found in hdev->sent_cmd instead of the
4042          * command queue (hdev->cmd_q).
4043          */
4044         if (hdev->sent_cmd) {
4045                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4046
4047                 if (req_complete) {
4048                         /* We must set the complete callback to NULL to
4049                          * avoid calling the callback more than once if
4050                          * this function gets called again.
4051                          */
4052                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4053
4054                         goto call_complete;
4055                 }
4056         }
4057
4058         /* Remove all pending commands belonging to this request */
4059         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4060         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4061                 if (bt_cb(skb)->req.start) {
4062                         __skb_queue_head(&hdev->cmd_q, skb);
4063                         break;
4064                 }
4065
4066                 req_complete = bt_cb(skb)->req.complete;
4067                 kfree_skb(skb);
4068         }
4069         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4070
4071 call_complete:
4072         if (req_complete)
4073                 req_complete(hdev, status);
4074 }
4075
4076 static void hci_rx_work(struct work_struct *work)
4077 {
4078         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4079         struct sk_buff *skb;
4080
4081         BT_DBG("%s", hdev->name);
4082
4083         while ((skb = skb_dequeue(&hdev->rx_q))) {
4084                 /* Send copy to monitor */
4085                 hci_send_to_monitor(hdev, skb);
4086
4087                 if (atomic_read(&hdev->promisc)) {
4088                         /* Send copy to the sockets */
4089                         hci_send_to_sock(hdev, skb);
4090                 }
4091
4092                 if (test_bit(HCI_RAW, &hdev->flags) ||
4093                     test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4094                         kfree_skb(skb);
4095                         continue;
4096                 }
4097
4098                 if (test_bit(HCI_INIT, &hdev->flags)) {
4099                         /* Don't process data packets in this state. */
4100                         switch (bt_cb(skb)->pkt_type) {
4101                         case HCI_ACLDATA_PKT:
4102                         case HCI_SCODATA_PKT:
4103                                 kfree_skb(skb);
4104                                 continue;
4105                         }
4106                 }
4107
4108                 /* Process frame */
4109                 switch (bt_cb(skb)->pkt_type) {
4110                 case HCI_EVENT_PKT:
4111                         BT_DBG("%s Event packet", hdev->name);
4112                         hci_event_packet(hdev, skb);
4113                         break;
4114
4115                 case HCI_ACLDATA_PKT:
4116                         BT_DBG("%s ACL data packet", hdev->name);
4117                         hci_acldata_packet(hdev, skb);
4118                         break;
4119
4120                 case HCI_SCODATA_PKT:
4121                         BT_DBG("%s SCO data packet", hdev->name);
4122                         hci_scodata_packet(hdev, skb);
4123                         break;
4124
4125                 default:
4126                         kfree_skb(skb);
4127                         break;
4128                 }
4129         }
4130 }
4131
4132 static void hci_cmd_work(struct work_struct *work)
4133 {
4134         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4135         struct sk_buff *skb;
4136
4137         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4138                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4139
4140         /* Send queued commands */
4141         if (atomic_read(&hdev->cmd_cnt)) {
4142                 skb = skb_dequeue(&hdev->cmd_q);
4143                 if (!skb)
4144                         return;
4145
4146                 kfree_skb(hdev->sent_cmd);
4147
4148                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4149                 if (hdev->sent_cmd) {
4150                         atomic_dec(&hdev->cmd_cnt);
4151                         hci_send_frame(hdev, skb);
4152                         if (test_bit(HCI_RESET, &hdev->flags))
4153                                 del_timer(&hdev->cmd_timer);
4154                         else
4155                                 mod_timer(&hdev->cmd_timer,
4156                                           jiffies + HCI_CMD_TIMEOUT);
4157                 } else {
4158                         skb_queue_head(&hdev->cmd_q, skb);
4159                         queue_work(hdev->workqueue, &hdev->cmd_work);
4160                 }
4161         }
4162 }