/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
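
/*
 * Illustrative usage sketch (not part of the original file): with
 * debugfs mounted at /sys/kernel/debug, the entry created for this
 * attribute can be toggled from userspace, e.g.:
 *
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	N
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing "Y" sends HCI_OP_ENABLE_DUT_MODE; writing "N" issues
 * HCI_OP_RESET instead, since the write handler above has no
 * dedicated command for leaving Device Under Test mode.
 */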

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open           = whitelist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
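
/*
 * Worked example of the byte-order conversion in uuids_show() above
 * (added for clarity): the 16-bit SPP UUID 0x1101 expands to
 * 00001101-0000-1000-8000-00805f9b34fb. Stored with reversed byte
 * order it reads fb 34 9b 5f 80 00 00 80 00 10 00 00 01 11 00 00, so
 * reversing the bytes again yields the big-endian order that the
 * %pUb format specifier expects.
 */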

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");
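
/*
 * Note (added for clarity): DEFINE_SIMPLE_ATTRIBUTE() generates the
 * file_operations for a u64 attribute from the get/set callbacks and
 * a printf format, so these entries read and write as plain text.
 * Illustrative sketch, assuming the entry is created under the usual
 * per-adapter debugfs directory:
 *
 *	# echo 5000 > /sys/kernel/debug/bluetooth/hci0/auto_accept_delay
 */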

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");
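
/*
 * Note (added for clarity): sniff intervals are expressed in baseband
 * slots of 0.625 ms and must be even, which is what the val % 2 checks
 * above enforce. For example, a value of 800 corresponds to
 * 800 * 0.625 ms = 500 ms.
 */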

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");
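
/*
 * Note (added for clarity): LE connection intervals are in units of
 * 1.25 ms, so the range 0x0006..0x0c80 checked above corresponds to
 * 7.5 ms .. 4 s (0x0c80 = 3200, and 3200 * 1.25 ms = 4000 ms).
 */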

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");
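
/*
 * Note (added for clarity): slave latency is a count of connection
 * events the slave may skip; the spec-defined maximum is 0x01f3 (499),
 * matching the bound checked above.
 */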

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");
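
/*
 * Note (added for clarity): the supervision timeout is in units of
 * 10 ms, so the range 0x000a..0x0c80 checked above corresponds to
 * 100 ms .. 32 s.
 */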

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");
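
/*
 * Note (added for clarity): the advertising channel map is a bitmask
 * of the three LE advertising channels (bit 0 = channel 37, bit 1 =
 * channel 38, bit 2 = channel 39), so only values 0x01..0x07 are
 * valid, as checked above.
 */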

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
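
/*
 * Illustrative sketch (not part of the original file): a caller could
 * issue a command synchronously like this, with skb->data then holding
 * the Command Complete return parameters (status byte first):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */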

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
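
/*
 * Illustrative sketch (not part of the original file): callers pass a
 * function that queues commands into the request, and hci_req_sync()
 * runs it and waits for completion, e.g.:
 *
 *	static void example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	}
 *
 *	err = hci_req_sync(hdev, example_req, 0, HCI_CMD_TIMEOUT);
 *
 * Here example_req is a hypothetical name used only for illustration;
 * the init-stage request functions below follow exactly this pattern.
 */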

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs: 0x7d00 is 32000 baseband
         * slots, and 32000 * 0.625 ms = 20000 ms.
         */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
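
/*
 * Note (added for clarity): the returned values correspond to the HCI
 * Write Inquiry Mode parameter: 0x00 = standard inquiry results,
 * 0x01 = inquiry results with RSSI, 0x02 = extended inquiry results.
 * The manufacturer/revision checks above appear to cover known
 * controllers that handle RSSI results without advertising the
 * corresponding LMP feature bit.
 */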

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set an event mask on pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled. To
                 * get proper debugging output, force max_page to at
                 * least 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If it is not supported, assume that
         * the controller has no actual support for stored link keys,
         * which makes this command redundant anyway.
1616          *
1617          * Some controllers indicate that they support handling deleting
1618          * stored link keys, but they don't. The quirk lets a driver
1619          * just disable this command.
1620          */
1621         if (hdev->commands[6] & 0x80 &&
1622             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1623                 struct hci_cp_delete_stored_link_key cp;
1624
1625                 bacpy(&cp.bdaddr, BDADDR_ANY);
1626                 cp.delete_all = 0x01;
1627                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1628                             sizeof(cp), &cp);
1629         }
1630
1631         if (hdev->commands[5] & 0x10)
1632                 hci_setup_link_policy(req);
1633
1634         if (lmp_le_capable(hdev)) {
1635                 u8 events[8];
1636
1637                 memset(events, 0, sizeof(events));
1638                 events[0] = 0x1f;
1639
1640                 /* If controller supports the Connection Parameters Request
1641                  * Link Layer Procedure, enable the corresponding event.
1642                  */
1643                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1644                         events[0] |= 0x20;      /* LE Remote Connection
1645                                                  * Parameter Request
1646                                                  */
1647
1648                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1649                             events);
1650
1651                 hci_set_le_support(req);
1652         }
1653
1654         /* Read features beyond page 1 if available */
1655         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1656                 struct hci_cp_read_local_ext_features cp;
1657
1658                 cp.page = p;
1659                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1660                             sizeof(cp), &cp);
1661         }
1662 }
1663
1664 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1665 {
1666         struct hci_dev *hdev = req->hdev;
1667
1668         /* Set event mask page 2 if the HCI command for it is supported */
1669         if (hdev->commands[22] & 0x04)
1670                 hci_set_event_mask_page_2(req);
1671
1672         /* Check for Synchronization Train support */
1673         if (lmp_sync_train_capable(hdev))
1674                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1675
1676         /* Enable Secure Connections if supported and configured */
1677         if ((lmp_sc_capable(hdev) ||
1678              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1679             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1680                 u8 support = 0x01;
1681                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1682                             sizeof(support), &support);
1683         }
1684 }
1685
1686 static int __hci_init(struct hci_dev *hdev)
1687 {
1688         int err;
1689
1690         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1691         if (err < 0)
1692                 return err;
1693
1694         /* The Device Under Test (DUT) mode is special and available for
1695          * all controller types. So just create it early on.
1696          */
1697         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1698                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1699                                     &dut_mode_fops);
1700         }
1701
1702         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1703          * dual-mode BR/EDR/LE controllers. AMP controllers only need
1704          * the first stage init.
1705          */
1706         if (hdev->dev_type != HCI_BREDR)
1707                 return 0;
1708
1709         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1710         if (err < 0)
1711                 return err;
1712
1713         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1714         if (err < 0)
1715                 return err;
1716
1717         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1718         if (err < 0)
1719                 return err;
1720
1721         /* Only create debugfs entries during the initial setup
1722          * phase and not every time the controller gets powered on.
1723          */
1724         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1725                 return 0;
1726
1727         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1728                             &features_fops);
1729         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1730                            &hdev->manufacturer);
1731         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1732         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1733         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1734                             &blacklist_fops);
1735         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1736                             &whitelist_fops);
1737         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1738
1739         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1740                             &conn_info_min_age_fops);
1741         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1742                             &conn_info_max_age_fops);
1743
1744         if (lmp_bredr_capable(hdev)) {
1745                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1746                                     hdev, &inquiry_cache_fops);
1747                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1748                                     hdev, &link_keys_fops);
1749                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1750                                     hdev, &dev_class_fops);
1751                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1752                                     hdev, &voice_setting_fops);
1753         }
1754
1755         if (lmp_ssp_capable(hdev)) {
1756                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1757                                     hdev, &auto_accept_delay_fops);
1758                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1759                                     hdev, &force_sc_support_fops);
1760                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1761                                     hdev, &sc_only_mode_fops);
1762         }
1763
1764         if (lmp_sniff_capable(hdev)) {
1765                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1766                                     hdev, &idle_timeout_fops);
1767                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1768                                     hdev, &sniff_min_interval_fops);
1769                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1770                                     hdev, &sniff_max_interval_fops);
1771         }
1772
1773         if (lmp_le_capable(hdev)) {
1774                 debugfs_create_file("identity", 0400, hdev->debugfs,
1775                                     hdev, &identity_fops);
1776                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1777                                     hdev, &rpa_timeout_fops);
1778                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1779                                     hdev, &random_address_fops);
1780                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1781                                     hdev, &static_address_fops);
1782
1783                 /* For controllers with a public address, provide a debug
1784                  * option to force the usage of the configured static
1785                  * address. By default the public address is used.
1786                  */
1787                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1788                         debugfs_create_file("force_static_address", 0644,
1789                                             hdev->debugfs, hdev,
1790                                             &force_static_address_fops);
1791
1792                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1793                                   &hdev->le_white_list_size);
1794                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1795                                     &white_list_fops);
1796                 debugfs_create_file("identity_resolving_keys", 0400,
1797                                     hdev->debugfs, hdev,
1798                                     &identity_resolving_keys_fops);
1799                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1800                                     hdev, &long_term_keys_fops);
1801                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1802                                     hdev, &conn_min_interval_fops);
1803                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1804                                     hdev, &conn_max_interval_fops);
1805                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1806                                     hdev, &conn_latency_fops);
1807                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1808                                     hdev, &supervision_timeout_fops);
1809                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1810                                     hdev, &adv_channel_map_fops);
1811                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1812                                     &device_list_fops);
1813                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1814                                    hdev->debugfs,
1815                                    &hdev->discov_interleaved_timeout);
1816         }
1817
1818         return 0;
1819 }
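
/* All of the entries created above land in debugfs under
 * /sys/kernel/debug/bluetooth/<hdev->name>/. Userspace sketch (not part
 * of this file), assuming debugfs is mounted in the usual place:
 *
 *      cat /sys/kernel/debug/bluetooth/hci0/features
 *      echo 1 > /sys/kernel/debug/bluetooth/hci0/force_sc_support
 */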
1820
1821 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1822 {
1823         struct hci_dev *hdev = req->hdev;
1824
1825         BT_DBG("%s %ld", hdev->name, opt);
1826
1827         /* Reset */
1828         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1829                 hci_reset_req(req, 0);
1830
1831         /* Read Local Version */
1832         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1833
1834         /* Read BD Address */
1835         if (hdev->set_bdaddr)
1836                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1837 }
1838
1839 static int __hci_unconf_init(struct hci_dev *hdev)
1840 {
1841         int err;
1842
1843         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1844                 return 0;
1845
1846         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1847         if (err < 0)
1848                 return err;
1849
1850         return 0;
1851 }
1852
1853 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1854 {
1855         __u8 scan = opt;
1856
1857         BT_DBG("%s %x", req->hdev->name, scan);
1858
1859         /* Inquiry and Page scans */
1860         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1861 }
1862
1863 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1864 {
1865         __u8 auth = opt;
1866
1867         BT_DBG("%s %x", req->hdev->name, auth);
1868
1869         /* Authentication */
1870         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1871 }
1872
1873 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1874 {
1875         __u8 encrypt = opt;
1876
1877         BT_DBG("%s %x", req->hdev->name, encrypt);
1878
1879         /* Encryption */
1880         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1881 }
1882
1883 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1884 {
1885         __le16 policy = cpu_to_le16(opt);
1886
1887         BT_DBG("%s %x", req->hdev->name, policy);
1888
1889         /* Default link policy */
1890         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1891 }
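
/* All of the small request builders above share one shape: decode the
 * opaque 'opt' cookie and queue one or more commands with hci_req_add();
 * hci_req_sync() later runs the batch and waits for completion. A
 * minimal sketch of a combined builder with a hypothetical name,
 * reusing only opcodes already used above:
 */
static void __maybe_unused hci_scan_auth_req(struct hci_request *req,
                                             unsigned long opt)
{
        __u8 scan = opt & 0xff;
        __u8 auth = (opt >> 8) & 0xff;

        /* Both commands are queued back to back as a single request */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}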
1892
1893 /* Get HCI device by index.
1894  * Device is held on return. */
1895 struct hci_dev *hci_dev_get(int index)
1896 {
1897         struct hci_dev *hdev = NULL, *d;
1898
1899         BT_DBG("%d", index);
1900
1901         if (index < 0)
1902                 return NULL;
1903
1904         read_lock(&hci_dev_list_lock);
1905         list_for_each_entry(d, &hci_dev_list, list) {
1906                 if (d->id == index) {
1907                         hdev = hci_dev_hold(d);
1908                         break;
1909                 }
1910         }
1911         read_unlock(&hci_dev_list_lock);
1912         return hdev;
1913 }
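
/* Every successful hci_dev_get() must be balanced by hci_dev_put() once
 * the caller is done with the device; the ioctl helpers below all
 * follow this pattern. A minimal sketch with a hypothetical name:
 */
static int __maybe_unused example_with_hdev(int index)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;

        /* ... use hdev while the reference is held ... */

        hci_dev_put(hdev);
        return 0;
}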
1914
1915 /* ---- Inquiry support ---- */
1916
1917 bool hci_discovery_active(struct hci_dev *hdev)
1918 {
1919         struct discovery_state *discov = &hdev->discovery;
1920
1921         switch (discov->state) {
1922         case DISCOVERY_FINDING:
1923         case DISCOVERY_RESOLVING:
1924                 return true;
1925
1926         default:
1927                 return false;
1928         }
1929 }
1930
1931 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1932 {
1933         int old_state = hdev->discovery.state;
1934
1935         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1936
1937         if (old_state == state)
1938                 return;
1939
1940         hdev->discovery.state = state;
1941
1942         switch (state) {
1943         case DISCOVERY_STOPPED:
1944                 hci_update_background_scan(hdev);
1945
1946                 if (old_state != DISCOVERY_STARTING)
1947                         mgmt_discovering(hdev, 0);
1948                 break;
1949         case DISCOVERY_STARTING:
1950                 break;
1951         case DISCOVERY_FINDING:
1952                 mgmt_discovering(hdev, 1);
1953                 break;
1954         case DISCOVERY_RESOLVING:
1955                 break;
1956         case DISCOVERY_STOPPING:
1957                 break;
1958         }
1959 }
1960
1961 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1962 {
1963         struct discovery_state *cache = &hdev->discovery;
1964         struct inquiry_entry *p, *n;
1965
1966         list_for_each_entry_safe(p, n, &cache->all, all) {
1967                 list_del(&p->all);
1968                 kfree(p);
1969         }
1970
1971         INIT_LIST_HEAD(&cache->unknown);
1972         INIT_LIST_HEAD(&cache->resolve);
1973 }
1974
1975 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1976                                                bdaddr_t *bdaddr)
1977 {
1978         struct discovery_state *cache = &hdev->discovery;
1979         struct inquiry_entry *e;
1980
1981         BT_DBG("cache %p, %pMR", cache, bdaddr);
1982
1983         list_for_each_entry(e, &cache->all, all) {
1984                 if (!bacmp(&e->data.bdaddr, bdaddr))
1985                         return e;
1986         }
1987
1988         return NULL;
1989 }
1990
1991 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1992                                                        bdaddr_t *bdaddr)
1993 {
1994         struct discovery_state *cache = &hdev->discovery;
1995         struct inquiry_entry *e;
1996
1997         BT_DBG("cache %p, %pMR", cache, bdaddr);
1998
1999         list_for_each_entry(e, &cache->unknown, list) {
2000                 if (!bacmp(&e->data.bdaddr, bdaddr))
2001                         return e;
2002         }
2003
2004         return NULL;
2005 }
2006
2007 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2008                                                        bdaddr_t *bdaddr,
2009                                                        int state)
2010 {
2011         struct discovery_state *cache = &hdev->discovery;
2012         struct inquiry_entry *e;
2013
2014         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2015
2016         list_for_each_entry(e, &cache->resolve, list) {
2017                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2018                         return e;
2019                 if (!bacmp(&e->data.bdaddr, bdaddr))
2020                         return e;
2021         }
2022
2023         return NULL;
2024 }
2025
2026 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2027                                       struct inquiry_entry *ie)
2028 {
2029         struct discovery_state *cache = &hdev->discovery;
2030         struct list_head *pos = &cache->resolve;
2031         struct inquiry_entry *p;
2032
2033         list_del(&ie->list);
2034
2035         list_for_each_entry(p, &cache->resolve, list) {
2036                 if (p->name_state != NAME_PENDING &&
2037                     abs(p->data.rssi) >= abs(ie->data.rssi))
2038                         break;
2039                 pos = &p->list;
2040         }
2041
2042         list_add(&ie->list, pos);
2043 }
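
/* Worked example: the resolve list is kept ordered by ascending |rssi|
 * (strongest signal first). With entries at -40, -55 and -70, an entry
 * re-filed with an RSSI of -60 is inserted between -55 and -70. Entries
 * whose name resolution is already in flight (NAME_PENDING) never
 * terminate the walk, so a re-filed entry always lands after them.
 */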
2044
2045 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2046                              bool name_known)
2047 {
2048         struct discovery_state *cache = &hdev->discovery;
2049         struct inquiry_entry *ie;
2050         u32 flags = 0;
2051
2052         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2053
2054         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2055
2056         if (!data->ssp_mode)
2057                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2058
2059         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2060         if (ie) {
2061                 if (!ie->data.ssp_mode)
2062                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2063
2064                 if (ie->name_state == NAME_NEEDED &&
2065                     data->rssi != ie->data.rssi) {
2066                         ie->data.rssi = data->rssi;
2067                         hci_inquiry_cache_update_resolve(hdev, ie);
2068                 }
2069
2070                 goto update;
2071         }
2072
2073         /* Entry not in the cache. Add new one. */
2074         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2075         if (!ie) {
2076                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2077                 goto done;
2078         }
2079
2080         list_add(&ie->all, &cache->all);
2081
2082         if (name_known) {
2083                 ie->name_state = NAME_KNOWN;
2084         } else {
2085                 ie->name_state = NAME_NOT_KNOWN;
2086                 list_add(&ie->list, &cache->unknown);
2087         }
2088
2089 update:
2090         if (name_known && ie->name_state != NAME_KNOWN &&
2091             ie->name_state != NAME_PENDING) {
2092                 ie->name_state = NAME_KNOWN;
2093                 list_del(&ie->list);
2094         }
2095
2096         memcpy(&ie->data, data, sizeof(*data));
2097         ie->timestamp = jiffies;
2098         cache->timestamp = jiffies;
2099
2100         if (ie->name_state == NAME_NOT_KNOWN)
2101                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2102
2103 done:
2104         return flags;
2105 }
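
/* A minimal sketch of the caller side (the real callers are the inquiry
 * result event handlers): the returned flags tell the management layer
 * whether userspace still has to confirm the remote name and whether
 * legacy pairing is involved. The function name is hypothetical.
 */
static u32 __maybe_unused example_cache_device(struct hci_dev *hdev,
                                               struct inquiry_data *data,
                                               bool name_known)
{
        u32 flags;

        hci_dev_lock(hdev);
        flags = hci_inquiry_cache_update(hdev, data, name_known);
        hci_dev_unlock(hdev);

        return flags;
}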
2106
2107 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2108 {
2109         struct discovery_state *cache = &hdev->discovery;
2110         struct inquiry_info *info = (struct inquiry_info *) buf;
2111         struct inquiry_entry *e;
2112         int copied = 0;
2113
2114         list_for_each_entry(e, &cache->all, all) {
2115                 struct inquiry_data *data = &e->data;
2116
2117                 if (copied >= num)
2118                         break;
2119
2120                 bacpy(&info->bdaddr, &data->bdaddr);
2121                 info->pscan_rep_mode    = data->pscan_rep_mode;
2122                 info->pscan_period_mode = data->pscan_period_mode;
2123                 info->pscan_mode        = data->pscan_mode;
2124                 memcpy(info->dev_class, data->dev_class, 3);
2125                 info->clock_offset      = data->clock_offset;
2126
2127                 info++;
2128                 copied++;
2129         }
2130
2131         BT_DBG("cache %p, copied %d", cache, copied);
2132         return copied;
2133 }
2134
2135 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2136 {
2137         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2138         struct hci_dev *hdev = req->hdev;
2139         struct hci_cp_inquiry cp;
2140
2141         BT_DBG("%s", hdev->name);
2142
2143         if (test_bit(HCI_INQUIRY, &hdev->flags))
2144                 return;
2145
2146         /* Start Inquiry */
2147         memcpy(&cp.lap, &ir->lap, 3);
2148         cp.length  = ir->length;
2149         cp.num_rsp = ir->num_rsp;
2150         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2151 }
2152
2153 static int wait_inquiry(void *word)
2154 {
2155         schedule();
2156         return signal_pending(current);
2157 }
2158
2159 int hci_inquiry(void __user *arg)
2160 {
2161         __u8 __user *ptr = arg;
2162         struct hci_inquiry_req ir;
2163         struct hci_dev *hdev;
2164         int err = 0, do_inquiry = 0, max_rsp;
2165         long timeo;
2166         __u8 *buf;
2167
2168         if (copy_from_user(&ir, ptr, sizeof(ir)))
2169                 return -EFAULT;
2170
2171         hdev = hci_dev_get(ir.dev_id);
2172         if (!hdev)
2173                 return -ENODEV;
2174
2175         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2176                 err = -EBUSY;
2177                 goto done;
2178         }
2179
2180         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2181                 err = -EOPNOTSUPP;
2182                 goto done;
2183         }
2184
2185         if (hdev->dev_type != HCI_BREDR) {
2186                 err = -EOPNOTSUPP;
2187                 goto done;
2188         }
2189
2190         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2191                 err = -EOPNOTSUPP;
2192                 goto done;
2193         }
2194
2195         hci_dev_lock(hdev);
2196         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2197             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2198                 hci_inquiry_cache_flush(hdev);
2199                 do_inquiry = 1;
2200         }
2201         hci_dev_unlock(hdev);
2202
2203         timeo = ir.length * msecs_to_jiffies(2000);
2204
2205         if (do_inquiry) {
2206                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2207                                    timeo);
2208                 if (err < 0)
2209                         goto done;
2210
2211                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2212                  * cleared). If it is interrupted by a signal, return -EINTR.
2213                  */
2214                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2215                                 TASK_INTERRUPTIBLE))
2216                         return -EINTR;
2217         }
2218
2219         /* For an unlimited number of responses, use a buffer with
2220          * 255 entries.
2221          */
2222         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2223
2224         /* cache_dump can't sleep. Therefore we allocate a temporary
2225          * buffer and then copy it to user space.
2226          */
2227         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2228         if (!buf) {
2229                 err = -ENOMEM;
2230                 goto done;
2231         }
2232
2233         hci_dev_lock(hdev);
2234         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2235         hci_dev_unlock(hdev);
2236
2237         BT_DBG("num_rsp %d", ir.num_rsp);
2238
2239         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2240                 ptr += sizeof(ir);
2241                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2242                                  ir.num_rsp))
2243                         err = -EFAULT;
2244         } else
2245                 err = -EFAULT;
2246
2247         kfree(buf);
2248
2249 done:
2250         hci_dev_put(hdev);
2251         return err;
2252 }
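
/* Userspace usage sketch (not part of this file): HCIINQUIRY expects a
 * struct hci_inquiry_req immediately followed by room for the results,
 * issued on an open BTPROTO_HCI socket. The LAP below is the General
 * Inquiry Access Code 0x9e8b33 in little-endian order, and ir.length is
 * the inquiry length in 1.28 s units per the core spec (the code above
 * waits up to 2 s per unit before timing out):
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } buf = { .ir = {
 *              .dev_id  = 0,
 *              .flags   = IREQ_CACHE_FLUSH,
 *              .lap     = { 0x33, 0x8b, 0x9e },
 *              .length  = 8,
 *              .num_rsp = 255,
 *      } };
 *
 *      if (ioctl(hci_sock, HCIINQUIRY, &buf) < 0)
 *              perror("HCIINQUIRY");
 */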
2253
2254 static int hci_dev_do_open(struct hci_dev *hdev)
2255 {
2256         int ret = 0;
2257
2258         BT_DBG("%s %p", hdev->name, hdev);
2259
2260         hci_req_lock(hdev);
2261
2262         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2263                 ret = -ENODEV;
2264                 goto done;
2265         }
2266
2267         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2268             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2269                 /* Check for rfkill but allow the HCI setup stage to
2270                  * proceed (which in itself doesn't cause any RF activity).
2271                  */
2272                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2273                         ret = -ERFKILL;
2274                         goto done;
2275                 }
2276
2277                 /* Check for a valid public address or a configured static
2278                  * random address, but let the HCI setup proceed to
2279                  * be able to determine if there is a public address
2280                  * or not.
2281                  *
2282                  * In case of user channel usage, it is not important
2283                  * if a public address or static random address is
2284                  * available.
2285                  *
2286                  * This check is only valid for BR/EDR controllers
2287                  * since AMP controllers do not have an address.
2288                  */
2289                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2290                     hdev->dev_type == HCI_BREDR &&
2291                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2292                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2293                         ret = -EADDRNOTAVAIL;
2294                         goto done;
2295                 }
2296         }
2297
2298         if (test_bit(HCI_UP, &hdev->flags)) {
2299                 ret = -EALREADY;
2300                 goto done;
2301         }
2302
2303         if (hdev->open(hdev)) {
2304                 ret = -EIO;
2305                 goto done;
2306         }
2307
2308         atomic_set(&hdev->cmd_cnt, 1);
2309         set_bit(HCI_INIT, &hdev->flags);
2310
2311         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2312                 if (hdev->setup)
2313                         ret = hdev->setup(hdev);
2314
2315                 /* The transport driver can set these quirks before
2316                  * creating the HCI device or in its setup callback.
2317                  *
2318                  * In case any of them is set, the controller has to
2319                  * start up as unconfigured.
2320                  */
2321                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2322                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2323                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2324
2325                 /* For an unconfigured controller it is required to
2326                  * read at least the version information provided by
2327                  * the Read Local Version Information command.
2328                  *
2329                  * If the set_bdaddr driver callback is provided, then
2330                  * also the original Bluetooth public device address
2331                  * will be read using the Read BD Address command.
2332                  */
2333                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2334                         ret = __hci_unconf_init(hdev);
2335         }
2336
2337         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2338                 /* If public address change is configured, ensure that
2339                  * the address gets programmed. If the driver does not
2340                  * support changing the public address, fail the power
2341                  * on procedure.
2342                  */
2343                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2344                     hdev->set_bdaddr)
2345                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2346                 else
2347                         ret = -EADDRNOTAVAIL;
2348         }
2349
2350         if (!ret) {
2351                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2352                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2353                         ret = __hci_init(hdev);
2354         }
2355
2356         clear_bit(HCI_INIT, &hdev->flags);
2357
2358         if (!ret) {
2359                 hci_dev_hold(hdev);
2360                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2361                 set_bit(HCI_UP, &hdev->flags);
2362                 hci_notify(hdev, HCI_DEV_UP);
2363                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2364                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2365                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2366                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2367                     hdev->dev_type == HCI_BREDR) {
2368                         hci_dev_lock(hdev);
2369                         mgmt_powered(hdev, 1);
2370                         hci_dev_unlock(hdev);
2371                 }
2372         } else {
2373                 /* Init failed, cleanup */
2374                 flush_work(&hdev->tx_work);
2375                 flush_work(&hdev->cmd_work);
2376                 flush_work(&hdev->rx_work);
2377
2378                 skb_queue_purge(&hdev->cmd_q);
2379                 skb_queue_purge(&hdev->rx_q);
2380
2381                 if (hdev->flush)
2382                         hdev->flush(hdev);
2383
2384                 if (hdev->sent_cmd) {
2385                         kfree_skb(hdev->sent_cmd);
2386                         hdev->sent_cmd = NULL;
2387                 }
2388
2389                 hdev->close(hdev);
2390                 hdev->flags &= BIT(HCI_RAW);
2391         }
2392
2393 done:
2394         hci_req_unlock(hdev);
2395         return ret;
2396 }
2397
2398 /* ---- HCI ioctl helpers ---- */
2399
2400 int hci_dev_open(__u16 dev)
2401 {
2402         struct hci_dev *hdev;
2403         int err;
2404
2405         hdev = hci_dev_get(dev);
2406         if (!hdev)
2407                 return -ENODEV;
2408
2409         /* Devices that are marked as unconfigured can only be powered
2410          * up as user channel. Trying to bring them up as normal devices
2411          * will result in a failure. Only user channel operation is
2412          * possible.
2413          *
2414          * When this function is called for a user channel, the flag
2415          * HCI_USER_CHANNEL will be set first before attempting to
2416          * open the device.
2417          */
2418         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2419             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2420                 err = -EOPNOTSUPP;
2421                 goto done;
2422         }
2423
2424         /* We need to ensure that no other power on/off work is pending
2425          * before proceeding to call hci_dev_do_open. This is
2426          * particularly important if the setup procedure has not yet
2427          * completed.
2428          */
2429         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2430                 cancel_delayed_work(&hdev->power_off);
2431
2432         /* After this call it is guaranteed that the setup procedure
2433          * has finished. This means that error conditions like RFKILL
2434          * or no valid public or static random address apply.
2435          */
2436         flush_workqueue(hdev->req_workqueue);
2437
2438         err = hci_dev_do_open(hdev);
2439
2440 done:
2441         hci_dev_put(hdev);
2442         return err;
2443 }
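
/* Userspace usage sketch (not part of this file): HCIDEVUP takes the
 * device index directly as the ioctl argument on a raw HCI socket:
 *
 *      int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *      if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *              perror("HCIDEVUP");
 */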
2444
2445 /* This function requires the caller holds hdev->lock */
2446 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2447 {
2448         struct hci_conn_params *p;
2449
2450         list_for_each_entry(p, &hdev->le_conn_params, list)
2451                 list_del_init(&p->action);
2452
2453         BT_DBG("All LE pending actions cleared");
2454 }
2455
2456 static int hci_dev_do_close(struct hci_dev *hdev)
2457 {
2458         BT_DBG("%s %p", hdev->name, hdev);
2459
2460         cancel_delayed_work(&hdev->power_off);
2461
2462         hci_req_cancel(hdev, ENODEV);
2463         hci_req_lock(hdev);
2464
2465         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2466                 cancel_delayed_work_sync(&hdev->cmd_timer);
2467                 hci_req_unlock(hdev);
2468                 return 0;
2469         }
2470
2471         /* Flush RX and TX works */
2472         flush_work(&hdev->tx_work);
2473         flush_work(&hdev->rx_work);
2474
2475         if (hdev->discov_timeout > 0) {
2476                 cancel_delayed_work(&hdev->discov_off);
2477                 hdev->discov_timeout = 0;
2478                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2479                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2480         }
2481
2482         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2483                 cancel_delayed_work(&hdev->service_cache);
2484
2485         cancel_delayed_work_sync(&hdev->le_scan_disable);
2486
2487         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2488                 cancel_delayed_work_sync(&hdev->rpa_expired);
2489
2490         hci_dev_lock(hdev);
2491         hci_inquiry_cache_flush(hdev);
2492         hci_conn_hash_flush(hdev);
2493         hci_pend_le_actions_clear(hdev);
2494         hci_dev_unlock(hdev);
2495
2496         hci_notify(hdev, HCI_DEV_DOWN);
2497
2498         if (hdev->flush)
2499                 hdev->flush(hdev);
2500
2501         /* Reset device */
2502         skb_queue_purge(&hdev->cmd_q);
2503         atomic_set(&hdev->cmd_cnt, 1);
2504         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2505             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2506             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2507                 set_bit(HCI_INIT, &hdev->flags);
2508                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2509                 clear_bit(HCI_INIT, &hdev->flags);
2510         }
2511
2512         /* Flush cmd work */
2513         flush_work(&hdev->cmd_work);
2514
2515         /* Drop queues */
2516         skb_queue_purge(&hdev->rx_q);
2517         skb_queue_purge(&hdev->cmd_q);
2518         skb_queue_purge(&hdev->raw_q);
2519
2520         /* Drop last sent command */
2521         if (hdev->sent_cmd) {
2522                 cancel_delayed_work_sync(&hdev->cmd_timer);
2523                 kfree_skb(hdev->sent_cmd);
2524                 hdev->sent_cmd = NULL;
2525         }
2526
2527         kfree_skb(hdev->recv_evt);
2528         hdev->recv_evt = NULL;
2529
2530         /* After this point our queues are empty
2531          * and no tasks are scheduled. */
2532         hdev->close(hdev);
2533
2534         /* Clear flags */
2535         hdev->flags &= BIT(HCI_RAW);
2536         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2537
2538         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2539                 if (hdev->dev_type == HCI_BREDR) {
2540                         hci_dev_lock(hdev);
2541                         mgmt_powered(hdev, 0);
2542                         hci_dev_unlock(hdev);
2543                 }
2544         }
2545
2546         /* Controller radio is available but is currently powered down */
2547         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2548
2549         memset(hdev->eir, 0, sizeof(hdev->eir));
2550         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2551         bacpy(&hdev->random_addr, BDADDR_ANY);
2552
2553         hci_req_unlock(hdev);
2554
2555         hci_dev_put(hdev);
2556         return 0;
2557 }
2558
2559 int hci_dev_close(__u16 dev)
2560 {
2561         struct hci_dev *hdev;
2562         int err;
2563
2564         hdev = hci_dev_get(dev);
2565         if (!hdev)
2566                 return -ENODEV;
2567
2568         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2569                 err = -EBUSY;
2570                 goto done;
2571         }
2572
2573         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2574                 cancel_delayed_work(&hdev->power_off);
2575
2576         err = hci_dev_do_close(hdev);
2577
2578 done:
2579         hci_dev_put(hdev);
2580         return err;
2581 }
2582
2583 int hci_dev_reset(__u16 dev)
2584 {
2585         struct hci_dev *hdev;
2586         int ret = 0;
2587
2588         hdev = hci_dev_get(dev);
2589         if (!hdev)
2590                 return -ENODEV;
2591
2592         hci_req_lock(hdev);
2593
2594         if (!test_bit(HCI_UP, &hdev->flags)) {
2595                 ret = -ENETDOWN;
2596                 goto done;
2597         }
2598
2599         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2600                 ret = -EBUSY;
2601                 goto done;
2602         }
2603
2604         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2605                 ret = -EOPNOTSUPP;
2606                 goto done;
2607         }
2608
2609         /* Drop queues */
2610         skb_queue_purge(&hdev->rx_q);
2611         skb_queue_purge(&hdev->cmd_q);
2612
2613         hci_dev_lock(hdev);
2614         hci_inquiry_cache_flush(hdev);
2615         hci_conn_hash_flush(hdev);
2616         hci_dev_unlock(hdev);
2617
2618         if (hdev->flush)
2619                 hdev->flush(hdev);
2620
2621         atomic_set(&hdev->cmd_cnt, 1);
2622         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2623
2624         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2625
2626 done:
2627         hci_req_unlock(hdev);
2628         hci_dev_put(hdev);
2629         return ret;
2630 }
2631
2632 int hci_dev_reset_stat(__u16 dev)
2633 {
2634         struct hci_dev *hdev;
2635         int ret = 0;
2636
2637         hdev = hci_dev_get(dev);
2638         if (!hdev)
2639                 return -ENODEV;
2640
2641         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2642                 ret = -EBUSY;
2643                 goto done;
2644         }
2645
2646         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2647                 ret = -EOPNOTSUPP;
2648                 goto done;
2649         }
2650
2651         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2652
2653 done:
2654         hci_dev_put(hdev);
2655         return ret;
2656 }
2657
2658 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2659 {
2660         struct hci_dev *hdev;
2661         struct hci_dev_req dr;
2662         int err = 0;
2663
2664         if (copy_from_user(&dr, arg, sizeof(dr)))
2665                 return -EFAULT;
2666
2667         hdev = hci_dev_get(dr.dev_id);
2668         if (!hdev)
2669                 return -ENODEV;
2670
2671         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2672                 err = -EBUSY;
2673                 goto done;
2674         }
2675
2676         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2677                 err = -EOPNOTSUPP;
2678                 goto done;
2679         }
2680
2681         if (hdev->dev_type != HCI_BREDR) {
2682                 err = -EOPNOTSUPP;
2683                 goto done;
2684         }
2685
2686         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2687                 err = -EOPNOTSUPP;
2688                 goto done;
2689         }
2690
2691         switch (cmd) {
2692         case HCISETAUTH:
2693                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2694                                    HCI_INIT_TIMEOUT);
2695                 break;
2696
2697         case HCISETENCRYPT:
2698                 if (!lmp_encrypt_capable(hdev)) {
2699                         err = -EOPNOTSUPP;
2700                         break;
2701                 }
2702
2703                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2704                         /* Auth must be enabled first */
2705                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2706                                            HCI_INIT_TIMEOUT);
2707                         if (err)
2708                                 break;
2709                 }
2710
2711                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2712                                    HCI_INIT_TIMEOUT);
2713                 break;
2714
2715         case HCISETSCAN:
2716                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2717                                    HCI_INIT_TIMEOUT);
2718                 break;
2719
2720         case HCISETLINKPOL:
2721                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2722                                    HCI_INIT_TIMEOUT);
2723                 break;
2724
2725         case HCISETLINKMODE:
2726                 hdev->link_mode = ((__u16) dr.dev_opt) &
2727                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2728                 break;
2729
2730         case HCISETPTYPE:
2731                 hdev->pkt_type = (__u16) dr.dev_opt;
2732                 break;
2733
2734         case HCISETACLMTU:
2735                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2736                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2737                 break;
2738
2739         case HCISETSCOMTU:
2740                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2741                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2742                 break;
2743
2744         default:
2745                 err = -EINVAL;
2746                 break;
2747         }
2748
2749 done:
2750         hci_dev_put(hdev);
2751         return err;
2752 }
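
/* Userspace usage sketch (not part of this file): these commands take a
 * struct hci_dev_req. For HCISETSCAN, dev_opt is the scan enable bit
 * mask (SCAN_DISABLED, SCAN_INQUIRY, SCAN_PAGE):
 *
 *      struct hci_dev_req dr = {
 *              .dev_id  = 0,
 *              .dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *      };
 *
 *      if (ioctl(ctl, HCISETSCAN, &dr) < 0)
 *              perror("HCISETSCAN");
 *
 * For HCISETACLMTU and HCISETSCOMTU the 32-bit dev_opt carries two
 * 16-bit values; as the pointer arithmetic above shows, the packet
 * count occupies the first __u16 in memory and the MTU the second, so
 * the packing is host-endianness dependent.
 */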
2753
2754 int hci_get_dev_list(void __user *arg)
2755 {
2756         struct hci_dev *hdev;
2757         struct hci_dev_list_req *dl;
2758         struct hci_dev_req *dr;
2759         int n = 0, size, err;
2760         __u16 dev_num;
2761
2762         if (get_user(dev_num, (__u16 __user *) arg))
2763                 return -EFAULT;
2764
2765         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2766                 return -EINVAL;
2767
2768         size = sizeof(*dl) + dev_num * sizeof(*dr);
2769
2770         dl = kzalloc(size, GFP_KERNEL);
2771         if (!dl)
2772                 return -ENOMEM;
2773
2774         dr = dl->dev_req;
2775
2776         read_lock(&hci_dev_list_lock);
2777         list_for_each_entry(hdev, &hci_dev_list, list) {
2778                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2779                         cancel_delayed_work(&hdev->power_off);
2780
2781                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2782                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2783
2784                 (dr + n)->dev_id  = hdev->id;
2785                 (dr + n)->dev_opt = hdev->flags;
2786
2787                 if (++n >= dev_num)
2788                         break;
2789         }
2790         read_unlock(&hci_dev_list_lock);
2791
2792         dl->dev_num = n;
2793         size = sizeof(*dl) + n * sizeof(*dr);
2794
2795         err = copy_to_user(arg, dl, size);
2796         kfree(dl);
2797
2798         return err ? -EFAULT : 0;
2799 }
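
/* Userspace usage sketch (not part of this file): the caller passes the
 * capacity in dl.dev_num and gets back the number of filled entries.
 * struct hci_dev_list_req ends in a flexible dev_req[] array, so extra
 * storage has to be allocated behind it:
 *
 *      struct {
 *              struct hci_dev_list_req dl;
 *              struct hci_dev_req dr[16];
 *      } req = { .dl = { .dev_num = 16 } };
 *
 *      if (ioctl(ctl, HCIGETDEVLIST, &req) == 0) {
 *              int i;
 *
 *              for (i = 0; i < req.dl.dev_num; i++)
 *                      printf("hci%u flags 0x%x\n",
 *                             req.dl.dev_req[i].dev_id,
 *                             req.dl.dev_req[i].dev_opt);
 *      }
 */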
2800
2801 int hci_get_dev_info(void __user *arg)
2802 {
2803         struct hci_dev *hdev;
2804         struct hci_dev_info di;
2805         int err = 0;
2806
2807         if (copy_from_user(&di, arg, sizeof(di)))
2808                 return -EFAULT;
2809
2810         hdev = hci_dev_get(di.dev_id);
2811         if (!hdev)
2812                 return -ENODEV;
2813
2814         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2815                 cancel_delayed_work_sync(&hdev->power_off);
2816
2817         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2818                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2819
2820         strcpy(di.name, hdev->name);
2821         di.bdaddr   = hdev->bdaddr;
2822         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2823         di.flags    = hdev->flags;
2824         di.pkt_type = hdev->pkt_type;
2825         if (lmp_bredr_capable(hdev)) {
2826                 di.acl_mtu  = hdev->acl_mtu;
2827                 di.acl_pkts = hdev->acl_pkts;
2828                 di.sco_mtu  = hdev->sco_mtu;
2829                 di.sco_pkts = hdev->sco_pkts;
2830         } else {
2831                 di.acl_mtu  = hdev->le_mtu;
2832                 di.acl_pkts = hdev->le_pkts;
2833                 di.sco_mtu  = 0;
2834                 di.sco_pkts = 0;
2835         }
2836         di.link_policy = hdev->link_policy;
2837         di.link_mode   = hdev->link_mode;
2838
2839         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2840         memcpy(&di.features, &hdev->features, sizeof(di.features));
2841
2842         if (copy_to_user(arg, &di, sizeof(di)))
2843                 err = -EFAULT;
2844
2845         hci_dev_put(hdev);
2846
2847         return err;
2848 }
2849
2850 /* ---- Interface to HCI drivers ---- */
2851
2852 static int hci_rfkill_set_block(void *data, bool blocked)
2853 {
2854         struct hci_dev *hdev = data;
2855
2856         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2857
2858         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2859                 return -EBUSY;
2860
2861         if (blocked) {
2862                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2863                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2864                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2865                         hci_dev_do_close(hdev);
2866         } else {
2867                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2868         }
2869
2870         return 0;
2871 }
2872
2873 static const struct rfkill_ops hci_rfkill_ops = {
2874         .set_block = hci_rfkill_set_block,
2875 };
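
/* A sketch of how these ops get wired up; the actual registration
 * happens later in this file in hci_register_dev(), roughly along
 * these lines:
 *
 *      hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *                                  RFKILL_TYPE_BLUETOOTH,
 *                                  &hci_rfkill_ops, hdev);
 *      if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
 *              rfkill_destroy(hdev->rfkill);
 *              hdev->rfkill = NULL;
 *      }
 */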
2876
2877 static void hci_power_on(struct work_struct *work)
2878 {
2879         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2880         int err;
2881
2882         BT_DBG("%s", hdev->name);
2883
2884         err = hci_dev_do_open(hdev);
2885         if (err < 0) {
2886                 mgmt_set_powered_failed(hdev, err);
2887                 return;
2888         }
2889
2890         /* During the HCI setup phase, a few error conditions are
2891          * ignored and they need to be checked now. If they are still
2892          * valid, it is important to turn the device back off.
2893          */
2894         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2895             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2896             (hdev->dev_type == HCI_BREDR &&
2897              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2898              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2899                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2900                 hci_dev_do_close(hdev);
2901         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2902                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2903                                    HCI_AUTO_OFF_TIMEOUT);
2904         }
2905
2906         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2907                 /* For unconfigured devices, set the HCI_RAW flag
2908                  * so that userspace can easily identify them.
2909                  */
2910                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2911                         set_bit(HCI_RAW, &hdev->flags);
2912
2913                 /* For fully configured devices, this will send
2914                  * the Index Added event. For unconfigured devices,
2915                  * it will send the Unconfigured Index Added event.
2916                  *
2917                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2918                  * and no event will be sent.
2919                  */
2920                 mgmt_index_added(hdev);
2921         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2922                 /* Now that the controller is configured, it is
2923                  * important to clear the HCI_RAW flag.
2924                  */
2925                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2926                         clear_bit(HCI_RAW, &hdev->flags);
2927
2928                 /* Powering on the controller with HCI_CONFIG set only
2929                  * happens with the transition from unconfigured to
2930                  * configured. This will send the Index Added event.
2931                  */
2932                 mgmt_index_added(hdev);
2933         }
2934 }
2935
2936 static void hci_power_off(struct work_struct *work)
2937 {
2938         struct hci_dev *hdev = container_of(work, struct hci_dev,
2939                                             power_off.work);
2940
2941         BT_DBG("%s", hdev->name);
2942
2943         hci_dev_do_close(hdev);
2944 }
2945
2946 static void hci_discov_off(struct work_struct *work)
2947 {
2948         struct hci_dev *hdev;
2949
2950         hdev = container_of(work, struct hci_dev, discov_off.work);
2951
2952         BT_DBG("%s", hdev->name);
2953
2954         mgmt_discoverable_timeout(hdev);
2955 }
2956
2957 void hci_uuids_clear(struct hci_dev *hdev)
2958 {
2959         struct bt_uuid *uuid, *tmp;
2960
2961         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2962                 list_del(&uuid->list);
2963                 kfree(uuid);
2964         }
2965 }
2966
2967 void hci_link_keys_clear(struct hci_dev *hdev)
2968 {
2969         struct list_head *p, *n;
2970
2971         list_for_each_safe(p, n, &hdev->link_keys) {
2972                 struct link_key *key;
2973
2974                 key = list_entry(p, struct link_key, list);
2975
2976                 list_del(p);
2977                 kfree(key);
2978         }
2979 }
2980
2981 void hci_smp_ltks_clear(struct hci_dev *hdev)
2982 {
2983         struct smp_ltk *k, *tmp;
2984
2985         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2986                 list_del(&k->list);
2987                 kfree(k);
2988         }
2989 }
2990
2991 void hci_smp_irks_clear(struct hci_dev *hdev)
2992 {
2993         struct smp_irk *k, *tmp;
2994
2995         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2996                 list_del(&k->list);
2997                 kfree(k);
2998         }
2999 }
3000
3001 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3002 {
3003         struct link_key *k;
3004
3005         list_for_each_entry(k, &hdev->link_keys, list)
3006                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3007                         return k;
3008
3009         return NULL;
3010 }
3011
3012 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3013                                u8 key_type, u8 old_key_type)
3014 {
3015         /* Legacy key */
3016         if (key_type < 0x03)
3017                 return true;
3018
3019         /* Debug keys are insecure so don't store them persistently */
3020         if (key_type == HCI_LK_DEBUG_COMBINATION)
3021                 return false;
3022
3023         /* Changed combination key and there's no previous one */
3024         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3025                 return false;
3026
3027         /* Security mode 3 case */
3028         if (!conn)
3029                 return true;
3030
3031         /* Neither local nor remote side had no-bonding as a requirement */
3032         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3033                 return true;
3034
3035         /* Local side had dedicated bonding as a requirement */
3036         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3037                 return true;
3038
3039         /* Remote side had dedicated bonding as a requirement */
3040         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3041                 return true;
3042
3043         /* If none of the above criteria match, then don't store the key
3044          * persistently */
3045         return false;
3046 }
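
/* The auth_type and remote_auth values tested above are the
 * Authentication Requirements from the IO capability exchange:
 * 0x00/0x01 no bonding (without/with MITM protection), 0x02/0x03
 * dedicated bonding and 0x04/0x05 general bonding. For example, a key
 * from a pairing where both sides asked for general bonding (0x04) is
 * stored persistently, while one where a side asked for no bonding
 * (0x00/0x01) and neither side asked for dedicated bonding is treated
 * as non-persistent.
 */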
3047
3048 static bool ltk_type_master(u8 type)
3049 {
3050         return (type == SMP_LTK);
3051 }
3052
3053 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3054                              bool master)
3055 {
3056         struct smp_ltk *k;
3057
3058         list_for_each_entry(k, &hdev->long_term_keys, list) {
3059                 if (k->ediv != ediv || k->rand != rand)
3060                         continue;
3061
3062                 if (ltk_type_master(k->type) != master)
3063                         continue;
3064
3065                 return k;
3066         }
3067
3068         return NULL;
3069 }
3070
3071 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3072                                      u8 addr_type, bool master)
3073 {
3074         struct smp_ltk *k;
3075
3076         list_for_each_entry(k, &hdev->long_term_keys, list)
3077                 if (addr_type == k->bdaddr_type &&
3078                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3079                     ltk_type_master(k->type) == master)
3080                         return k;
3081
3082         return NULL;
3083 }
3084
3085 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3086 {
3087         struct smp_irk *irk;
3088
3089         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3090                 if (!bacmp(&irk->rpa, rpa))
3091                         return irk;
3092         }
3093
3094         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3095                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3096                         bacpy(&irk->rpa, rpa);
3097                         return irk;
3098                 }
3099         }
3100
3101         return NULL;
3102 }
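
/* A resolvable private address packs a 24-bit prand (top two bits 01)
 * into b[5..3] and a 24-bit hash ah(irk, prand) into b[2..0];
 * smp_irk_matches() recomputes that hash against each stored IRK. A
 * minimal sketch of the RPA address-type test, mirroring the static
 * random check in hci_find_irk_by_addr() below (the helper name is
 * hypothetical):
 */
static inline bool bdaddr_type_is_rpa(bdaddr_t *addr)
{
        /* 01xxxxxx in the most significant byte marks an RPA */
        return (addr->b[5] & 0xc0) == 0x40;
}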
3103
3104 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3105                                      u8 addr_type)
3106 {
3107         struct smp_irk *irk;
3108
3109         /* Identity Address must be public or static random */
3110         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3111                 return NULL;
3112
3113         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3114                 if (addr_type == irk->addr_type &&
3115                     bacmp(bdaddr, &irk->bdaddr) == 0)
3116                         return irk;
3117         }
3118
3119         return NULL;
3120 }
3121
3122 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3123                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3124                                   u8 pin_len, bool *persistent)
3125 {
3126         struct link_key *key, *old_key;
3127         u8 old_key_type;
3128
3129         old_key = hci_find_link_key(hdev, bdaddr);
3130         if (old_key) {
3131                 old_key_type = old_key->type;
3132                 key = old_key;
3133         } else {
3134                 old_key_type = conn ? conn->key_type : 0xff;
3135                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3136                 if (!key)
3137                         return NULL;
3138                 list_add(&key->list, &hdev->link_keys);
3139         }
3140
3141         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3142
3143         /* Some buggy controller combinations generate a changed
3144          * combination key for legacy pairing even when there's no
3145          * previous key */
3146         if (type == HCI_LK_CHANGED_COMBINATION &&
3147             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3148                 type = HCI_LK_COMBINATION;
3149                 if (conn)
3150                         conn->key_type = type;
3151         }
3152
3153         bacpy(&key->bdaddr, bdaddr);
3154         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3155         key->pin_len = pin_len;
3156
3157         if (type == HCI_LK_CHANGED_COMBINATION)
3158                 key->type = old_key_type;
3159         else
3160                 key->type = type;
3161
3162         if (persistent)
3163                 *persistent = hci_persistent_key(hdev, conn, type,
3164                                                  old_key_type);
3165
3166         return key;
3167 }
3168
3169 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3170                             u8 addr_type, u8 type, u8 authenticated,
3171                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3172 {
3173         struct smp_ltk *key, *old_key;
3174         bool master = ltk_type_master(type);
3175
3176         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3177         if (old_key)
3178                 key = old_key;
3179         else {
3180                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3181                 if (!key)
3182                         return NULL;
3183                 list_add(&key->list, &hdev->long_term_keys);
3184         }
3185
3186         bacpy(&key->bdaddr, bdaddr);
3187         key->bdaddr_type = addr_type;
3188         memcpy(key->val, tk, sizeof(key->val));
3189         key->authenticated = authenticated;
3190         key->ediv = ediv;
3191         key->rand = rand;
3192         key->enc_size = enc_size;
3193         key->type = type;
3194
3195         return key;
3196 }
3197
3198 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3199                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3200 {
3201         struct smp_irk *irk;
3202
3203         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3204         if (!irk) {
3205                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3206                 if (!irk)
3207                         return NULL;
3208
3209                 bacpy(&irk->bdaddr, bdaddr);
3210                 irk->addr_type = addr_type;
3211
3212                 list_add(&irk->list, &hdev->identity_resolving_keys);
3213         }
3214
3215         memcpy(irk->val, val, 16);
3216         bacpy(&irk->rpa, rpa);
3217
3218         return irk;
3219 }
3220
3221 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3222 {
3223         struct link_key *key;
3224
3225         key = hci_find_link_key(hdev, bdaddr);
3226         if (!key)
3227                 return -ENOENT;
3228
3229         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3230
3231         list_del(&key->list);
3232         kfree(key);
3233
3234         return 0;
3235 }
3236
3237 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3238 {
3239         struct smp_ltk *k, *tmp;
3240         int removed = 0;
3241
3242         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3243                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3244                         continue;
3245
3246                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3247
3248                 list_del(&k->list);
3249                 kfree(k);
3250                 removed++;
3251         }
3252
3253         return removed ? 0 : -ENOENT;
3254 }
3255
3256 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3257 {
3258         struct smp_irk *k, *tmp;
3259
3260         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3261                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3262                         continue;
3263
3264                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3265
3266                 list_del(&k->list);
3267                 kfree(k);
3268         }
3269 }
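
/* Illustrative sketch (compiled out): the three removal helpers above are
 * typically used together when a device is unpaired. example_unpair() is
 * hypothetical; a real caller (such as the mgmt unpair path) also tears
 * down any active connection.
 */
#if 0
static void example_unpair(struct hci_dev *hdev, bdaddr_t *bdaddr,
                           u8 addr_type)
{
        hci_dev_lock(hdev);
        hci_remove_link_key(hdev, bdaddr);              /* BR/EDR */
        hci_remove_ltk(hdev, bdaddr, addr_type);        /* LE */
        hci_remove_irk(hdev, bdaddr, addr_type);
        hci_dev_unlock(hdev);
}
#endif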
3270
3271 /* HCI command timer function */
3272 static void hci_cmd_timeout(struct work_struct *work)
3273 {
3274         struct hci_dev *hdev = container_of(work, struct hci_dev,
3275                                             cmd_timer.work);
3276
3277         if (hdev->sent_cmd) {
3278                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3279                 u16 opcode = __le16_to_cpu(sent->opcode);
3280
3281                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3282         } else {
3283                 BT_ERR("%s command tx timeout", hdev->name);
3284         }
3285
3286         atomic_set(&hdev->cmd_cnt, 1);
3287         queue_work(hdev->workqueue, &hdev->cmd_work);
3288 }
3289
3290 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3291                                           bdaddr_t *bdaddr)
3292 {
3293         struct oob_data *data;
3294
3295         list_for_each_entry(data, &hdev->remote_oob_data, list)
3296                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3297                         return data;
3298
3299         return NULL;
3300 }
3301
3302 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3303 {
3304         struct oob_data *data;
3305
3306         data = hci_find_remote_oob_data(hdev, bdaddr);
3307         if (!data)
3308                 return -ENOENT;
3309
3310         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3311
3312         list_del(&data->list);
3313         kfree(data);
3314
3315         return 0;
3316 }
3317
3318 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3319 {
3320         struct oob_data *data, *n;
3321
3322         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3323                 list_del(&data->list);
3324                 kfree(data);
3325         }
3326 }
3327
3328 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3329                             u8 *hash, u8 *randomizer)
3330 {
3331         struct oob_data *data;
3332
3333         data = hci_find_remote_oob_data(hdev, bdaddr);
3334         if (!data) {
3335                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3336                 if (!data)
3337                         return -ENOMEM;
3338
3339                 bacpy(&data->bdaddr, bdaddr);
3340                 list_add(&data->list, &hdev->remote_oob_data);
3341         }
3342
3343         memcpy(data->hash192, hash, sizeof(data->hash192));
3344         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3345
3346         memset(data->hash256, 0, sizeof(data->hash256));
3347         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3348
3349         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3350
3351         return 0;
3352 }
3353
3354 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3355                                 u8 *hash192, u8 *randomizer192,
3356                                 u8 *hash256, u8 *randomizer256)
3357 {
3358         struct oob_data *data;
3359
3360         data = hci_find_remote_oob_data(hdev, bdaddr);
3361         if (!data) {
3362                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3363                 if (!data)
3364                         return -ENOMEM;
3365
3366                 bacpy(&data->bdaddr, bdaddr);
3367                 list_add(&data->list, &hdev->remote_oob_data);
3368         }
3369
3370         memcpy(data->hash192, hash192, sizeof(data->hash192));
3371         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3372
3373         memcpy(data->hash256, hash256, sizeof(data->hash256));
3374         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3375
3376         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3377
3378         return 0;
3379 }
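
/* Illustrative sketch (compiled out): which of the two helpers above is
 * used depends on whether the remote also provided Secure Connections
 * (P-256) OOB values besides the legacy P-192 ones. example_store_oob()
 * and its parameters are hypothetical.
 */
#if 0
static int example_store_oob(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 *h192, u8 *r192, u8 *h256, u8 *r256)
{
        if (h256 && r256)
                return hci_add_remote_oob_ext_data(hdev, bdaddr, h192, r192,
                                                   h256, r256);

        return hci_add_remote_oob_data(hdev, bdaddr, h192, r192);
}
#endif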
3380
3381 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3382                                          bdaddr_t *bdaddr, u8 type)
3383 {
3384         struct bdaddr_list *b;
3385
3386         list_for_each_entry(b, bdaddr_list, list) {
3387                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3388                         return b;
3389         }
3390
3391         return NULL;
3392 }
3393
3394 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3395 {
3396         struct list_head *p, *n;
3397
3398         list_for_each_safe(p, n, bdaddr_list) {
3399                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3400
3401                 list_del(p);
3402                 kfree(b);
3403         }
3404 }
3405
3406 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3407 {
3408         struct bdaddr_list *entry;
3409
3410         if (!bacmp(bdaddr, BDADDR_ANY))
3411                 return -EBADF;
3412
3413         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3414                 return -EEXIST;
3415
3416         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3417         if (!entry)
3418                 return -ENOMEM;
3419
3420         bacpy(&entry->bdaddr, bdaddr);
3421         entry->bdaddr_type = type;
3422
3423         list_add(&entry->list, list);
3424
3425         return 0;
3426 }
3427
3428 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3429 {
3430         struct bdaddr_list *entry;
3431
3432         if (!bacmp(bdaddr, BDADDR_ANY)) {
3433                 hci_bdaddr_list_clear(list);
3434                 return 0;
3435         }
3436
3437         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3438         if (!entry)
3439                 return -ENOENT;
3440
3441         list_del(&entry->list);
3442         kfree(entry);
3443
3444         return 0;
3445 }
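
/* Illustrative sketch (compiled out): the generic bdaddr_list helpers above
 * back both hdev->blacklist and the new BR/EDR whitelist.
 * example_whitelist_add() is hypothetical; BDADDR_BREDR is the address
 * type used for BR/EDR entries.
 */
#if 0
static int example_whitelist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        int err;

        hci_dev_lock(hdev);
        err = hci_bdaddr_list_add(&hdev->whitelist, bdaddr, BDADDR_BREDR);
        hci_dev_unlock(hdev);

        return err;     /* -EEXIST if the entry is already present */
}
#endif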
3446
3447 /* This function requires the caller holds hdev->lock */
3448 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3449                                                bdaddr_t *addr, u8 addr_type)
3450 {
3451         struct hci_conn_params *params;
3452
3453         /* The conn params list only contains identity addresses */
3454         if (!hci_is_identity_address(addr, addr_type))
3455                 return NULL;
3456
3457         list_for_each_entry(params, &hdev->le_conn_params, list) {
3458                 if (bacmp(&params->addr, addr) == 0 &&
3459                     params->addr_type == addr_type) {
3460                         return params;
3461                 }
3462         }
3463
3464         return NULL;
3465 }
3466
3467 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3468 {
3469         struct hci_conn *conn;
3470
3471         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3472         if (!conn)
3473                 return false;
3474
3475         if (conn->dst_type != type)
3476                 return false;
3477
3478         if (conn->state != BT_CONNECTED)
3479                 return false;
3480
3481         return true;
3482 }
3483
3484 /* This function requires the caller holds hdev->lock */
3485 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3486                                                   bdaddr_t *addr, u8 addr_type)
3487 {
3488         struct hci_conn_params *param;
3489
3490         /* The list only contains identity addresses */
3491         if (!hci_is_identity_address(addr, addr_type))
3492                 return NULL;
3493
3494         list_for_each_entry(param, list, action) {
3495                 if (bacmp(&param->addr, addr) == 0 &&
3496                     param->addr_type == addr_type)
3497                         return param;
3498         }
3499
3500         return NULL;
3501 }
3502
3503 /* This function requires the caller holds hdev->lock */
3504 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3505                                             bdaddr_t *addr, u8 addr_type)
3506 {
3507         struct hci_conn_params *params;
3508
3509         if (!hci_is_identity_address(addr, addr_type))
3510                 return NULL;
3511
3512         params = hci_conn_params_lookup(hdev, addr, addr_type);
3513         if (params)
3514                 return params;
3515
3516         params = kzalloc(sizeof(*params), GFP_KERNEL);
3517         if (!params) {
3518                 BT_ERR("Out of memory");
3519                 return NULL;
3520         }
3521
3522         bacpy(&params->addr, addr);
3523         params->addr_type = addr_type;
3524
3525         list_add(&params->list, &hdev->le_conn_params);
3526         INIT_LIST_HEAD(&params->action);
3527
3528         params->conn_min_interval = hdev->le_conn_min_interval;
3529         params->conn_max_interval = hdev->le_conn_max_interval;
3530         params->conn_latency = hdev->le_conn_latency;
3531         params->supervision_timeout = hdev->le_supv_timeout;
3532         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3533
3534         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3535
3536         return params;
3537 }
3538
3539 /* This function requires the caller holds hdev->lock */
3540 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3541                         u8 auto_connect)
3542 {
3543         struct hci_conn_params *params;
3544
3545         params = hci_conn_params_add(hdev, addr, addr_type);
3546         if (!params)
3547                 return -EIO;
3548
3549         if (params->auto_connect == auto_connect)
3550                 return 0;
3551
3552         list_del_init(&params->action);
3553
3554         switch (auto_connect) {
3555         case HCI_AUTO_CONN_DISABLED:
3556         case HCI_AUTO_CONN_LINK_LOSS:
3557                 hci_update_background_scan(hdev);
3558                 break;
3559         case HCI_AUTO_CONN_REPORT:
3560                 list_add(&params->action, &hdev->pend_le_reports);
3561                 hci_update_background_scan(hdev);
3562                 break;
3563         case HCI_AUTO_CONN_ALWAYS:
3564                 if (!is_connected(hdev, addr, addr_type)) {
3565                         list_add(&params->action, &hdev->pend_le_conns);
3566                         hci_update_background_scan(hdev);
3567                 }
3568                 break;
3569         }
3570
3571         params->auto_connect = auto_connect;
3572
3573         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3574                auto_connect);
3575
3576         return 0;
3577 }
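
/* Illustrative sketch (compiled out): marking an LE device for automatic
 * (re)connection. example_auto_connect() is hypothetical; the address must
 * be an identity address, otherwise hci_conn_params_add() refuses it and
 * -EIO is returned.
 */
#if 0
static int example_auto_connect(struct hci_dev *hdev, bdaddr_t *addr)
{
        int err;

        hci_dev_lock(hdev);
        err = hci_conn_params_set(hdev, addr, ADDR_LE_DEV_PUBLIC,
                                  HCI_AUTO_CONN_ALWAYS);
        hci_dev_unlock(hdev);

        return err;
}
#endif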
3578
3579 /* This function requires the caller holds hdev->lock */
3580 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3581 {
3582         struct hci_conn_params *params;
3583
3584         params = hci_conn_params_lookup(hdev, addr, addr_type);
3585         if (!params)
3586                 return;
3587
3588         list_del(&params->action);
3589         list_del(&params->list);
3590         kfree(params);
3591
3592         hci_update_background_scan(hdev);
3593
3594         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3595 }
3596
3597 /* This function requires the caller holds hdev->lock */
3598 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3599 {
3600         struct hci_conn_params *params, *tmp;
3601
3602         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3603                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3604                         continue;
3605                 list_del(&params->list);
3606                 kfree(params);
3607         }
3608
3609         BT_DBG("All LE disabled connection parameters were removed");
3610 }
3611
3612 /* This function requires the caller holds hdev->lock */
3613 void hci_conn_params_clear_all(struct hci_dev *hdev)
3614 {
3615         struct hci_conn_params *params, *tmp;
3616
3617         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3618                 list_del(&params->action);
3619                 list_del(&params->list);
3620                 kfree(params);
3621         }
3622
3623         hci_update_background_scan(hdev);
3624
3625         BT_DBG("All LE connection parameters were removed");
3626 }
3627
3628 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3629 {
3630         if (status) {
3631                 BT_ERR("Failed to start inquiry: status %d", status);
3632
3633                 hci_dev_lock(hdev);
3634                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3635                 hci_dev_unlock(hdev);
3636                 return;
3637         }
3638 }
3639
3640 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3641 {
3642         /* General inquiry access code (GIAC) */
3643         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3644         struct hci_request req;
3645         struct hci_cp_inquiry cp;
3646         int err;
3647
3648         if (status) {
3649                 BT_ERR("Failed to disable LE scanning: status %d", status);
3650                 return;
3651         }
3652
3653         switch (hdev->discovery.type) {
3654         case DISCOV_TYPE_LE:
3655                 hci_dev_lock(hdev);
3656                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3657                 hci_dev_unlock(hdev);
3658                 break;
3659
3660         case DISCOV_TYPE_INTERLEAVED:
3661                 hci_req_init(&req, hdev);
3662
3663                 memset(&cp, 0, sizeof(cp));
3664                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3665                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3666                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3667
3668                 hci_dev_lock(hdev);
3669
3670                 hci_inquiry_cache_flush(hdev);
3671
3672                 err = hci_req_run(&req, inquiry_complete);
3673                 if (err) {
3674                         BT_ERR("Inquiry request failed: err %d", err);
3675                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3676                 }
3677
3678                 hci_dev_unlock(hdev);
3679                 break;
3680         }
3681 }
3682
3683 static void le_scan_disable_work(struct work_struct *work)
3684 {
3685         struct hci_dev *hdev = container_of(work, struct hci_dev,
3686                                             le_scan_disable.work);
3687         struct hci_request req;
3688         int err;
3689
3690         BT_DBG("%s", hdev->name);
3691
3692         hci_req_init(&req, hdev);
3693
3694         hci_req_add_le_scan_disable(&req);
3695
3696         err = hci_req_run(&req, le_scan_disable_work_complete);
3697         if (err)
3698                 BT_ERR("Disable LE scanning request failed: err %d", err);
3699 }
3700
3701 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3702 {
3703         struct hci_dev *hdev = req->hdev;
3704
3705         /* If we're advertising or initiating an LE connection we can't
3706          * go ahead and change the random address at this time. This is
3707          * because the eventual initiator address used for the
3708          * subsequently created connection will be undefined (some
3709          * controllers use the new address and others the one we had
3710          * when the operation started).
3711          *
3712          * In this kind of scenario skip the update and let the random
3713          * address be updated at the next cycle.
3714          */
3715         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3716             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3717                 BT_DBG("Deferring random address update");
3718                 return;
3719         }
3720
3721         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3722 }
3723
3724 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3725                               u8 *own_addr_type)
3726 {
3727         struct hci_dev *hdev = req->hdev;
3728         int err;
3729
3730         /* If privacy is enabled use a resolvable private address. If
3731          * the current RPA has expired or something other than the
3732          * current RPA is in use, generate a new one.
3733          */
3734         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3735                 int to;
3736
3737                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3738
3739                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3740                     !bacmp(&hdev->random_addr, &hdev->rpa))
3741                         return 0;
3742
3743                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3744                 if (err < 0) {
3745                         BT_ERR("%s failed to generate new RPA", hdev->name);
3746                         return err;
3747                 }
3748
3749                 set_random_addr(req, &hdev->rpa);
3750
3751                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3752                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3753
3754                 return 0;
3755         }
3756
3757         /* If privacy is required but no resolvable private address is
3758          * available, use a non-resolvable private address. This is
3759          * useful for active scanning and non-connectable advertising.
3760          */
3761         if (require_privacy) {
3762                 bdaddr_t urpa;
3763
3764                 get_random_bytes(&urpa, 6);
3765                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3766
3767                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3768                 set_random_addr(req, &urpa);
3769                 return 0;
3770         }
3771
3772         /* If the static address is being forced or there is no public
3773          * address, use the static address as the random address (but
3774          * skip the HCI command if the current random address is already
3775          * the static one).
3776          */
3777         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3778             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3779                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3780                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3781                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3782                                     &hdev->static_addr);
3783                 return 0;
3784         }
3785
3786         /* Neither privacy nor a static address is being used, so
3787          * use the public address.
3788          */
3789         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3790
3791         return 0;
3792 }
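
/* Illustrative sketch (compiled out): hci_update_random_address() is meant
 * to be called while building a request, before queueing the command that
 * consumes own_addr_type. example_add_le_scan() is hypothetical and the
 * scan parameter values are arbitrary; the cp field names are assumed to
 * match struct hci_cp_le_set_scan_param in hci.h.
 */
#if 0
static void example_add_le_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param cp;
        u8 own_addr_type;

        if (hci_update_random_address(req, true, &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.type = LE_SCAN_ACTIVE;
        cp.interval = cpu_to_le16(0x0060);
        cp.window = cpu_to_le16(0x0030);
        cp.own_address_type = own_addr_type;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
#endif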
3793
3794 /* Copy the Identity Address of the controller.
3795  *
3796  * If the controller has a public BD_ADDR, then by default use that one.
3797  * If this is an LE-only controller without a public address, default to
3798  * the static random address.
3799  *
3800  * For debugging purposes it is possible to force controllers with a
3801  * public address to use the static random address instead.
3802  */
3803 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3804                                u8 *bdaddr_type)
3805 {
3806         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3807             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3808                 bacpy(bdaddr, &hdev->static_addr);
3809                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3810         } else {
3811                 bacpy(bdaddr, &hdev->bdaddr);
3812                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3813         }
3814 }
3815
3816 /* Alloc HCI device */
3817 struct hci_dev *hci_alloc_dev(void)
3818 {
3819         struct hci_dev *hdev;
3820
3821         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3822         if (!hdev)
3823                 return NULL;
3824
3825         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3826         hdev->esco_type = (ESCO_HV1);
3827         hdev->link_mode = (HCI_LM_ACCEPT);
3828         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3829         hdev->io_capability = 0x03;     /* No Input No Output */
3830         hdev->manufacturer = 0xffff;    /* Default to internal use */
3831         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3832         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3833
3834         hdev->sniff_max_interval = 800;
3835         hdev->sniff_min_interval = 80;
3836
3837         hdev->le_adv_channel_map = 0x07;
3838         hdev->le_scan_interval = 0x0060;
3839         hdev->le_scan_window = 0x0030;
3840         hdev->le_conn_min_interval = 0x0028;
3841         hdev->le_conn_max_interval = 0x0038;
3842         hdev->le_conn_latency = 0x0000;
3843         hdev->le_supv_timeout = 0x002a;
3844
3845         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3846         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3847         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3848         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3849
3850         mutex_init(&hdev->lock);
3851         mutex_init(&hdev->req_lock);
3852
3853         INIT_LIST_HEAD(&hdev->mgmt_pending);
3854         INIT_LIST_HEAD(&hdev->blacklist);
3855         INIT_LIST_HEAD(&hdev->whitelist);
3856         INIT_LIST_HEAD(&hdev->uuids);
3857         INIT_LIST_HEAD(&hdev->link_keys);
3858         INIT_LIST_HEAD(&hdev->long_term_keys);
3859         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3860         INIT_LIST_HEAD(&hdev->remote_oob_data);
3861         INIT_LIST_HEAD(&hdev->le_white_list);
3862         INIT_LIST_HEAD(&hdev->le_conn_params);
3863         INIT_LIST_HEAD(&hdev->pend_le_conns);
3864         INIT_LIST_HEAD(&hdev->pend_le_reports);
3865         INIT_LIST_HEAD(&hdev->conn_hash.list);
3866
3867         INIT_WORK(&hdev->rx_work, hci_rx_work);
3868         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3869         INIT_WORK(&hdev->tx_work, hci_tx_work);
3870         INIT_WORK(&hdev->power_on, hci_power_on);
3871
3872         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3873         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3874         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3875
3876         skb_queue_head_init(&hdev->rx_q);
3877         skb_queue_head_init(&hdev->cmd_q);
3878         skb_queue_head_init(&hdev->raw_q);
3879
3880         init_waitqueue_head(&hdev->req_wait_q);
3881
3882         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3883
3884         hci_init_sysfs(hdev);
3885         discovery_init(hdev);
3886
3887         return hdev;
3888 }
3889 EXPORT_SYMBOL(hci_alloc_dev);
3890
3891 /* Free HCI device */
3892 void hci_free_dev(struct hci_dev *hdev)
3893 {
3894         /* will free via device release */
3895         put_device(&hdev->dev);
3896 }
3897 EXPORT_SYMBOL(hci_free_dev);
3898
3899 /* Register HCI device */
3900 int hci_register_dev(struct hci_dev *hdev)
3901 {
3902         int id, error;
3903
3904         if (!hdev->open || !hdev->close || !hdev->send)
3905                 return -EINVAL;
3906
3907         /* Do not allow HCI_AMP devices to register at index 0,
3908          * so the index can be used as the AMP controller ID.
3909          */
3910         switch (hdev->dev_type) {
3911         case HCI_BREDR:
3912                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3913                 break;
3914         case HCI_AMP:
3915                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3916                 break;
3917         default:
3918                 return -EINVAL;
3919         }
3920
3921         if (id < 0)
3922                 return id;
3923
3924         sprintf(hdev->name, "hci%d", id);
3925         hdev->id = id;
3926
3927         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3928
3929         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3930                                           WQ_MEM_RECLAIM, 1, hdev->name);
3931         if (!hdev->workqueue) {
3932                 error = -ENOMEM;
3933                 goto err;
3934         }
3935
3936         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3937                                               WQ_MEM_RECLAIM, 1, hdev->name);
3938         if (!hdev->req_workqueue) {
3939                 destroy_workqueue(hdev->workqueue);
3940                 error = -ENOMEM;
3941                 goto err;
3942         }
3943
3944         if (!IS_ERR_OR_NULL(bt_debugfs))
3945                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3946
3947         dev_set_name(&hdev->dev, "%s", hdev->name);
3948
3949         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3950                                                CRYPTO_ALG_ASYNC);
3951         if (IS_ERR(hdev->tfm_aes)) {
3952                 BT_ERR("Unable to create crypto context");
3953                 error = PTR_ERR(hdev->tfm_aes);
3954                 hdev->tfm_aes = NULL;
3955                 goto err_wqueue;
3956         }
3957
3958         error = device_add(&hdev->dev);
3959         if (error < 0)
3960                 goto err_tfm;
3961
3962         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3963                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3964                                     hdev);
3965         if (hdev->rfkill) {
3966                 if (rfkill_register(hdev->rfkill) < 0) {
3967                         rfkill_destroy(hdev->rfkill);
3968                         hdev->rfkill = NULL;
3969                 }
3970         }
3971
3972         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3973                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3974
3975         set_bit(HCI_SETUP, &hdev->dev_flags);
3976         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3977
3978         if (hdev->dev_type == HCI_BREDR) {
3979                 /* Assume BR/EDR support until proven otherwise (such as
3980                  * through reading supported features during init.
3981                  */
3982                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3983         }
3984
3985         write_lock(&hci_dev_list_lock);
3986         list_add(&hdev->list, &hci_dev_list);
3987         write_unlock(&hci_dev_list_lock);
3988
3989         /* Devices that are marked for raw-only usage are unconfigured
3990          * and should not be included in normal operation.
3991          */
3992         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3993                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3994
3995         hci_notify(hdev, HCI_DEV_REG);
3996         hci_dev_hold(hdev);
3997
3998         queue_work(hdev->req_workqueue, &hdev->power_on);
3999
4000         return id;
4001
4002 err_tfm:
4003         crypto_free_blkcipher(hdev->tfm_aes);
4004 err_wqueue:
4005         destroy_workqueue(hdev->workqueue);
4006         destroy_workqueue(hdev->req_workqueue);
4007 err:
4008         ida_simple_remove(&hci_index_ida, hdev->id);
4009
4010         return error;
4011 }
4012 EXPORT_SYMBOL(hci_register_dev);
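
/* Illustrative sketch (compiled out): the usual transport-driver sequence
 * around hci_alloc_dev()/hci_register_dev(). example_probe() and the
 * example_* callbacks are hypothetical stand-ins for a real driver's hooks.
 */
#if 0
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb);         /* a real driver hands the skb to hardware */
        return 0;
}

static int example_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_USB;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        err = hci_register_dev(hdev);
        if (err < 0)
                hci_free_dev(hdev);

        return err;
}
#endif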
4013
4014 /* Unregister HCI device */
4015 void hci_unregister_dev(struct hci_dev *hdev)
4016 {
4017         int i, id;
4018
4019         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4020
4021         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4022
4023         id = hdev->id;
4024
4025         write_lock(&hci_dev_list_lock);
4026         list_del(&hdev->list);
4027         write_unlock(&hci_dev_list_lock);
4028
4029         hci_dev_do_close(hdev);
4030
4031         for (i = 0; i < NUM_REASSEMBLY; i++)
4032                 kfree_skb(hdev->reassembly[i]);
4033
4034         cancel_work_sync(&hdev->power_on);
4035
4036         if (!test_bit(HCI_INIT, &hdev->flags) &&
4037             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4038             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4039                 hci_dev_lock(hdev);
4040                 mgmt_index_removed(hdev);
4041                 hci_dev_unlock(hdev);
4042         }
4043
4044         /* mgmt_index_removed should take care of emptying the
4045          * pending list */
4046         BUG_ON(!list_empty(&hdev->mgmt_pending));
4047
4048         hci_notify(hdev, HCI_DEV_UNREG);
4049
4050         if (hdev->rfkill) {
4051                 rfkill_unregister(hdev->rfkill);
4052                 rfkill_destroy(hdev->rfkill);
4053         }
4054
4055         if (hdev->tfm_aes)
4056                 crypto_free_blkcipher(hdev->tfm_aes);
4057
4058         device_del(&hdev->dev);
4059
4060         debugfs_remove_recursive(hdev->debugfs);
4061
4062         destroy_workqueue(hdev->workqueue);
4063         destroy_workqueue(hdev->req_workqueue);
4064
4065         hci_dev_lock(hdev);
4066         hci_bdaddr_list_clear(&hdev->blacklist);
4067         hci_bdaddr_list_clear(&hdev->whitelist);
4068         hci_uuids_clear(hdev);
4069         hci_link_keys_clear(hdev);
4070         hci_smp_ltks_clear(hdev);
4071         hci_smp_irks_clear(hdev);
4072         hci_remote_oob_data_clear(hdev);
4073         hci_bdaddr_list_clear(&hdev->le_white_list);
4074         hci_conn_params_clear_all(hdev);
4075         hci_dev_unlock(hdev);
4076
4077         hci_dev_put(hdev);
4078
4079         ida_simple_remove(&hci_index_ida, id);
4080 }
4081 EXPORT_SYMBOL(hci_unregister_dev);
4082
4083 /* Suspend HCI device */
4084 int hci_suspend_dev(struct hci_dev *hdev)
4085 {
4086         hci_notify(hdev, HCI_DEV_SUSPEND);
4087         return 0;
4088 }
4089 EXPORT_SYMBOL(hci_suspend_dev);
4090
4091 /* Resume HCI device */
4092 int hci_resume_dev(struct hci_dev *hdev)
4093 {
4094         hci_notify(hdev, HCI_DEV_RESUME);
4095         return 0;
4096 }
4097 EXPORT_SYMBOL(hci_resume_dev);
4098
4099 /* Receive frame from HCI drivers */
4100 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4101 {
4102         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4103                       && !test_bit(HCI_INIT, &hdev->flags))) {
4104                 kfree_skb(skb);
4105                 return -ENXIO;
4106         }
4107
4108         /* Incoming skb */
4109         bt_cb(skb)->incoming = 1;
4110
4111         /* Time stamp */
4112         __net_timestamp(skb);
4113
4114         skb_queue_tail(&hdev->rx_q, skb);
4115         queue_work(hdev->workqueue, &hdev->rx_work);
4116
4117         return 0;
4118 }
4119 EXPORT_SYMBOL(hci_recv_frame);
4120
4121 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4122                           int count, __u8 index)
4123 {
4124         int len = 0;
4125         int hlen = 0;
4126         int remain = count;
4127         struct sk_buff *skb;
4128         struct bt_skb_cb *scb;
4129
4130         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4131             index >= NUM_REASSEMBLY)
4132                 return -EILSEQ;
4133
4134         skb = hdev->reassembly[index];
4135
4136         if (!skb) {
4137                 switch (type) {
4138                 case HCI_ACLDATA_PKT:
4139                         len = HCI_MAX_FRAME_SIZE;
4140                         hlen = HCI_ACL_HDR_SIZE;
4141                         break;
4142                 case HCI_EVENT_PKT:
4143                         len = HCI_MAX_EVENT_SIZE;
4144                         hlen = HCI_EVENT_HDR_SIZE;
4145                         break;
4146                 case HCI_SCODATA_PKT:
4147                         len = HCI_MAX_SCO_SIZE;
4148                         hlen = HCI_SCO_HDR_SIZE;
4149                         break;
4150                 }
4151
4152                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4153                 if (!skb)
4154                         return -ENOMEM;
4155
4156                 scb = (void *) skb->cb;
4157                 scb->expect = hlen;
4158                 scb->pkt_type = type;
4159
4160                 hdev->reassembly[index] = skb;
4161         }
4162
4163         while (count) {
4164                 scb = (void *) skb->cb;
4165                 len = min_t(uint, scb->expect, count);
4166
4167                 memcpy(skb_put(skb, len), data, len);
4168
4169                 count -= len;
4170                 data += len;
4171                 scb->expect -= len;
4172                 remain = count;
4173
4174                 switch (type) {
4175                 case HCI_EVENT_PKT:
4176                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4177                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4178                                 scb->expect = h->plen;
4179
4180                                 if (skb_tailroom(skb) < scb->expect) {
4181                                         kfree_skb(skb);
4182                                         hdev->reassembly[index] = NULL;
4183                                         return -ENOMEM;
4184                                 }
4185                         }
4186                         break;
4187
4188                 case HCI_ACLDATA_PKT:
4189                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4190                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4191                                 scb->expect = __le16_to_cpu(h->dlen);
4192
4193                                 if (skb_tailroom(skb) < scb->expect) {
4194                                         kfree_skb(skb);
4195                                         hdev->reassembly[index] = NULL;
4196                                         return -ENOMEM;
4197                                 }
4198                         }
4199                         break;
4200
4201                 case HCI_SCODATA_PKT:
4202                         if (skb->len == HCI_SCO_HDR_SIZE) {
4203                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4204                                 scb->expect = h->dlen;
4205
4206                                 if (skb_tailroom(skb) < scb->expect) {
4207                                         kfree_skb(skb);
4208                                         hdev->reassembly[index] = NULL;
4209                                         return -ENOMEM;
4210                                 }
4211                         }
4212                         break;
4213                 }
4214
4215                 if (scb->expect == 0) {
4216                         /* Complete frame */
4217
4218                         bt_cb(skb)->pkt_type = type;
4219                         hci_recv_frame(hdev, skb);
4220
4221                         hdev->reassembly[index] = NULL;
4222                         return remain;
4223                 }
4224         }
4225
4226         return remain;
4227 }
4228
4229 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4230 {
4231         int rem = 0;
4232
4233         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4234                 return -EILSEQ;
4235
4236         while (count) {
4237                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4238                 if (rem < 0)
4239                         return rem;
4240
4241                 data += (count - rem);
4242                 count = rem;
4243         }
4244
4245         return rem;
4246 }
4247 EXPORT_SYMBOL(hci_recv_fragment);
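
/* Illustrative sketch (compiled out): a byte-stream transport (e.g. a UART
 * driver) that already knows the packet type can feed arbitrarily sized
 * buffers to hci_recv_fragment(); partial packets are kept in
 * hdev->reassembly across calls. example_rx() is hypothetical.
 */
#if 0
static int example_rx(struct hci_dev *hdev, u8 *buf, int len)
{
        /* Returns 0 once the whole buffer has been consumed (complete
         * frames are forwarded via hci_recv_frame()) or a negative error.
         */
        return hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
}
#endif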
4248
4249 #define STREAM_REASSEMBLY 0
4250
4251 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4252 {
4253         int type;
4254         int rem = 0;
4255
4256         while (count) {
4257                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4258
4259                 if (!skb) {
4260                         struct { char type; } *pkt;
4261
4262                         /* Start of the frame */
4263                         pkt = data;
4264                         type = pkt->type;
4265
4266                         data++;
4267                         count--;
4268                 } else
4269                         type = bt_cb(skb)->pkt_type;
4270
4271                 rem = hci_reassembly(hdev, type, data, count,
4272                                      STREAM_REASSEMBLY);
4273                 if (rem < 0)
4274                         return rem;
4275
4276                 data += (count - rem);
4277                 count = rem;
4278         }
4279
4280         return rem;
4281 }
4282 EXPORT_SYMBOL(hci_recv_stream_fragment);
4283
4284 /* ---- Interface to upper protocols ---- */
4285
4286 int hci_register_cb(struct hci_cb *cb)
4287 {
4288         BT_DBG("%p name %s", cb, cb->name);
4289
4290         write_lock(&hci_cb_list_lock);
4291         list_add(&cb->list, &hci_cb_list);
4292         write_unlock(&hci_cb_list_lock);
4293
4294         return 0;
4295 }
4296 EXPORT_SYMBOL(hci_register_cb);
4297
4298 int hci_unregister_cb(struct hci_cb *cb)
4299 {
4300         BT_DBG("%p name %s", cb, cb->name);
4301
4302         write_lock(&hci_cb_list_lock);
4303         list_del(&cb->list);
4304         write_unlock(&hci_cb_list_lock);
4305
4306         return 0;
4307 }
4308 EXPORT_SYMBOL(hci_unregister_cb);
4309
4310 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4311 {
4312         int err;
4313
4314         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4315
4316         /* Time stamp */
4317         __net_timestamp(skb);
4318
4319         /* Send copy to monitor */
4320         hci_send_to_monitor(hdev, skb);
4321
4322         if (atomic_read(&hdev->promisc)) {
4323                 /* Send copy to the sockets */
4324                 hci_send_to_sock(hdev, skb);
4325         }
4326
4327         /* Get rid of skb owner, prior to sending to the driver. */
4328         skb_orphan(skb);
4329
4330         err = hdev->send(hdev, skb);
4331         if (err < 0) {
4332                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4333                 kfree_skb(skb);
4334         }
4335 }
4336
4337 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4338 {
4339         skb_queue_head_init(&req->cmd_q);
4340         req->hdev = hdev;
4341         req->err = 0;
4342 }
4343
4344 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4345 {
4346         struct hci_dev *hdev = req->hdev;
4347         struct sk_buff *skb;
4348         unsigned long flags;
4349
4350         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4351
4352         /* If an error occurred during request building, remove all HCI
4353          * commands queued on the HCI request queue.
4354          */
4355         if (req->err) {
4356                 skb_queue_purge(&req->cmd_q);
4357                 return req->err;
4358         }
4359
4360         /* Do not allow empty requests */
4361         if (skb_queue_empty(&req->cmd_q))
4362                 return -ENODATA;
4363
4364         skb = skb_peek_tail(&req->cmd_q);
4365         bt_cb(skb)->req.complete = complete;
4366
4367         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4368         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4369         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4370
4371         queue_work(hdev->workqueue, &hdev->cmd_work);
4372
4373         return 0;
4374 }
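
/* Illustrative sketch (compiled out): building and running a two-command
 * request. example_req() and its completion callback are hypothetical; the
 * callback fires once the last queued command has completed.
 */
#if 0
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_req(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
        hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return hci_req_run(&req, example_req_complete);
}
#endif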
4375
4376 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4377                                        u32 plen, const void *param)
4378 {
4379         int len = HCI_COMMAND_HDR_SIZE + plen;
4380         struct hci_command_hdr *hdr;
4381         struct sk_buff *skb;
4382
4383         skb = bt_skb_alloc(len, GFP_ATOMIC);
4384         if (!skb)
4385                 return NULL;
4386
4387         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4388         hdr->opcode = cpu_to_le16(opcode);
4389         hdr->plen   = plen;
4390
4391         if (plen)
4392                 memcpy(skb_put(skb, plen), param, plen);
4393
4394         BT_DBG("skb len %d", skb->len);
4395
4396         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4397
4398         return skb;
4399 }
4400
4401 /* Send HCI command */
4402 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4403                  const void *param)
4404 {
4405         struct sk_buff *skb;
4406
4407         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4408
4409         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4410         if (!skb) {
4411                 BT_ERR("%s no memory for command", hdev->name);
4412                 return -ENOMEM;
4413         }
4414
4415         /* Stand-alone HCI commands must be flagged as
4416          * single-command requests.
4417          */
4418         bt_cb(skb)->req.start = true;
4419
4420         skb_queue_tail(&hdev->cmd_q, skb);
4421         queue_work(hdev->workqueue, &hdev->cmd_work);
4422
4423         return 0;
4424 }
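
/* Illustrative sketch (compiled out): a stand-alone command outside of any
 * request, here an HCI Reset with no parameters. example_reset() is
 * hypothetical.
 */
#if 0
static int example_reset(struct hci_dev *hdev)
{
        return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
#endif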
4425
4426 /* Queue a command to an asynchronous HCI request */
4427 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4428                     const void *param, u8 event)
4429 {
4430         struct hci_dev *hdev = req->hdev;
4431         struct sk_buff *skb;
4432
4433         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4434
4435         /* If an error occurred during request building, there is no point in
4436          * queueing the HCI command. We can simply return.
4437          */
4438         if (req->err)
4439                 return;
4440
4441         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4442         if (!skb) {
4443                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4444                        hdev->name, opcode);
4445                 req->err = -ENOMEM;
4446                 return;
4447         }
4448
4449         if (skb_queue_empty(&req->cmd_q))
4450                 bt_cb(skb)->req.start = true;
4451
4452         bt_cb(skb)->req.event = event;
4453
4454         skb_queue_tail(&req->cmd_q, skb);
4455 }
4456
4457 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4458                  const void *param)
4459 {
4460         hci_req_add_ev(req, opcode, plen, param, 0);
4461 }
4462
4463 /* Get data from the previously sent command */
4464 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4465 {
4466         struct hci_command_hdr *hdr;
4467
4468         if (!hdev->sent_cmd)
4469                 return NULL;
4470
4471         hdr = (void *) hdev->sent_cmd->data;
4472
4473         if (hdr->opcode != cpu_to_le16(opcode))
4474                 return NULL;
4475
4476         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4477
4478         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4479 }
4480
4481 /* Send ACL data */
4482 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4483 {
4484         struct hci_acl_hdr *hdr;
4485         int len = skb->len;
4486
4487         skb_push(skb, HCI_ACL_HDR_SIZE);
4488         skb_reset_transport_header(skb);
4489         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4490         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4491         hdr->dlen   = cpu_to_le16(len);
4492 }
4493
4494 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4495                           struct sk_buff *skb, __u16 flags)
4496 {
4497         struct hci_conn *conn = chan->conn;
4498         struct hci_dev *hdev = conn->hdev;
4499         struct sk_buff *list;
4500
4501         skb->len = skb_headlen(skb);
4502         skb->data_len = 0;
4503
4504         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4505
4506         switch (hdev->dev_type) {
4507         case HCI_BREDR:
4508                 hci_add_acl_hdr(skb, conn->handle, flags);
4509                 break;
4510         case HCI_AMP:
4511                 hci_add_acl_hdr(skb, chan->handle, flags);
4512                 break;
4513         default:
4514                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4515                 return;
4516         }
4517
4518         list = skb_shinfo(skb)->frag_list;
4519         if (!list) {
4520                 /* Non-fragmented */
4521                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4522
4523                 skb_queue_tail(queue, skb);
4524         } else {
4525                 /* Fragmented */
4526                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4527
4528                 skb_shinfo(skb)->frag_list = NULL;
4529
4530                 /* Queue all fragments atomically */
4531                 spin_lock(&queue->lock);
4532
4533                 __skb_queue_tail(queue, skb);
4534
4535                 flags &= ~ACL_START;
4536                 flags |= ACL_CONT;
4537                 do {
4538                         skb = list; list = list->next;
4539
4540                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4541                         hci_add_acl_hdr(skb, conn->handle, flags);
4542
4543                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4544
4545                         __skb_queue_tail(queue, skb);
4546                 } while (list);
4547
4548                 spin_unlock(&queue->lock);
4549         }
4550 }
4551
4552 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4553 {
4554         struct hci_dev *hdev = chan->conn->hdev;
4555
4556         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4557
4558         hci_queue_acl(chan, &chan->data_q, skb, flags);
4559
4560         queue_work(hdev->workqueue, &hdev->tx_work);
4561 }
4562
4563 /* Send SCO data */
4564 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4565 {
4566         struct hci_dev *hdev = conn->hdev;
4567         struct hci_sco_hdr hdr;
4568
4569         BT_DBG("%s len %d", hdev->name, skb->len);
4570
4571         hdr.handle = cpu_to_le16(conn->handle);
4572         hdr.dlen   = skb->len;
4573
4574         skb_push(skb, HCI_SCO_HDR_SIZE);
4575         skb_reset_transport_header(skb);
4576         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4577
4578         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4579
4580         skb_queue_tail(&conn->data_q, skb);
4581         queue_work(hdev->workqueue, &hdev->tx_work);
4582 }
4583
4584 /* ---- HCI TX task (outgoing data) ---- */
4585
4586 /* HCI Connection scheduler */
4587 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4588                                      int *quote)
4589 {
4590         struct hci_conn_hash *h = &hdev->conn_hash;
4591         struct hci_conn *conn = NULL, *c;
4592         unsigned int num = 0, min = ~0;
4593
4594         /* We don't have to lock the device here. Connections are always
4595          * added and removed with the TX task disabled. */
4596
4597         rcu_read_lock();
4598
4599         list_for_each_entry_rcu(c, &h->list, list) {
4600                 if (c->type != type || skb_queue_empty(&c->data_q))
4601                         continue;
4602
4603                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4604                         continue;
4605
4606                 num++;
4607
4608                 if (c->sent < min) {
4609                         min  = c->sent;
4610                         conn = c;
4611                 }
4612
4613                 if (hci_conn_num(hdev, type) == num)
4614                         break;
4615         }
4616
4617         rcu_read_unlock();
4618
4619         if (conn) {
4620                 int cnt, q;
4621
4622                 switch (conn->type) {
4623                 case ACL_LINK:
4624                         cnt = hdev->acl_cnt;
4625                         break;
4626                 case SCO_LINK:
4627                 case ESCO_LINK:
4628                         cnt = hdev->sco_cnt;
4629                         break;
4630                 case LE_LINK:
4631                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4632                         break;
4633                 default:
4634                         cnt = 0;
4635                         BT_ERR("Unknown link type");
4636                 }
4637
4638                 q = cnt / num;
4639                 *quote = q ? q : 1;
4640         } else
4641                 *quote = 0;
4642
4643         BT_DBG("conn %p quote %d", conn, *quote);
4644         return conn;
4645 }
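
/* Worked example for the quote computed above (illustrative numbers): with
 * cnt = 7 free ACL slots and num = 3 eligible connections, q = 7 / 3 = 2,
 * so the least-recently-served connection may send two packets this round.
 * When cnt / num rounds down to zero the quote is clamped to 1 so no
 * connection is ever starved.
 */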
4646
4647 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4648 {
4649         struct hci_conn_hash *h = &hdev->conn_hash;
4650         struct hci_conn *c;
4651
4652         BT_ERR("%s link tx timeout", hdev->name);
4653
4654         rcu_read_lock();
4655
4656         /* Kill stalled connections */
4657         list_for_each_entry_rcu(c, &h->list, list) {
4658                 if (c->type == type && c->sent) {
4659                         BT_ERR("%s killing stalled connection %pMR",
4660                                hdev->name, &c->dst);
4661                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4662                 }
4663         }
4664
4665         rcu_read_unlock();
4666 }
4667
4668 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4669                                       int *quote)
4670 {
4671         struct hci_conn_hash *h = &hdev->conn_hash;
4672         struct hci_chan *chan = NULL;
4673         unsigned int num = 0, min = ~0, cur_prio = 0;
4674         struct hci_conn *conn;
4675         int cnt, q, conn_num = 0;
4676
4677         BT_DBG("%s", hdev->name);
4678
4679         rcu_read_lock();
4680
4681         list_for_each_entry_rcu(conn, &h->list, list) {
4682                 struct hci_chan *tmp;
4683
4684                 if (conn->type != type)
4685                         continue;
4686
4687                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4688                         continue;
4689
4690                 conn_num++;
4691
4692                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4693                         struct sk_buff *skb;
4694
4695                         if (skb_queue_empty(&tmp->data_q))
4696                                 continue;
4697
4698                         skb = skb_peek(&tmp->data_q);
4699                         if (skb->priority < cur_prio)
4700                                 continue;
4701
4702                         if (skb->priority > cur_prio) {
4703                                 num = 0;
4704                                 min = ~0;
4705                                 cur_prio = skb->priority;
4706                         }
4707
4708                         num++;
4709
4710                         if (conn->sent < min) {
4711                                 min  = conn->sent;
4712                                 chan = tmp;
4713                         }
4714                 }
4715
4716                 if (hci_conn_num(hdev, type) == conn_num)
4717                         break;
4718         }
4719
4720         rcu_read_unlock();
4721
4722         if (!chan)
4723                 return NULL;
4724
4725         switch (chan->conn->type) {
4726         case ACL_LINK:
4727                 cnt = hdev->acl_cnt;
4728                 break;
4729         case AMP_LINK:
4730                 cnt = hdev->block_cnt;
4731                 break;
4732         case SCO_LINK:
4733         case ESCO_LINK:
4734                 cnt = hdev->sco_cnt;
4735                 break;
4736         case LE_LINK:
4737                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4738                 break;
4739         default:
4740                 cnt = 0;
4741                 BT_ERR("Unknown link type");
4742         }
4743
4744         q = cnt / num;
4745         *quote = q ? q : 1;
4746         BT_DBG("chan %p quote %d", chan, *quote);
4747         return chan;
4748 }
4749
4750 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4751 {
4752         struct hci_conn_hash *h = &hdev->conn_hash;
4753         struct hci_conn *conn;
4754         int num = 0;
4755
4756         BT_DBG("%s", hdev->name);
4757
4758         rcu_read_lock();
4759
4760         list_for_each_entry_rcu(conn, &h->list, list) {
4761                 struct hci_chan *chan;
4762
4763                 if (conn->type != type)
4764                         continue;
4765
4766                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4767                         continue;
4768
4769                 num++;
4770
4771                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4772                         struct sk_buff *skb;
4773
4774                         if (chan->sent) {
4775                                 chan->sent = 0;
4776                                 continue;
4777                         }
4778
4779                         if (skb_queue_empty(&chan->data_q))
4780                                 continue;
4781
4782                         skb = skb_peek(&chan->data_q);
4783                         if (skb->priority >= HCI_PRIO_MAX - 1)
4784                                 continue;
4785
4786                         skb->priority = HCI_PRIO_MAX - 1;
4787
4788                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4789                                skb->priority);
4790                 }
4791
4792                 if (hci_conn_num(hdev, type) == num)
4793                         break;
4794         }
4795
4796         rcu_read_unlock();
4797
4798 }
4799
4800 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4801 {
4802         /* Calculate count of blocks used by this packet */
4803         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4804 }
4805
4806 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4807 {
4808         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4809                 /* ACL tx timeout must be longer than the maximum
4810                  * link supervision timeout (40.9 seconds) */
4811                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4812                                        HCI_ACL_TX_TIMEOUT))
4813                         hci_link_tx_to(hdev, ACL_LINK);
4814         }
4815 }
4816
4817 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4818 {
4819         unsigned int cnt = hdev->acl_cnt;
4820         struct hci_chan *chan;
4821         struct sk_buff *skb;
4822         int quote;
4823
4824         __check_timeout(hdev, cnt);
4825
4826         while (hdev->acl_cnt &&
4827                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4828                 u32 priority = (skb_peek(&chan->data_q))->priority;
4829                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4830                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4831                                skb->len, skb->priority);
4832
4833                         /* Stop if priority has changed */
4834                         if (skb->priority < priority)
4835                                 break;
4836
4837                         skb = skb_dequeue(&chan->data_q);
4838
4839                         hci_conn_enter_active_mode(chan->conn,
4840                                                    bt_cb(skb)->force_active);
4841
4842                         hci_send_frame(hdev, skb);
4843                         hdev->acl_last_tx = jiffies;
4844
4845                         hdev->acl_cnt--;
4846                         chan->sent++;
4847                         chan->conn->sent++;
4848                 }
4849         }
4850
4851         if (cnt != hdev->acl_cnt)
4852                 hci_prio_recalculate(hdev, ACL_LINK);
4853 }
4854
4855 static void hci_sched_acl_blk(struct hci_dev *hdev)
4856 {
4857         unsigned int cnt = hdev->block_cnt;
4858         struct hci_chan *chan;
4859         struct sk_buff *skb;
4860         int quote;
4861         u8 type;
4862
4863         __check_timeout(hdev, cnt);
4864
4865         BT_DBG("%s", hdev->name);
4866
4867         if (hdev->dev_type == HCI_AMP)
4868                 type = AMP_LINK;
4869         else
4870                 type = ACL_LINK;
4871
4872         while (hdev->block_cnt > 0 &&
4873                (chan = hci_chan_sent(hdev, type, &quote))) {
4874                 u32 priority = (skb_peek(&chan->data_q))->priority;
4875                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4876                         int blocks;
4877
4878                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4879                                skb->len, skb->priority);
4880
4881                         /* Stop if priority has changed */
4882                         if (skb->priority < priority)
4883                                 break;
4884
4885                         skb = skb_dequeue(&chan->data_q);
4886
4887                         blocks = __get_blocks(hdev, skb);
4888                         if (blocks > hdev->block_cnt)
4889                                 return;
4890
4891                         hci_conn_enter_active_mode(chan->conn,
4892                                                    bt_cb(skb)->force_active);
4893
4894                         hci_send_frame(hdev, skb);
4895                         hdev->acl_last_tx = jiffies;
4896
4897                         hdev->block_cnt -= blocks;
4898                         quote -= blocks;
4899
4900                         chan->sent += blocks;
4901                         chan->conn->sent += blocks;
4902                 }
4903         }
4904
4905         if (cnt != hdev->block_cnt)
4906                 hci_prio_recalculate(hdev, type);
4907 }
4908
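/* Dispatch ACL scheduling according to the flow control mode reported by
 * the controller: classic packet-based accounting or the block-based
 * accounting typically used by AMP controllers.
 */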
4909 static void hci_sched_acl(struct hci_dev *hdev)
4910 {
4911         BT_DBG("%s", hdev->name);
4912
4913         /* A BR/EDR controller with no ACL links has nothing to schedule */
4914         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4915                 return;
4916
4917         /* Likewise, an AMP controller with no AMP links has nothing to do */
4918         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4919                 return;
4920
4921         switch (hdev->flow_ctl_mode) {
4922         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4923                 hci_sched_acl_pkt(hdev);
4924                 break;
4925
4926         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4927                 hci_sched_acl_blk(hdev);
4928                 break;
4929         }
4930 }
4931
4932 /* Schedule SCO */
4933 static void hci_sched_sco(struct hci_dev *hdev)
4934 {
4935         struct hci_conn *conn;
4936         struct sk_buff *skb;
4937         int quote;
4938
4939         BT_DBG("%s", hdev->name);
4940
4941         if (!hci_conn_num(hdev, SCO_LINK))
4942                 return;
4943
4944         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4945                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4946                         BT_DBG("skb %p len %d", skb, skb->len);
4947                         hci_send_frame(hdev, skb);
4948
4949                         conn->sent++;
4950                         if (conn->sent == ~0)
4951                                 conn->sent = 0;
4952                 }
4953         }
4954 }
4955
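/* Schedule eSCO */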
4956 static void hci_sched_esco(struct hci_dev *hdev)
4957 {
4958         struct hci_conn *conn;
4959         struct sk_buff *skb;
4960         int quote;
4961
4962         BT_DBG("%s", hdev->name);
4963
4964         if (!hci_conn_num(hdev, ESCO_LINK))
4965                 return;
4966
4967         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4968                                                      &quote))) {
4969                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4970                         BT_DBG("skb %p len %d", skb, skb->len);
4971                         hci_send_frame(hdev, skb);
4972
4973                         conn->sent++;
4974                         if (conn->sent == ~0)
4975                                 conn->sent = 0;
4976                 }
4977         }
4978 }
4979
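/* Schedule LE: like the packet-based ACL scheduler, except that LE
 * traffic either uses the controller's dedicated LE buffer pool
 * (le_pkts/le_cnt) or, when the controller reports none, falls back to
 * sharing the ACL buffers.
 */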
4980 static void hci_sched_le(struct hci_dev *hdev)
4981 {
4982         struct hci_chan *chan;
4983         struct sk_buff *skb;
4984         int quote, cnt, tmp;
4985
4986         BT_DBG("%s", hdev->name);
4987
4988         if (!hci_conn_num(hdev, LE_LINK))
4989                 return;
4990
4991         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4992                 /* LE tx timeout must be longer than maximum
4993                  * link supervision timeout (40.9 seconds) */
4994                 if (!hdev->le_cnt && hdev->le_pkts &&
4995                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4996                         hci_link_tx_to(hdev, LE_LINK);
4997         }
4998
4999         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5000         tmp = cnt;
5001         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5002                 u32 priority = (skb_peek(&chan->data_q))->priority;
5003                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5004                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5005                                skb->len, skb->priority);
5006
5007                         /* Stop if priority has changed */
5008                         if (skb->priority < priority)
5009                                 break;
5010
5011                         skb = skb_dequeue(&chan->data_q);
5012
5013                         hci_send_frame(hdev, skb);
5014                         hdev->le_last_tx = jiffies;
5015
5016                         cnt--;
5017                         chan->sent++;
5018                         chan->conn->sent++;
5019                 }
5020         }
5021
5022         if (hdev->le_pkts)
5023                 hdev->le_cnt = cnt;
5024         else
5025                 hdev->acl_cnt = cnt;
5026
5027         if (cnt != tmp)
5028                 hci_prio_recalculate(hdev, LE_LINK);
5029 }
5030
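/* TX work: runs the per-link-type schedulers above and then flushes any
 * raw packets queued directly by user space. When the device is bound to
 * a user channel socket, scheduling is skipped and only the raw queue is
 * drained.
 */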
5031 static void hci_tx_work(struct work_struct *work)
5032 {
5033         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5034         struct sk_buff *skb;
5035
5036         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5037                hdev->sco_cnt, hdev->le_cnt);
5038
5039         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5040                 /* Schedule queues and send pending frames to the HCI driver */
5041                 hci_sched_acl(hdev);
5042                 hci_sched_sco(hdev);
5043                 hci_sched_esco(hdev);
5044                 hci_sched_le(hdev);
5045         }
5046
5047         /* Send next queued raw (unknown type) packet */
5048         while ((skb = skb_dequeue(&hdev->raw_q)))
5049                 hci_send_frame(hdev, skb);
5050 }
5051
5052 /* ----- HCI RX task (incoming data processing) ----- */
5053
5054 /* ACL data packet */
5055 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5056 {
5057         struct hci_acl_hdr *hdr = (void *) skb->data;
5058         struct hci_conn *conn;
5059         __u16 handle, flags;
5060
5061         skb_pull(skb, HCI_ACL_HDR_SIZE);
5062
5063         handle = __le16_to_cpu(hdr->handle);
5064         flags  = hci_flags(handle);
5065         handle = hci_handle(handle);
5066
5067         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5068                handle, flags);
5069
5070         hdev->stat.acl_rx++;
5071
5072         hci_dev_lock(hdev);
5073         conn = hci_conn_hash_lookup_handle(hdev, handle);
5074         hci_dev_unlock(hdev);
5075
5076         if (conn) {
5077                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5078
5079                 /* Send to upper protocol */
5080                 l2cap_recv_acldata(conn, skb, flags);
5081                 return;
5082         } else {
5083                 BT_ERR("%s ACL packet for unknown connection handle %d",
5084                        hdev->name, handle);
5085         }
5086
5087         kfree_skb(skb);
5088 }
5089
5090 /* SCO data packet */
5091 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5092 {
5093         struct hci_sco_hdr *hdr = (void *) skb->data;
5094         struct hci_conn *conn;
5095         __u16 handle;
5096
5097         skb_pull(skb, HCI_SCO_HDR_SIZE);
5098
5099         handle = __le16_to_cpu(hdr->handle);
5100
5101         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5102
5103         hdev->stat.sco_rx++;
5104
5105         hci_dev_lock(hdev);
5106         conn = hci_conn_hash_lookup_handle(hdev, handle);
5107         hci_dev_unlock(hdev);
5108
5109         if (conn) {
5110                 /* Send to upper protocol */
5111                 sco_recv_scodata(conn, skb);
5112                 return;
5113         } else {
5114                 BT_ERR("%s SCO packet for unknown connection handle %d",
5115                        hdev->name, handle);
5116         }
5117
5118         kfree_skb(skb);
5119 }
5120
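/* Commands queued as part of one HCI request are marked so that only the
 * first command of a request has req.start set. The current request is
 * therefore complete once the command queue is empty or its head starts a
 * new request.
 */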
5121 static bool hci_req_is_complete(struct hci_dev *hdev)
5122 {
5123         struct sk_buff *skb;
5124
5125         skb = skb_peek(&hdev->cmd_q);
5126         if (!skb)
5127                 return true;
5128
5129         return bt_cb(skb)->req.start;
5130 }
5131
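/* Requeue a clone of the last sent command at the head of the command
 * queue so that it gets sent again. If the last command was itself
 * HCI_Reset, there is nothing useful to resend.
 */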
5132 static void hci_resend_last(struct hci_dev *hdev)
5133 {
5134         struct hci_command_hdr *sent;
5135         struct sk_buff *skb;
5136         u16 opcode;
5137
5138         if (!hdev->sent_cmd)
5139                 return;
5140
5141         sent = (void *) hdev->sent_cmd->data;
5142         opcode = __le16_to_cpu(sent->opcode);
5143         if (opcode == HCI_OP_RESET)
5144                 return;
5145
5146         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5147         if (!skb)
5148                 return;
5149
5150         skb_queue_head(&hdev->cmd_q, skb);
5151         queue_work(hdev->workqueue, &hdev->cmd_work);
5152 }
5153
5154 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5155 {
5156         hci_req_complete_t req_complete = NULL;
5157         struct sk_buff *skb;
5158         unsigned long flags;
5159
5160         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5161
5162         /* If the completed command doesn't match the last one that was
5163          * sent, we need to handle it specially.
5164          */
5165         if (!hci_sent_cmd_data(hdev, opcode)) {
5166                 /* Some CSR based controllers generate a spontaneous
5167                  * reset complete event during init and any pending
5168                  * command will never be completed. In such a case we
5169                  * need to resend whatever was the last sent
5170                  * command.
5171                  */
5172                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5173                         hci_resend_last(hdev);
5174
5175                 return;
5176         }
5177
5178         /* If the command succeeded and there are still more commands in
5179          * this request, the request is not yet complete.
5180          */
5181         if (!status && !hci_req_is_complete(hdev))
5182                 return;
5183
5184         /* If this was the last command in a request, the complete
5185          * callback is found in hdev->sent_cmd instead of the
5186          * command queue (hdev->cmd_q).
5187          */
5188         if (hdev->sent_cmd) {
5189                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5190
5191                 if (req_complete) {
5192                         /* We must set the complete callback to NULL to
5193                          * avoid calling the callback more than once if
5194                          * this function gets called again.
5195                          */
5196                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5197
5198                         goto call_complete;
5199                 }
5200         }
5201
5202         /* Remove all pending commands belonging to this request */
5203         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5204         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5205                 if (bt_cb(skb)->req.start) {
5206                         __skb_queue_head(&hdev->cmd_q, skb);
5207                         break;
5208                 }
5209
5210                 req_complete = bt_cb(skb)->req.complete;
5211                 kfree_skb(skb);
5212         }
5213         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5214
5215 call_complete:
5216         if (req_complete)
5217                 req_complete(hdev, status);
5218 }
5219
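/* RX work: drains the receive queue, mirroring every packet to the
 * monitor and, in promiscuous mode, to the raw sockets, before
 * dispatching events and data packets to their handlers. In user channel
 * mode the kernel stack does not process the packets at all.
 */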
5220 static void hci_rx_work(struct work_struct *work)
5221 {
5222         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5223         struct sk_buff *skb;
5224
5225         BT_DBG("%s", hdev->name);
5226
5227         while ((skb = skb_dequeue(&hdev->rx_q))) {
5228                 /* Send copy to monitor */
5229                 hci_send_to_monitor(hdev, skb);
5230
5231                 if (atomic_read(&hdev->promisc)) {
5232                         /* Send copy to the sockets */
5233                         hci_send_to_sock(hdev, skb);
5234                 }
5235
5236                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5237                         kfree_skb(skb);
5238                         continue;
5239                 }
5240
5241                 if (test_bit(HCI_INIT, &hdev->flags)) {
5242                         /* Don't process data packets in this state. */
5243                         switch (bt_cb(skb)->pkt_type) {
5244                         case HCI_ACLDATA_PKT:
5245                         case HCI_SCODATA_PKT:
5246                                 kfree_skb(skb);
5247                                 continue;
5248                         }
5249                 }
5250
5251                 /* Process frame */
5252                 switch (bt_cb(skb)->pkt_type) {
5253                 case HCI_EVENT_PKT:
5254                         BT_DBG("%s Event packet", hdev->name);
5255                         hci_event_packet(hdev, skb);
5256                         break;
5257
5258                 case HCI_ACLDATA_PKT:
5259                         BT_DBG("%s ACL data packet", hdev->name);
5260                         hci_acldata_packet(hdev, skb);
5261                         break;
5262
5263                 case HCI_SCODATA_PKT:
5264                         BT_DBG("%s SCO data packet", hdev->name);
5265                         hci_scodata_packet(hdev, skb);
5266                         break;
5267
5268                 default:
5269                         kfree_skb(skb);
5270                         break;
5271                 }
5272         }
5273 }
5274
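/* Command work: sends the next queued command whenever the controller has
 * a free command slot (cmd_cnt), keeping a clone in hdev->sent_cmd so the
 * matching command complete/status event can be paired with it, and arms
 * the command timeout unless a reset is in progress.
 */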
5275 static void hci_cmd_work(struct work_struct *work)
5276 {
5277         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5278         struct sk_buff *skb;
5279
5280         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5281                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5282
5283         /* Send queued commands */
5284         if (atomic_read(&hdev->cmd_cnt)) {
5285                 skb = skb_dequeue(&hdev->cmd_q);
5286                 if (!skb)
5287                         return;
5288
5289                 kfree_skb(hdev->sent_cmd);
5290
5291                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5292                 if (hdev->sent_cmd) {
5293                         atomic_dec(&hdev->cmd_cnt);
5294                         hci_send_frame(hdev, skb);
5295                         if (test_bit(HCI_RESET, &hdev->flags))
5296                                 cancel_delayed_work(&hdev->cmd_timer);
5297                         else
5298                                 schedule_delayed_work(&hdev->cmd_timer,
5299                                                       HCI_CMD_TIMEOUT);
5300                 } else {
5301                         skb_queue_head(&hdev->cmd_q, skb);
5302                         queue_work(hdev->workqueue, &hdev->cmd_work);
5303                 }
5304         }
5305 }
5306
5307 void hci_req_add_le_scan_disable(struct hci_request *req)
5308 {
5309         struct hci_cp_le_set_scan_enable cp;
5310
5311         memset(&cp, 0, sizeof(cp));
5312         cp.enable = LE_SCAN_DISABLE;
5313         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5314 }
5315
5316 void hci_req_add_le_passive_scan(struct hci_request *req)
5317 {
5318         struct hci_cp_le_set_scan_param param_cp;
5319         struct hci_cp_le_set_scan_enable enable_cp;
5320         struct hci_dev *hdev = req->hdev;
5321         u8 own_addr_type;
5322
5323         /* Set require_privacy to false since no SCAN_REQs are sent
5324          * during passive scanning. Not using an unresolvable address
5325          * here is important so that peer devices using direct
5326          * advertising with our address will be correctly reported
5327          * by the controller.
5328          */
5329         if (hci_update_random_address(req, false, &own_addr_type))
5330                 return;
5331
5332         memset(&param_cp, 0, sizeof(param_cp));
5333         param_cp.type = LE_SCAN_PASSIVE;
5334         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5335         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5336         param_cp.own_address_type = own_addr_type;
5337         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5338                     &param_cp);
5339
5340         memset(&enable_cp, 0, sizeof(enable_cp));
5341         enable_cp.enable = LE_SCAN_ENABLE;
5342         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5343         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5344                     &enable_cp);
5345 }
5346
5347 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5348 {
5349         if (status)
5350                 BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
5351                        status);
5352 }
5353
5354 /* This function controls background scanning based on the
5355  * hdev->pend_le_conns list. If there are pending LE connections we start
5356  * background scanning, otherwise we stop it.
5357  *
5358  * This function requires that the caller holds hdev->lock.
5359  */
5360 void hci_update_background_scan(struct hci_dev *hdev)
5361 {
5362         struct hci_request req;
5363         struct hci_conn *conn;
5364         int err;
5365
5366         if (!test_bit(HCI_UP, &hdev->flags) ||
5367             test_bit(HCI_INIT, &hdev->flags) ||
5368             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5369             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5370             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5371             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5372                 return;
5373
5374         /* No point in doing scanning if LE support hasn't been enabled */
5375         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5376                 return;
5377
5378         /* If discovery is active don't interfere with it */
5379         if (hdev->discovery.state != DISCOVERY_STOPPED)
5380                 return;
5381
5382         hci_req_init(&req, hdev);
5383
5384         if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5385             list_empty(&hdev->pend_le_conns) &&
5386             list_empty(&hdev->pend_le_reports)) {
5387                 /* If there are no pending LE connections or devices
5388                  * to be scanned for, we should stop background
5389                  * scanning.
5390                  */
5391
5392                 /* If controller is not scanning we are done. */
5393                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5394                         return;
5395
5396                 hci_req_add_le_scan_disable(&req);
5397
5398                 BT_DBG("%s stopping background scanning", hdev->name);
5399         } else {
5400                 /* If there is at least one pending LE connection, we should
5401                  * keep the background scan running.
5402                  */
5403
5404                 /* If controller is connecting, we should not start scanning
5405                  * since some controllers are not able to scan and connect at
5406                  * the same time.
5407                  */
5408                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5409                 if (conn)
5410                         return;
5411
5412                 /* If controller is currently scanning, we stop it to ensure we
5413                  * don't miss any advertising (due to duplicates filter).
5414                  */
5415                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5416                         hci_req_add_le_scan_disable(&req);
5417
5418                 hci_req_add_le_passive_scan(&req);
5419
5420                 BT_DBG("%s starting background scanning", hdev->name);
5421         }
5422
5423         err = hci_req_run(&req, update_background_scan_complete);
5424         if (err)
5425                 BT_ERR("Failed to run HCI request: err %d", err);
5426 }