/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

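/* Expose the Device Under Test (DUT) mode as a boolean debugfs entry.
 * Reading reports 'Y' or 'N' depending on the HCI_DUT_MODE debug flag.
 * Writing only works while the device is up and enters DUT mode via
 * HCI_OP_ENABLE_DUT_MODE or leaves it again via HCI_OP_RESET.
 */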
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

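/* Dump all known local feature pages (up to the controller's max_page)
 * and, for LE capable controllers, the LE feature bits as well, one hex
 * formatted line per page.
 */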
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

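/* The force_sc_support and force_static_address entries below follow the
 * same Y/N boolean convention as dut_mode, but they only toggle a debug
 * flag and may only be written while the device is down (writes while
 * HCI_UP is set return -EBUSY).
 */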
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

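/* The idle timeout is kept in milliseconds: 0 disables the idle timer,
 * any other value must lie between 500 (0.5 seconds) and 3600000 (one
 * hour).
 */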
static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

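/* Sniff intervals are expressed in baseband slots of 0.625 ms and must
 * be even per the Bluetooth specification, hence the val % 2 check; the
 * setters also keep sniff_min_interval <= sniff_max_interval.
 */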
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

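/* LE connection intervals are in units of 1.25 ms. The valid range of
 * 0x0006 to 0x0c80 corresponds to 7.5 ms through 4 seconds, and the
 * setters additionally keep le_conn_min_interval <= le_conn_max_interval.
 */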
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

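/* Completion callback for synchronous requests: store the result, mark
 * the request as done and wake up the thread sleeping in __hci_req_sync()
 * or __hci_cmd_sync_ev().
 */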
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

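/* Take the last received event (hdev->recv_evt) and check that it is the
 * one a synchronous command is waiting for: either the specific event
 * that was requested, or a Command Complete whose opcode matches.
 * Returns the skb on success and ERR_PTR(-ENODATA) otherwise.
 */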
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

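/* Send a single HCI command and sleep until the matching event arrives
 * or the timeout expires. When event is 0, a Command Complete for opcode
 * is expected. Callers are expected to serialize calls through
 * hci_req_lock(), as dut_mode_write() above does.
 */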
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

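/* Locked wrapper around __hci_req_sync(): requires the device to be up
 * and serializes all synchronous requests on the request lock.
 */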
static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

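/* Stage 1 of the controller bring-up: reset the controller (skipped when
 * the HCI_QUIRK_RESET_ON_CLOSE quirk is set) and run the type specific
 * identity reads via bredr_init() or amp_init().
 */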
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

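/* Pick the best inquiry mode the controller supports: 0x02 for inquiry
 * with extended results, 0x01 for inquiry with RSSI reporting and 0x00
 * for standard inquiry. The manufacturer/revision matches below catch
 * controllers that support RSSI inquiry without advertising the feature
 * bit.
 */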
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * also be available. However some controllers list the
                 * max_page as 0 as long as SSP has not been enabled. To
                 * achieve proper debugging output, force max_page to a
                 * minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

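/* Build the default link policy from the supported LMP features and
 * program it with the Write Default Link Policy Settings command.
 */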
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x1f;

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

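/* Run the staged init sequence. Stage 1 applies to every controller
 * type; stages 2 to 4 only run for HCI_BREDR controllers. Most debugfs
 * entries are created only once, during the initial HCI_SETUP phase.
 */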
1661 static int __hci_init(struct hci_dev *hdev)
1662 {
1663         int err;
1664
1665         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1666         if (err < 0)
1667                 return err;
1668
1669         /* The Device Under Test (DUT) mode is special and available for
1670          * all controller types. So just create it early on.
1671          */
1672         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1674                                     &dut_mode_fops);
1675         }
1676
1677         /* The HCI_BREDR device type covers single-mode LE, single-mode
1678          * BR/EDR and dual-mode BR/EDR/LE controllers alike. AMP
1679          * controllers only need the first stage init.
1680          */
1681         if (hdev->dev_type != HCI_BREDR)
1682                 return 0;
1683
1684         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685         if (err < 0)
1686                 return err;
1687
1688         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689         if (err < 0)
1690                 return err;
1691
1692         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693         if (err < 0)
1694                 return err;
1695
1696         /* Only create debugfs entries during the initial setup
1697          * phase and not every time the controller gets powered on.
1698          */
1699         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700                 return 0;
1701
1702         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703                             &features_fops);
1704         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705                            &hdev->manufacturer);
1706         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709                             &blacklist_fops);
1710         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1711
1712         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713                             &conn_info_min_age_fops);
1714         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715                             &conn_info_max_age_fops);
1716
1717         if (lmp_bredr_capable(hdev)) {
1718                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719                                     hdev, &inquiry_cache_fops);
1720                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721                                     hdev, &link_keys_fops);
1722                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723                                     hdev, &dev_class_fops);
1724                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725                                     hdev, &voice_setting_fops);
1726         }
1727
1728         if (lmp_ssp_capable(hdev)) {
1729                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730                                     hdev, &auto_accept_delay_fops);
1731                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732                                     hdev, &force_sc_support_fops);
1733                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734                                     hdev, &sc_only_mode_fops);
1735         }
1736
1737         if (lmp_sniff_capable(hdev)) {
1738                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739                                     hdev, &idle_timeout_fops);
1740                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741                                     hdev, &sniff_min_interval_fops);
1742                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743                                     hdev, &sniff_max_interval_fops);
1744         }
1745
1746         if (lmp_le_capable(hdev)) {
1747                 debugfs_create_file("identity", 0400, hdev->debugfs,
1748                                     hdev, &identity_fops);
1749                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750                                     hdev, &rpa_timeout_fops);
1751                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752                                     hdev, &random_address_fops);
1753                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754                                     hdev, &static_address_fops);
1755
1756                 /* For controllers with a public address, provide a debug
1757                  * option to force the usage of the configured static
1758                  * address. By default the public address is used.
1759                  */
1760                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761                         debugfs_create_file("force_static_address", 0644,
1762                                             hdev->debugfs, hdev,
1763                                             &force_static_address_fops);
1764
1765                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766                                   &hdev->le_white_list_size);
1767                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768                                     &white_list_fops);
1769                 debugfs_create_file("identity_resolving_keys", 0400,
1770                                     hdev->debugfs, hdev,
1771                                     &identity_resolving_keys_fops);
1772                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773                                     hdev, &long_term_keys_fops);
1774                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775                                     hdev, &conn_min_interval_fops);
1776                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777                                     hdev, &conn_max_interval_fops);
1778                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779                                     hdev, &conn_latency_fops);
1780                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781                                     hdev, &supervision_timeout_fops);
1782                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783                                     hdev, &adv_channel_map_fops);
1784                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785                                     &device_list_fops);
1786                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787                                    hdev->debugfs,
1788                                    &hdev->discov_interleaved_timeout);
1789         }
1790
1791         return 0;
1792 }
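
/* All of the entries created above appear under the per-controller
 * debugfs directory, e.g. /sys/kernel/debug/bluetooth/hci0/ when
 * debugfs is mounted at its default location, where they can be read
 * and (for the writable ones) written as plain text files.
 */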
1793
1794 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1795 {
1796         struct hci_dev *hdev = req->hdev;
1797
1798         BT_DBG("%s %ld", hdev->name, opt);
1799
1800         /* Reset */
1801         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1802                 hci_reset_req(req, 0);
1803
1804         /* Read Local Version */
1805         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1806
1807         /* Read BD Address */
1808         if (hdev->set_bdaddr)
1809                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1810 }
1811
1812 static int __hci_unconf_init(struct hci_dev *hdev)
1813 {
1814         int err;
1815
1816         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1817                 return 0;
1818
1819         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1820         if (err < 0)
1821                 return err;
1822
1823         return 0;
1824 }
1825
1826 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1827 {
1828         __u8 scan = opt;
1829
1830         BT_DBG("%s %x", req->hdev->name, scan);
1831
1832         /* Inquiry and Page scans */
1833         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1834 }
1835
1836 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1837 {
1838         __u8 auth = opt;
1839
1840         BT_DBG("%s %x", req->hdev->name, auth);
1841
1842         /* Authentication */
1843         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1844 }
1845
1846 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1847 {
1848         __u8 encrypt = opt;
1849
1850         BT_DBG("%s %x", req->hdev->name, encrypt);
1851
1852         /* Encryption */
1853         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1854 }
1855
1856 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1857 {
1858         __le16 policy = cpu_to_le16(opt);
1859
1860         BT_DBG("%s %x", req->hdev->name, policy);
1861
1862         /* Default link policy */
1863         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1864 }
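
/* Usage sketch: each of the small request builders above is driven
 * synchronously from the ioctl helpers further below, e.g. from
 * hci_dev_cmd():
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 *
 * The opt word is simply forwarded to the builder, here as the Write
 * Scan Enable value.
 */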
1865
1866 /* Get HCI device by index.
1867  * Device is held on return. */
1868 struct hci_dev *hci_dev_get(int index)
1869 {
1870         struct hci_dev *hdev = NULL, *d;
1871
1872         BT_DBG("%d", index);
1873
1874         if (index < 0)
1875                 return NULL;
1876
1877         read_lock(&hci_dev_list_lock);
1878         list_for_each_entry(d, &hci_dev_list, list) {
1879                 if (d->id == index) {
1880                         hdev = hci_dev_hold(d);
1881                         break;
1882                 }
1883         }
1884         read_unlock(&hci_dev_list_lock);
1885         return hdev;
1886 }
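
/* Usage note: the reference taken by hci_dev_hold() above must be
 * balanced with hci_dev_put() by the caller, following the pattern
 * used throughout the ioctl helpers below:
 *
 *	hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */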
1887
1888 /* ---- Inquiry support ---- */
1889
1890 bool hci_discovery_active(struct hci_dev *hdev)
1891 {
1892         struct discovery_state *discov = &hdev->discovery;
1893
1894         switch (discov->state) {
1895         case DISCOVERY_FINDING:
1896         case DISCOVERY_RESOLVING:
1897                 return true;
1898
1899         default:
1900                 return false;
1901         }
1902 }
1903
1904 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1905 {
1906         int old_state = hdev->discovery.state;
1907
1908         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1909
1910         if (old_state == state)
1911                 return;
1912
1913         hdev->discovery.state = state;
1914
1915         switch (state) {
1916         case DISCOVERY_STOPPED:
1917                 hci_update_background_scan(hdev);
1918
1919                 if (old_state != DISCOVERY_STARTING)
1920                         mgmt_discovering(hdev, 0);
1921                 break;
1922         case DISCOVERY_STARTING:
1923                 break;
1924         case DISCOVERY_FINDING:
1925                 mgmt_discovering(hdev, 1);
1926                 break;
1927         case DISCOVERY_RESOLVING:
1928                 break;
1929         case DISCOVERY_STOPPING:
1930                 break;
1931         }
1932 }
1933
1934 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1935 {
1936         struct discovery_state *cache = &hdev->discovery;
1937         struct inquiry_entry *p, *n;
1938
1939         list_for_each_entry_safe(p, n, &cache->all, all) {
1940                 list_del(&p->all);
1941                 kfree(p);
1942         }
1943
1944         INIT_LIST_HEAD(&cache->unknown);
1945         INIT_LIST_HEAD(&cache->resolve);
1946 }
1947
1948 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1949                                                bdaddr_t *bdaddr)
1950 {
1951         struct discovery_state *cache = &hdev->discovery;
1952         struct inquiry_entry *e;
1953
1954         BT_DBG("cache %p, %pMR", cache, bdaddr);
1955
1956         list_for_each_entry(e, &cache->all, all) {
1957                 if (!bacmp(&e->data.bdaddr, bdaddr))
1958                         return e;
1959         }
1960
1961         return NULL;
1962 }
1963
1964 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1965                                                        bdaddr_t *bdaddr)
1966 {
1967         struct discovery_state *cache = &hdev->discovery;
1968         struct inquiry_entry *e;
1969
1970         BT_DBG("cache %p, %pMR", cache, bdaddr);
1971
1972         list_for_each_entry(e, &cache->unknown, list) {
1973                 if (!bacmp(&e->data.bdaddr, bdaddr))
1974                         return e;
1975         }
1976
1977         return NULL;
1978 }
1979
1980 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1981                                                        bdaddr_t *bdaddr,
1982                                                        int state)
1983 {
1984         struct discovery_state *cache = &hdev->discovery;
1985         struct inquiry_entry *e;
1986
1987         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1988
1989         list_for_each_entry(e, &cache->resolve, list) {
1990                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1991                         return e;
1992                 if (!bacmp(&e->data.bdaddr, bdaddr))
1993                         return e;
1994         }
1995
1996         return NULL;
1997 }
1998
1999 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2000                                       struct inquiry_entry *ie)
2001 {
2002         struct discovery_state *cache = &hdev->discovery;
2003         struct list_head *pos = &cache->resolve;
2004         struct inquiry_entry *p;
2005
2006         list_del(&ie->list);
2007
2008         list_for_each_entry(p, &cache->resolve, list) {
2009                 if (p->name_state != NAME_PENDING &&
2010                     abs(p->data.rssi) >= abs(ie->data.rssi))
2011                         break;
2012                 pos = &p->list;
2013         }
2014
2015         list_add(&ie->list, pos);
2016 }
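
/* Worked example: with entries of -40, -60 and -90 dBm in the resolve
 * list, the loop above keeps the list ordered by ascending abs(rssi),
 * i.e. strongest signal first (-40, -60, -90), so name resolution is
 * attempted for the closest devices first. Entries already in
 * NAME_PENDING state never trigger the break and are skipped over, so
 * an in-flight lookup keeps its place.
 */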
2017
2018 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2019                              bool name_known)
2020 {
2021         struct discovery_state *cache = &hdev->discovery;
2022         struct inquiry_entry *ie;
2023         u32 flags = 0;
2024
2025         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2026
2027         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2028
2029         if (!data->ssp_mode)
2030                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2031
2032         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2033         if (ie) {
2034                 if (!ie->data.ssp_mode)
2035                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2036
2037                 if (ie->name_state == NAME_NEEDED &&
2038                     data->rssi != ie->data.rssi) {
2039                         ie->data.rssi = data->rssi;
2040                         hci_inquiry_cache_update_resolve(hdev, ie);
2041                 }
2042
2043                 goto update;
2044         }
2045
2046         /* Entry not in the cache. Add new one. */
2047         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2048         if (!ie) {
2049                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2050                 goto done;
2051         }
2052
2053         list_add(&ie->all, &cache->all);
2054
2055         if (name_known) {
2056                 ie->name_state = NAME_KNOWN;
2057         } else {
2058                 ie->name_state = NAME_NOT_KNOWN;
2059                 list_add(&ie->list, &cache->unknown);
2060         }
2061
2062 update:
2063         if (name_known && ie->name_state != NAME_KNOWN &&
2064             ie->name_state != NAME_PENDING) {
2065                 ie->name_state = NAME_KNOWN;
2066                 list_del(&ie->list);
2067         }
2068
2069         memcpy(&ie->data, data, sizeof(*data));
2070         ie->timestamp = jiffies;
2071         cache->timestamp = jiffies;
2072
2073         if (ie->name_state == NAME_NOT_KNOWN)
2074                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2075
2076 done:
2077         return flags;
2078 }
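
/* The returned flags feed the mgmt Device Found event:
 * MGMT_DEV_FOUND_CONFIRM_NAME asks userspace to confirm whether a
 * remote name request is still needed, while
 * MGMT_DEV_FOUND_LEGACY_PAIRING marks devices without SSP support,
 * which therefore require legacy (PIN code) pairing.
 */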
2079
2080 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2081 {
2082         struct discovery_state *cache = &hdev->discovery;
2083         struct inquiry_info *info = (struct inquiry_info *) buf;
2084         struct inquiry_entry *e;
2085         int copied = 0;
2086
2087         list_for_each_entry(e, &cache->all, all) {
2088                 struct inquiry_data *data = &e->data;
2089
2090                 if (copied >= num)
2091                         break;
2092
2093                 bacpy(&info->bdaddr, &data->bdaddr);
2094                 info->pscan_rep_mode    = data->pscan_rep_mode;
2095                 info->pscan_period_mode = data->pscan_period_mode;
2096                 info->pscan_mode        = data->pscan_mode;
2097                 memcpy(info->dev_class, data->dev_class, 3);
2098                 info->clock_offset      = data->clock_offset;
2099
2100                 info++;
2101                 copied++;
2102         }
2103
2104         BT_DBG("cache %p, copied %d", cache, copied);
2105         return copied;
2106 }
2107
2108 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2109 {
2110         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2111         struct hci_dev *hdev = req->hdev;
2112         struct hci_cp_inquiry cp;
2113
2114         BT_DBG("%s", hdev->name);
2115
2116         if (test_bit(HCI_INQUIRY, &hdev->flags))
2117                 return;
2118
2119         /* Start Inquiry */
2120         memcpy(&cp.lap, &ir->lap, 3);
2121         cp.length  = ir->length;
2122         cp.num_rsp = ir->num_rsp;
2123         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2124 }
2125
2126 static int wait_inquiry(void *word)
2127 {
2128         schedule();
2129         return signal_pending(current);
2130 }
2131
2132 int hci_inquiry(void __user *arg)
2133 {
2134         __u8 __user *ptr = arg;
2135         struct hci_inquiry_req ir;
2136         struct hci_dev *hdev;
2137         int err = 0, do_inquiry = 0, max_rsp;
2138         long timeo;
2139         __u8 *buf;
2140
2141         if (copy_from_user(&ir, ptr, sizeof(ir)))
2142                 return -EFAULT;
2143
2144         hdev = hci_dev_get(ir.dev_id);
2145         if (!hdev)
2146                 return -ENODEV;
2147
2148         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2149                 err = -EBUSY;
2150                 goto done;
2151         }
2152
2153         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2154                 err = -EOPNOTSUPP;
2155                 goto done;
2156         }
2157
2158         if (hdev->dev_type != HCI_BREDR) {
2159                 err = -EOPNOTSUPP;
2160                 goto done;
2161         }
2162
2163         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2164                 err = -EOPNOTSUPP;
2165                 goto done;
2166         }
2167
2168         hci_dev_lock(hdev);
2169         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2170             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2171                 hci_inquiry_cache_flush(hdev);
2172                 do_inquiry = 1;
2173         }
2174         hci_dev_unlock(hdev);
2175
2176         timeo = ir.length * msecs_to_jiffies(2000);
2177
2178         if (do_inquiry) {
2179                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2180                                    timeo);
2181                 if (err < 0)
2182                         goto done;
2183
2184                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2185                  * cleared). If it is interrupted by a signal, return -EINTR.
2186                  */
2187                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2188                                 TASK_INTERRUPTIBLE))
2189                         return -EINTR;
2190         }
2191
2192         /* For an unlimited number of responses, use a buffer with
2193          * 255 entries.
2194          */
2195         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2196
2197         /* inquiry_cache_dump() can't sleep. Therefore allocate a
2198          * temporary buffer and then copy it to user space.
2199          */
2200         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2201         if (!buf) {
2202                 err = -ENOMEM;
2203                 goto done;
2204         }
2205
2206         hci_dev_lock(hdev);
2207         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2208         hci_dev_unlock(hdev);
2209
2210         BT_DBG("num_rsp %d", ir.num_rsp);
2211
2212         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2213                 ptr += sizeof(ir);
2214                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2215                                  ir.num_rsp))
2216                         err = -EFAULT;
2217         } else
2218                 err = -EFAULT;
2219
2220         kfree(buf);
2221
2222 done:
2223         hci_dev_put(hdev);
2224         return err;
2225 }
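
/* Userspace sketch (illustrative, not part of this file): the ioctl
 * that reaches hci_inquiry() passes a struct hci_inquiry_req followed
 * directly by room for the inquiry_info results, matching the
 * copy_to_user() layout above:
 *
 *	struct hci_inquiry_req *ir;
 *	char buf[sizeof(*ir) + 20 * sizeof(struct inquiry_info)] = { 0 };
 *
 *	ir = (struct hci_inquiry_req *) buf;
 *	ir->dev_id  = 0;		// hci0
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;		// GIAC 0x9e8b33, little-endian
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;		// 8 * 1.28 seconds
 *	ir->num_rsp = 20;
 *
 *	if (ioctl(sock, HCIINQUIRY, (unsigned long) buf) < 0)
 *		perror("HCIINQUIRY");
 *
 * where sock is a raw HCI socket obtained via
 * socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI).
 */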
2226
2227 static int hci_dev_do_open(struct hci_dev *hdev)
2228 {
2229         int ret = 0;
2230
2231         BT_DBG("%s %p", hdev->name, hdev);
2232
2233         hci_req_lock(hdev);
2234
2235         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2236                 ret = -ENODEV;
2237                 goto done;
2238         }
2239
2240         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2241             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2242                 /* Check for rfkill but allow the HCI setup stage to
2243                  * proceed (which in itself doesn't cause any RF activity).
2244                  */
2245                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2246                         ret = -ERFKILL;
2247                         goto done;
2248                 }
2249
2250                 /* Check for valid public address or a configured static
2251                  * random address, but let the HCI setup proceed to
2252                  * be able to determine if there is a public address
2253                  * or not.
2254                  *
2255                  * In case of user channel usage, it is not important
2256                  * if a public address or static random address is
2257                  * available.
2258                  *
2259                  * This check is only valid for BR/EDR controllers
2260                  * since AMP controllers do not have an address.
2261                  */
2262                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2263                     hdev->dev_type == HCI_BREDR &&
2264                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2265                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2266                         ret = -EADDRNOTAVAIL;
2267                         goto done;
2268                 }
2269         }
2270
2271         if (test_bit(HCI_UP, &hdev->flags)) {
2272                 ret = -EALREADY;
2273                 goto done;
2274         }
2275
2276         if (hdev->open(hdev)) {
2277                 ret = -EIO;
2278                 goto done;
2279         }
2280
2281         atomic_set(&hdev->cmd_cnt, 1);
2282         set_bit(HCI_INIT, &hdev->flags);
2283
2284         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2285                 if (hdev->setup)
2286                         ret = hdev->setup(hdev);
2287
2288                 /* The transport driver can set these quirks before
2289                  * creating the HCI device or in its setup callback.
2290                  *
2291                  * In case any of them is set, the controller has to
2292                  * start up as unconfigured.
2293                  */
2294                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2295                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2296                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2297
2298                 /* For an unconfigured controller it is required to
2299                  * read at least the version information provided by
2300                  * the Read Local Version Information command.
2301                  *
2302                  * If the set_bdaddr driver callback is provided, then
2303                  * also the original Bluetooth public device address
2304                  * will be read using the Read BD Address command.
2305                  */
2306                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2307                         ret = __hci_unconf_init(hdev);
2308         }
2309
2310         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2311                 /* If public address change is configured, ensure that
2312                  * the address gets programmed. If the driver does not
2313                  * support changing the public address, fail the power
2314                  * on procedure.
2315                  */
2316                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2317                     hdev->set_bdaddr)
2318                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2319                 else
2320                         ret = -EADDRNOTAVAIL;
2321         }
2322
2323         if (!ret) {
2324                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2325                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2326                         ret = __hci_init(hdev);
2327         }
2328
2329         clear_bit(HCI_INIT, &hdev->flags);
2330
2331         if (!ret) {
2332                 hci_dev_hold(hdev);
2333                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2334                 set_bit(HCI_UP, &hdev->flags);
2335                 hci_notify(hdev, HCI_DEV_UP);
2336                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2337                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2338                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2339                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2340                     hdev->dev_type == HCI_BREDR) {
2341                         hci_dev_lock(hdev);
2342                         mgmt_powered(hdev, 1);
2343                         hci_dev_unlock(hdev);
2344                 }
2345         } else {
2346                 /* Init failed, cleanup */
2347                 flush_work(&hdev->tx_work);
2348                 flush_work(&hdev->cmd_work);
2349                 flush_work(&hdev->rx_work);
2350
2351                 skb_queue_purge(&hdev->cmd_q);
2352                 skb_queue_purge(&hdev->rx_q);
2353
2354                 if (hdev->flush)
2355                         hdev->flush(hdev);
2356
2357                 if (hdev->sent_cmd) {
2358                         kfree_skb(hdev->sent_cmd);
2359                         hdev->sent_cmd = NULL;
2360                 }
2361
2362                 hdev->close(hdev);
2363                 hdev->flags &= BIT(HCI_RAW);
2364         }
2365
2366 done:
2367         hci_req_unlock(hdev);
2368         return ret;
2369 }
2370
2371 /* ---- HCI ioctl helpers ---- */
2372
2373 int hci_dev_open(__u16 dev)
2374 {
2375         struct hci_dev *hdev;
2376         int err;
2377
2378         hdev = hci_dev_get(dev);
2379         if (!hdev)
2380                 return -ENODEV;
2381
2382         /* Devices that are marked as unconfigured can only be powered
2383          * up as user channel. Trying to bring them up as normal devices
2384          * will result in a failure. Only user channel operation is
2385          * possible.
2386          *
2387          * When this function is called for a user channel, the flag
2388          * HCI_USER_CHANNEL will be set first before attempting to
2389          * open the device.
2390          */
2391         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2392             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2393                 err = -EOPNOTSUPP;
2394                 goto done;
2395         }
2396
2397         /* We need to ensure that no other power on/off work is pending
2398          * before proceeding to call hci_dev_do_open. This is
2399          * particularly important if the setup procedure has not yet
2400          * completed.
2401          */
2402         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2403                 cancel_delayed_work(&hdev->power_off);
2404
2405         /* After this call it is guaranteed that the setup procedure
2406          * has finished. This means that error conditions like RFKILL
2407          * or no valid public or static random address apply.
2408          */
2409         flush_workqueue(hdev->req_workqueue);
2410
2411         err = hci_dev_do_open(hdev);
2412
2413 done:
2414         hci_dev_put(hdev);
2415         return err;
2416 }
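
/* Userspace sketch (illustrative): this function is reached via the
 * HCIDEVUP ioctl on an HCI control socket, the device index being the
 * ioctl argument:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP hci0");
 */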
2417
2418 /* This function requires the caller holds hdev->lock */
2419 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2420 {
2421         struct hci_conn_params *p;
2422
2423         list_for_each_entry(p, &hdev->le_conn_params, list)
2424                 list_del_init(&p->action);
2425
2426         BT_DBG("All LE pending actions cleared");
2427 }
2428
2429 static int hci_dev_do_close(struct hci_dev *hdev)
2430 {
2431         BT_DBG("%s %p", hdev->name, hdev);
2432
2433         cancel_delayed_work(&hdev->power_off);
2434
2435         hci_req_cancel(hdev, ENODEV);
2436         hci_req_lock(hdev);
2437
2438         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2439                 cancel_delayed_work_sync(&hdev->cmd_timer);
2440                 hci_req_unlock(hdev);
2441                 return 0;
2442         }
2443
2444         /* Flush RX and TX work */
2445         flush_work(&hdev->tx_work);
2446         flush_work(&hdev->rx_work);
2447
2448         if (hdev->discov_timeout > 0) {
2449                 cancel_delayed_work(&hdev->discov_off);
2450                 hdev->discov_timeout = 0;
2451                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2452                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2453         }
2454
2455         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2456                 cancel_delayed_work(&hdev->service_cache);
2457
2458         cancel_delayed_work_sync(&hdev->le_scan_disable);
2459
2460         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2461                 cancel_delayed_work_sync(&hdev->rpa_expired);
2462
2463         hci_dev_lock(hdev);
2464         hci_inquiry_cache_flush(hdev);
2465         hci_conn_hash_flush(hdev);
2466         hci_pend_le_actions_clear(hdev);
2467         hci_dev_unlock(hdev);
2468
2469         hci_notify(hdev, HCI_DEV_DOWN);
2470
2471         if (hdev->flush)
2472                 hdev->flush(hdev);
2473
2474         /* Reset device */
2475         skb_queue_purge(&hdev->cmd_q);
2476         atomic_set(&hdev->cmd_cnt, 1);
2477         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2478             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2479             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2480                 set_bit(HCI_INIT, &hdev->flags);
2481                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2482                 clear_bit(HCI_INIT, &hdev->flags);
2483         }
2484
2485         /* Flush command work */
2486         flush_work(&hdev->cmd_work);
2487
2488         /* Drop queues */
2489         skb_queue_purge(&hdev->rx_q);
2490         skb_queue_purge(&hdev->cmd_q);
2491         skb_queue_purge(&hdev->raw_q);
2492
2493         /* Drop last sent command */
2494         if (hdev->sent_cmd) {
2495                 cancel_delayed_work_sync(&hdev->cmd_timer);
2496                 kfree_skb(hdev->sent_cmd);
2497                 hdev->sent_cmd = NULL;
2498         }
2499
2500         kfree_skb(hdev->recv_evt);
2501         hdev->recv_evt = NULL;
2502
2503         /* After this point our queues are empty
2504          * and no tasks are scheduled. */
2505         hdev->close(hdev);
2506
2507         /* Clear flags */
2508         hdev->flags &= BIT(HCI_RAW);
2509         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2510
2511         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2512                 if (hdev->dev_type == HCI_BREDR) {
2513                         hci_dev_lock(hdev);
2514                         mgmt_powered(hdev, 0);
2515                         hci_dev_unlock(hdev);
2516                 }
2517         }
2518
2519         /* Controller radio is available but is currently powered down */
2520         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2521
2522         memset(hdev->eir, 0, sizeof(hdev->eir));
2523         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2524         bacpy(&hdev->random_addr, BDADDR_ANY);
2525
2526         hci_req_unlock(hdev);
2527
2528         hci_dev_put(hdev);
2529         return 0;
2530 }
2531
2532 int hci_dev_close(__u16 dev)
2533 {
2534         struct hci_dev *hdev;
2535         int err;
2536
2537         hdev = hci_dev_get(dev);
2538         if (!hdev)
2539                 return -ENODEV;
2540
2541         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2542                 err = -EBUSY;
2543                 goto done;
2544         }
2545
2546         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2547                 cancel_delayed_work(&hdev->power_off);
2548
2549         err = hci_dev_do_close(hdev);
2550
2551 done:
2552         hci_dev_put(hdev);
2553         return err;
2554 }
2555
2556 int hci_dev_reset(__u16 dev)
2557 {
2558         struct hci_dev *hdev;
2559         int ret = 0;
2560
2561         hdev = hci_dev_get(dev);
2562         if (!hdev)
2563                 return -ENODEV;
2564
2565         hci_req_lock(hdev);
2566
2567         if (!test_bit(HCI_UP, &hdev->flags)) {
2568                 ret = -ENETDOWN;
2569                 goto done;
2570         }
2571
2572         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573                 ret = -EBUSY;
2574                 goto done;
2575         }
2576
2577         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2578                 ret = -EOPNOTSUPP;
2579                 goto done;
2580         }
2581
2582         /* Drop queues */
2583         skb_queue_purge(&hdev->rx_q);
2584         skb_queue_purge(&hdev->cmd_q);
2585
2586         hci_dev_lock(hdev);
2587         hci_inquiry_cache_flush(hdev);
2588         hci_conn_hash_flush(hdev);
2589         hci_dev_unlock(hdev);
2590
2591         if (hdev->flush)
2592                 hdev->flush(hdev);
2593
2594         atomic_set(&hdev->cmd_cnt, 1);
2595         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2596
2597         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2598
2599 done:
2600         hci_req_unlock(hdev);
2601         hci_dev_put(hdev);
2602         return ret;
2603 }
2604
2605 int hci_dev_reset_stat(__u16 dev)
2606 {
2607         struct hci_dev *hdev;
2608         int ret = 0;
2609
2610         hdev = hci_dev_get(dev);
2611         if (!hdev)
2612                 return -ENODEV;
2613
2614         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2615                 ret = -EBUSY;
2616                 goto done;
2617         }
2618
2619         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2620                 ret = -EOPNOTSUPP;
2621                 goto done;
2622         }
2623
2624         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2625
2626 done:
2627         hci_dev_put(hdev);
2628         return ret;
2629 }
2630
2631 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2632 {
2633         struct hci_dev *hdev;
2634         struct hci_dev_req dr;
2635         int err = 0;
2636
2637         if (copy_from_user(&dr, arg, sizeof(dr)))
2638                 return -EFAULT;
2639
2640         hdev = hci_dev_get(dr.dev_id);
2641         if (!hdev)
2642                 return -ENODEV;
2643
2644         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2645                 err = -EBUSY;
2646                 goto done;
2647         }
2648
2649         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2650                 err = -EOPNOTSUPP;
2651                 goto done;
2652         }
2653
2654         if (hdev->dev_type != HCI_BREDR) {
2655                 err = -EOPNOTSUPP;
2656                 goto done;
2657         }
2658
2659         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2660                 err = -EOPNOTSUPP;
2661                 goto done;
2662         }
2663
2664         switch (cmd) {
2665         case HCISETAUTH:
2666                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2667                                    HCI_INIT_TIMEOUT);
2668                 break;
2669
2670         case HCISETENCRYPT:
2671                 if (!lmp_encrypt_capable(hdev)) {
2672                         err = -EOPNOTSUPP;
2673                         break;
2674                 }
2675
2676                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2677                         /* Auth must be enabled first */
2678                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2679                                            HCI_INIT_TIMEOUT);
2680                         if (err)
2681                                 break;
2682                 }
2683
2684                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2685                                    HCI_INIT_TIMEOUT);
2686                 break;
2687
2688         case HCISETSCAN:
2689                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2690                                    HCI_INIT_TIMEOUT);
2691                 break;
2692
2693         case HCISETLINKPOL:
2694                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2695                                    HCI_INIT_TIMEOUT);
2696                 break;
2697
2698         case HCISETLINKMODE:
2699                 hdev->link_mode = ((__u16) dr.dev_opt) &
2700                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2701                 break;
2702
2703         case HCISETPTYPE:
2704                 hdev->pkt_type = (__u16) dr.dev_opt;
2705                 break;
2706
2707         case HCISETACLMTU:
2708                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2709                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2710                 break;
2711
2712         case HCISETSCOMTU:
2713                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2714                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2715                 break;
2716
2717         default:
2718                 err = -EINVAL;
2719                 break;
2720         }
2721
2722 done:
2723         hci_dev_put(hdev);
2724         return err;
2725 }
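
/* Userspace sketch (illustrative): the HCISET* ioctls handled above
 * take a struct hci_dev_req. Enabling page and inquiry scan on hci0,
 * for example:
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;
 *	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */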
2726
2727 int hci_get_dev_list(void __user *arg)
2728 {
2729         struct hci_dev *hdev;
2730         struct hci_dev_list_req *dl;
2731         struct hci_dev_req *dr;
2732         int n = 0, size, err;
2733         __u16 dev_num;
2734
2735         if (get_user(dev_num, (__u16 __user *) arg))
2736                 return -EFAULT;
2737
2738         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2739                 return -EINVAL;
2740
2741         size = sizeof(*dl) + dev_num * sizeof(*dr);
2742
2743         dl = kzalloc(size, GFP_KERNEL);
2744         if (!dl)
2745                 return -ENOMEM;
2746
2747         dr = dl->dev_req;
2748
2749         read_lock(&hci_dev_list_lock);
2750         list_for_each_entry(hdev, &hci_dev_list, list) {
2751                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2752                         cancel_delayed_work(&hdev->power_off);
2753
2754                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2755                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2756
2757                 (dr + n)->dev_id  = hdev->id;
2758                 (dr + n)->dev_opt = hdev->flags;
2759
2760                 if (++n >= dev_num)
2761                         break;
2762         }
2763         read_unlock(&hci_dev_list_lock);
2764
2765         dl->dev_num = n;
2766         size = sizeof(*dl) + n * sizeof(*dr);
2767
2768         err = copy_to_user(arg, dl, size);
2769         kfree(dl);
2770
2771         return err ? -EFAULT : 0;
2772 }
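
/* Userspace sketch (illustrative): HCIGETDEVLIST expects dev_num to be
 * preset to the capacity of the trailing hci_dev_req array and returns
 * the number of entries actually filled in:
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ioctl(ctl, HCIGETDEVLIST, (unsigned long) dl) == 0)
 *		printf("%u device(s), first id %u\n", dl->dev_num,
 *		       dr[0].dev_id);
 *	free(dl);
 */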
2773
2774 int hci_get_dev_info(void __user *arg)
2775 {
2776         struct hci_dev *hdev;
2777         struct hci_dev_info di;
2778         int err = 0;
2779
2780         if (copy_from_user(&di, arg, sizeof(di)))
2781                 return -EFAULT;
2782
2783         hdev = hci_dev_get(di.dev_id);
2784         if (!hdev)
2785                 return -ENODEV;
2786
2787         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2788                 cancel_delayed_work_sync(&hdev->power_off);
2789
2790         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2791                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2792
2793         strcpy(di.name, hdev->name);
2794         di.bdaddr   = hdev->bdaddr;
2795         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2796         di.flags    = hdev->flags;
2797         di.pkt_type = hdev->pkt_type;
2798         if (lmp_bredr_capable(hdev)) {
2799                 di.acl_mtu  = hdev->acl_mtu;
2800                 di.acl_pkts = hdev->acl_pkts;
2801                 di.sco_mtu  = hdev->sco_mtu;
2802                 di.sco_pkts = hdev->sco_pkts;
2803         } else {
2804                 di.acl_mtu  = hdev->le_mtu;
2805                 di.acl_pkts = hdev->le_pkts;
2806                 di.sco_mtu  = 0;
2807                 di.sco_pkts = 0;
2808         }
2809         di.link_policy = hdev->link_policy;
2810         di.link_mode   = hdev->link_mode;
2811
2812         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2813         memcpy(&di.features, &hdev->features, sizeof(di.features));
2814
2815         if (copy_to_user(arg, &di, sizeof(di)))
2816                 err = -EFAULT;
2817
2818         hci_dev_put(hdev);
2819
2820         return err;
2821 }
2822
2823 /* ---- Interface to HCI drivers ---- */
2824
2825 static int hci_rfkill_set_block(void *data, bool blocked)
2826 {
2827         struct hci_dev *hdev = data;
2828
2829         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2830
2831         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2832                 return -EBUSY;
2833
2834         if (blocked) {
2835                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2836                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2837                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2838                         hci_dev_do_close(hdev);
2839         } else {
2840                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2841         }
2842
2843         return 0;
2844 }
2845
2846 static const struct rfkill_ops hci_rfkill_ops = {
2847         .set_block = hci_rfkill_set_block,
2848 };
2849
2850 static void hci_power_on(struct work_struct *work)
2851 {
2852         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2853         int err;
2854
2855         BT_DBG("%s", hdev->name);
2856
2857         err = hci_dev_do_open(hdev);
2858         if (err < 0) {
2859                 mgmt_set_powered_failed(hdev, err);
2860                 return;
2861         }
2862
2863         /* During the HCI setup phase, a few error conditions are
2864          * ignored and they need to be checked now. If they are still
2865          * valid, it is important to turn the device back off.
2866          */
2867         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2868             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2869             (hdev->dev_type == HCI_BREDR &&
2870              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2871              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2872                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2873                 hci_dev_do_close(hdev);
2874         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2875                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2876                                    HCI_AUTO_OFF_TIMEOUT);
2877         }
2878
2879         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2880                 /* For unconfigured devices, set the HCI_RAW flag
2881                  * so that userspace can easily identify them.
2882                  */
2883                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2884                         set_bit(HCI_RAW, &hdev->flags);
2885
2886                 /* For fully configured devices, this will send
2887                  * the Index Added event. For unconfigured devices,
2888                  * it will send the Unconfigured Index Added event.
2889                  *
2890                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2891                  * and no event will be sent.
2892                  */
2893                 mgmt_index_added(hdev);
2894         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2895                 /* Now that the controller is configured, it is
2896                  * important to clear the HCI_RAW flag.
2897                  */
2898                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2899                         clear_bit(HCI_RAW, &hdev->flags);
2900
2901                 /* Powering on the controller with HCI_CONFIG set only
2902                  * happens with the transition from unconfigured to
2903                  * configured. This will send the Index Added event.
2904                  */
2905                 mgmt_index_added(hdev);
2906         }
2907 }
2908
2909 static void hci_power_off(struct work_struct *work)
2910 {
2911         struct hci_dev *hdev = container_of(work, struct hci_dev,
2912                                             power_off.work);
2913
2914         BT_DBG("%s", hdev->name);
2915
2916         hci_dev_do_close(hdev);
2917 }
2918
2919 static void hci_discov_off(struct work_struct *work)
2920 {
2921         struct hci_dev *hdev;
2922
2923         hdev = container_of(work, struct hci_dev, discov_off.work);
2924
2925         BT_DBG("%s", hdev->name);
2926
2927         mgmt_discoverable_timeout(hdev);
2928 }
2929
2930 void hci_uuids_clear(struct hci_dev *hdev)
2931 {
2932         struct bt_uuid *uuid, *tmp;
2933
2934         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2935                 list_del(&uuid->list);
2936                 kfree(uuid);
2937         }
2938 }
2939
2940 void hci_link_keys_clear(struct hci_dev *hdev)
2941 {
2942         struct list_head *p, *n;
2943
2944         list_for_each_safe(p, n, &hdev->link_keys) {
2945                 struct link_key *key;
2946
2947                 key = list_entry(p, struct link_key, list);
2948
2949                 list_del(p);
2950                 kfree(key);
2951         }
2952 }
2953
2954 void hci_smp_ltks_clear(struct hci_dev *hdev)
2955 {
2956         struct smp_ltk *k, *tmp;
2957
2958         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2959                 list_del(&k->list);
2960                 kfree(k);
2961         }
2962 }
2963
2964 void hci_smp_irks_clear(struct hci_dev *hdev)
2965 {
2966         struct smp_irk *k, *tmp;
2967
2968         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2969                 list_del(&k->list);
2970                 kfree(k);
2971         }
2972 }
2973
2974 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2975 {
2976         struct link_key *k;
2977
2978         list_for_each_entry(k, &hdev->link_keys, list)
2979                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2980                         return k;
2981
2982         return NULL;
2983 }
2984
2985 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2986                                u8 key_type, u8 old_key_type)
2987 {
2988         /* Legacy key */
2989         if (key_type < 0x03)
2990                 return true;
2991
2992         /* Debug keys are insecure so don't store them persistently */
2993         if (key_type == HCI_LK_DEBUG_COMBINATION)
2994                 return false;
2995
2996         /* Changed combination key and there's no previous one */
2997         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2998                 return false;
2999
3000         /* Security mode 3 case */
3001         if (!conn)
3002                 return true;
3003
3004         /* Neither local nor remote side had no-bonding as a requirement */
3005         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3006                 return true;
3007
3008         /* Local side had dedicated bonding as requirement */
3009         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3010                 return true;
3011
3012         /* Remote side had dedicated bonding as requirement */
3013         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3014                 return true;
3015
3016         /* If none of the above criteria match, then don't store the key
3017          * persistently */
3018         return false;
3019 }
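
/* Worked example for the checks above, assuming an SSP key type such
 * as 0x04 (unauthenticated combination key): auth_type and remote_auth
 * follow the HCI Authentication_Requirements encoding, where 0x00/0x01
 * mean no bonding, 0x02/0x03 dedicated bonding and 0x04/0x05 general
 * bonding. A key from a pairing where the local side requested 0x03
 * (dedicated bonding with MITM) is therefore stored persistently,
 * while one where both sides requested 0x00/0x01 is not.
 */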
3020
3021 static bool ltk_type_master(u8 type)
3022 {
3023         return (type == SMP_LTK);
3024 }
3025
3026 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3027                              bool master)
3028 {
3029         struct smp_ltk *k;
3030
3031         list_for_each_entry(k, &hdev->long_term_keys, list) {
3032                 if (k->ediv != ediv || k->rand != rand)
3033                         continue;
3034
3035                 if (ltk_type_master(k->type) != master)
3036                         continue;
3037
3038                 return k;
3039         }
3040
3041         return NULL;
3042 }
3043
3044 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3045                                      u8 addr_type, bool master)
3046 {
3047         struct smp_ltk *k;
3048
3049         list_for_each_entry(k, &hdev->long_term_keys, list)
3050                 if (addr_type == k->bdaddr_type &&
3051                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3052                     ltk_type_master(k->type) == master)
3053                         return k;
3054
3055         return NULL;
3056 }
3057
3058 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3059 {
3060         struct smp_irk *irk;
3061
3062         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3063                 if (!bacmp(&irk->rpa, rpa))
3064                         return irk;
3065         }
3066
3067         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3068                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3069                         bacpy(&irk->rpa, rpa);
3070                         return irk;
3071                 }
3072         }
3073
3074         return NULL;
3075 }
3076
3077 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3078                                      u8 addr_type)
3079 {
3080         struct smp_irk *irk;
3081
3082         /* Identity Address must be public or static random */
3083         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3084                 return NULL;
3085
3086         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3087                 if (addr_type == irk->addr_type &&
3088                     bacmp(bdaddr, &irk->bdaddr) == 0)
3089                         return irk;
3090         }
3091
3092         return NULL;
3093 }
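
/* Note on the address check above: bdaddr_t is stored little-endian,
 * so b[5] is the most significant address byte. Static random
 * addresses must have their two top bits set to 0b11, hence the
 * (bdaddr->b[5] & 0xc0) != 0xc0 rejection. For example, an address
 * starting with 0xdc (0b11011100) qualifies as static random, while
 * one starting with 0x7c (0b01111100) does not.
 */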
3094
3095 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3096                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3097                                   u8 pin_len, bool *persistent)
3098 {
3099         struct link_key *key, *old_key;
3100         u8 old_key_type;
3101
3102         old_key = hci_find_link_key(hdev, bdaddr);
3103         if (old_key) {
3104                 old_key_type = old_key->type;
3105                 key = old_key;
3106         } else {
3107                 old_key_type = conn ? conn->key_type : 0xff;
3108                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3109                 if (!key)
3110                         return NULL;
3111                 list_add(&key->list, &hdev->link_keys);
3112         }
3113
3114         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3115
3116         /* Some buggy controller combinations generate a changed
3117          * combination key for legacy pairing even when there's no
3118          * previous key */
3119         if (type == HCI_LK_CHANGED_COMBINATION &&
3120             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3121                 type = HCI_LK_COMBINATION;
3122                 if (conn)
3123                         conn->key_type = type;
3124         }
3125
3126         bacpy(&key->bdaddr, bdaddr);
3127         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3128         key->pin_len = pin_len;
3129
3130         if (type == HCI_LK_CHANGED_COMBINATION)
3131                 key->type = old_key_type;
3132         else
3133                 key->type = type;
3134
3135         if (persistent)
3136                 *persistent = hci_persistent_key(hdev, conn, type,
3137                                                  old_key_type);
3138
3139         return key;
3140 }
3141
3142 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3143                             u8 addr_type, u8 type, u8 authenticated,
3144                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3145 {
3146         struct smp_ltk *key, *old_key;
3147         bool master = ltk_type_master(type);
3148
3149         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3150         if (old_key)
3151                 key = old_key;
3152         else {
3153                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3154                 if (!key)
3155                         return NULL;
3156                 list_add(&key->list, &hdev->long_term_keys);
3157         }
3158
3159         bacpy(&key->bdaddr, bdaddr);
3160         key->bdaddr_type = addr_type;
3161         memcpy(key->val, tk, sizeof(key->val));
3162         key->authenticated = authenticated;
3163         key->ediv = ediv;
3164         key->rand = rand;
3165         key->enc_size = enc_size;
3166         key->type = type;
3167
3168         return key;
3169 }
3170
3171 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3172                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3173 {
3174         struct smp_irk *irk;
3175
3176         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3177         if (!irk) {
3178                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3179                 if (!irk)
3180                         return NULL;
3181
3182                 bacpy(&irk->bdaddr, bdaddr);
3183                 irk->addr_type = addr_type;
3184
3185                 list_add(&irk->list, &hdev->identity_resolving_keys);
3186         }
3187
3188         memcpy(irk->val, val, 16);
3189         bacpy(&irk->rpa, rpa);
3190
3191         return irk;
3192 }
3193
3194 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3195 {
3196         struct link_key *key;
3197
3198         key = hci_find_link_key(hdev, bdaddr);
3199         if (!key)
3200                 return -ENOENT;
3201
3202         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3203
3204         list_del(&key->list);
3205         kfree(key);
3206
3207         return 0;
3208 }
3209
3210 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3211 {
3212         struct smp_ltk *k, *tmp;
3213         int removed = 0;
3214
3215         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3216                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3217                         continue;
3218
3219                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3220
3221                 list_del(&k->list);
3222                 kfree(k);
3223                 removed++;
3224         }
3225
3226         return removed ? 0 : -ENOENT;
3227 }
3228
3229 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3230 {
3231         struct smp_irk *k, *tmp;
3232
3233         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3234                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3235                         continue;
3236
3237                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3238
3239                 list_del(&k->list);
3240                 kfree(k);
3241         }
3242 }
3243
3244 /* HCI command timer function */
3245 static void hci_cmd_timeout(struct work_struct *work)
3246 {
3247         struct hci_dev *hdev = container_of(work, struct hci_dev,
3248                                             cmd_timer.work);
3249
3250         if (hdev->sent_cmd) {
3251                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3252                 u16 opcode = __le16_to_cpu(sent->opcode);
3253
3254                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3255         } else {
3256                 BT_ERR("%s command tx timeout", hdev->name);
3257         }
3258
3259         atomic_set(&hdev->cmd_cnt, 1);
3260         queue_work(hdev->workqueue, &hdev->cmd_work);
3261 }
3262
3263 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3264                                           bdaddr_t *bdaddr)
3265 {
3266         struct oob_data *data;
3267
3268         list_for_each_entry(data, &hdev->remote_oob_data, list)
3269                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3270                         return data;
3271
3272         return NULL;
3273 }
3274
3275 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3276 {
3277         struct oob_data *data;
3278
3279         data = hci_find_remote_oob_data(hdev, bdaddr);
3280         if (!data)
3281                 return -ENOENT;
3282
3283         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3284
3285         list_del(&data->list);
3286         kfree(data);
3287
3288         return 0;
3289 }
3290
3291 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3292 {
3293         struct oob_data *data, *n;
3294
3295         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3296                 list_del(&data->list);
3297                 kfree(data);
3298         }
3299 }
3300
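/* Store remote out-of-band pairing data. The 192-bit hash/randomizer
 * pair is used for legacy Secure Simple Pairing (P-192), while the
 * 256-bit pair is used for Secure Connections (P-256). This variant
 * only receives P-192 values, so the P-256 fields are cleared.
 */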
3301 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3302                             u8 *hash, u8 *randomizer)
3303 {
3304         struct oob_data *data;
3305
3306         data = hci_find_remote_oob_data(hdev, bdaddr);
3307         if (!data) {
3308                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3309                 if (!data)
3310                         return -ENOMEM;
3311
3312                 bacpy(&data->bdaddr, bdaddr);
3313                 list_add(&data->list, &hdev->remote_oob_data);
3314         }
3315
3316         memcpy(data->hash192, hash, sizeof(data->hash192));
3317         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3318
3319         memset(data->hash256, 0, sizeof(data->hash256));
3320         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3321
3322         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3323
3324         return 0;
3325 }
3326
3327 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3328                                 u8 *hash192, u8 *randomizer192,
3329                                 u8 *hash256, u8 *randomizer256)
3330 {
3331         struct oob_data *data;
3332
3333         data = hci_find_remote_oob_data(hdev, bdaddr);
3334         if (!data) {
3335                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3336                 if (!data)
3337                         return -ENOMEM;
3338
3339                 bacpy(&data->bdaddr, bdaddr);
3340                 list_add(&data->list, &hdev->remote_oob_data);
3341         }
3342
3343         memcpy(data->hash192, hash192, sizeof(data->hash192));
3344         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3345
3346         memcpy(data->hash256, hash256, sizeof(data->hash256));
3347         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3348
3349         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3350
3351         return 0;
3352 }
3353
3354 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3355                                          bdaddr_t *bdaddr, u8 type)
3356 {
3357         struct bdaddr_list *b;
3358
3359         list_for_each_entry(b, bdaddr_list, list) {
3360                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3361                         return b;
3362         }
3363
3364         return NULL;
3365 }
3366
3367 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3368 {
3369         struct list_head *p, *n;
3370
3371         list_for_each_safe(p, n, bdaddr_list) {
3372                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3373
3374                 list_del(p);
3375                 kfree(b);
3376         }
3377 }
3378
3379 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3380 {
3381         struct bdaddr_list *entry;
3382
3383         if (!bacmp(bdaddr, BDADDR_ANY))
3384                 return -EBADF;
3385
3386         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3387                 return -EEXIST;
3388
3389         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3390         if (!entry)
3391                 return -ENOMEM;
3392
3393         bacpy(&entry->bdaddr, bdaddr);
3394         entry->bdaddr_type = type;
3395
3396         list_add(&entry->list, list);
3397
3398         return 0;
3399 }
3400
3401 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3402 {
3403         struct bdaddr_list *entry;
3404
3405         if (!bacmp(bdaddr, BDADDR_ANY)) {
3406                 hci_bdaddr_list_clear(list);
3407                 return 0;
3408         }
3409
3410         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3411         if (!entry)
3412                 return -ENOENT;
3413
3414         list_del(&entry->list);
3415         kfree(entry);
3416
3417         return 0;
3418 }
3419
3420 /* This function requires the caller holds hdev->lock */
3421 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3422                                                bdaddr_t *addr, u8 addr_type)
3423 {
3424         struct hci_conn_params *params;
3425
3426         /* The conn params list only contains identity addresses */
3427         if (!hci_is_identity_address(addr, addr_type))
3428                 return NULL;
3429
3430         list_for_each_entry(params, &hdev->le_conn_params, list) {
3431                 if (bacmp(&params->addr, addr) == 0 &&
3432                     params->addr_type == addr_type) {
3433                         return params;
3434                 }
3435         }
3436
3437         return NULL;
3438 }
3439
3440 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3441 {
3442         struct hci_conn *conn;
3443
3444         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3445         if (!conn)
3446                 return false;
3447
3448         if (conn->dst_type != type)
3449                 return false;
3450
3451         if (conn->state != BT_CONNECTED)
3452                 return false;
3453
3454         return true;
3455 }
3456
3457 /* This function requires the caller holds hdev->lock */
3458 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3459                                                   bdaddr_t *addr, u8 addr_type)
3460 {
3461         struct hci_conn_params *param;
3462
3463         /* The list only contains identity addresses */
3464         if (!hci_is_identity_address(addr, addr_type))
3465                 return NULL;
3466
3467         list_for_each_entry(param, list, action) {
3468                 if (bacmp(&param->addr, addr) == 0 &&
3469                     param->addr_type == addr_type)
3470                         return param;
3471         }
3472
3473         return NULL;
3474 }
3475
3476 /* This function requires the caller holds hdev->lock */
3477 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3478                                             bdaddr_t *addr, u8 addr_type)
3479 {
3480         struct hci_conn_params *params;
3481
3482         if (!hci_is_identity_address(addr, addr_type))
3483                 return NULL;
3484
3485         params = hci_conn_params_lookup(hdev, addr, addr_type);
3486         if (params)
3487                 return params;
3488
3489         params = kzalloc(sizeof(*params), GFP_KERNEL);
3490         if (!params) {
3491                 BT_ERR("Out of memory");
3492                 return NULL;
3493         }
3494
3495         bacpy(&params->addr, addr);
3496         params->addr_type = addr_type;
3497
3498         list_add(&params->list, &hdev->le_conn_params);
3499         INIT_LIST_HEAD(&params->action);
3500
3501         params->conn_min_interval = hdev->le_conn_min_interval;
3502         params->conn_max_interval = hdev->le_conn_max_interval;
3503         params->conn_latency = hdev->le_conn_latency;
3504         params->supervision_timeout = hdev->le_supv_timeout;
3505         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3506
3507         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3508
3509         return params;
3510 }
3511
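/* Set the auto-connect policy for an LE device. A rough summary of the
 * policy values (see the mgmt Add Device command documentation for the
 * authoritative semantics):
 *
 *   HCI_AUTO_CONN_DISABLED   never connect automatically
 *   HCI_AUTO_CONN_LINK_LOSS  reconnect only after a link loss
 *   HCI_AUTO_CONN_REPORT     report the device when seen while scanning
 *   HCI_AUTO_CONN_ALWAYS     connect whenever the device is seen
 */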
3512 /* This function requires the caller holds hdev->lock */
3513 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3514                         u8 auto_connect)
3515 {
3516         struct hci_conn_params *params;
3517
3518         params = hci_conn_params_add(hdev, addr, addr_type);
3519         if (!params)
3520                 return -EIO;
3521
3522         if (params->auto_connect == auto_connect)
3523                 return 0;
3524
3525         list_del_init(&params->action);
3526
3527         switch (auto_connect) {
3528         case HCI_AUTO_CONN_DISABLED:
3529         case HCI_AUTO_CONN_LINK_LOSS:
3530                 hci_update_background_scan(hdev);
3531                 break;
3532         case HCI_AUTO_CONN_REPORT:
3533                 list_add(&params->action, &hdev->pend_le_reports);
3534                 hci_update_background_scan(hdev);
3535                 break;
3536         case HCI_AUTO_CONN_ALWAYS:
3537                 if (!is_connected(hdev, addr, addr_type)) {
3538                         list_add(&params->action, &hdev->pend_le_conns);
3539                         hci_update_background_scan(hdev);
3540                 }
3541                 break;
3542         }
3543
3544         params->auto_connect = auto_connect;
3545
3546         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3547                auto_connect);
3548
3549         return 0;
3550 }
3551
3552 /* This function requires the caller holds hdev->lock */
3553 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3554 {
3555         struct hci_conn_params *params;
3556
3557         params = hci_conn_params_lookup(hdev, addr, addr_type);
3558         if (!params)
3559                 return;
3560
3561         list_del(&params->action);
3562         list_del(&params->list);
3563         kfree(params);
3564
3565         hci_update_background_scan(hdev);
3566
3567         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3568 }
3569
3570 /* This function requires the caller holds hdev->lock */
3571 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3572 {
3573         struct hci_conn_params *params, *tmp;
3574
3575         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3576                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3577                         continue;
3578                 list_del(&params->list);
3579                 kfree(params);
3580         }
3581
3582         BT_DBG("All disabled LE connection parameters were removed");
3583 }
3584
3585 /* This function requires the caller holds hdev->lock */
3586 void hci_conn_params_clear_all(struct hci_dev *hdev)
3587 {
3588         struct hci_conn_params *params, *tmp;
3589
3590         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3591                 list_del(&params->action);
3592                 list_del(&params->list);
3593                 kfree(params);
3594         }
3595
3596         hci_update_background_scan(hdev);
3597
3598         BT_DBG("All LE connection parameters were removed");
3599 }
3600
3601 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3602 {
3603         if (status) {
3604                 BT_ERR("Failed to start inquiry: status %d", status);
3605
3606                 hci_dev_lock(hdev);
3607                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3608                 hci_dev_unlock(hdev);
3609                 return;
3610         }
3611 }
3612
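/* Once LE scanning has been disabled, either finish discovery (LE-only)
 * or, for interleaved discovery, follow up with a classic inquiry using
 * the general inquiry access code.
 */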
3613 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3614 {
3615         /* General inquiry access code (GIAC) */
3616         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3617         struct hci_request req;
3618         struct hci_cp_inquiry cp;
3619         int err;
3620
3621         if (status) {
3622                 BT_ERR("Failed to disable LE scanning: status %d", status);
3623                 return;
3624         }
3625
3626         switch (hdev->discovery.type) {
3627         case DISCOV_TYPE_LE:
3628                 hci_dev_lock(hdev);
3629                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3630                 hci_dev_unlock(hdev);
3631                 break;
3632
3633         case DISCOV_TYPE_INTERLEAVED:
3634                 hci_req_init(&req, hdev);
3635
3636                 memset(&cp, 0, sizeof(cp));
3637                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3638                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3639                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3640
3641                 hci_dev_lock(hdev);
3642
3643                 hci_inquiry_cache_flush(hdev);
3644
3645                 err = hci_req_run(&req, inquiry_complete);
3646                 if (err) {
3647                         BT_ERR("Inquiry request failed: err %d", err);
3648                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3649                 }
3650
3651                 hci_dev_unlock(hdev);
3652                 break;
3653         }
3654 }
3655
3656 static void le_scan_disable_work(struct work_struct *work)
3657 {
3658         struct hci_dev *hdev = container_of(work, struct hci_dev,
3659                                             le_scan_disable.work);
3660         struct hci_request req;
3661         int err;
3662
3663         BT_DBG("%s", hdev->name);
3664
3665         hci_req_init(&req, hdev);
3666
3667         hci_req_add_le_scan_disable(&req);
3668
3669         err = hci_req_run(&req, le_scan_disable_work_complete);
3670         if (err)
3671                 BT_ERR("Disable LE scanning request failed: err %d", err);
3672 }
3673
3674 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3675 {
3676         struct hci_dev *hdev = req->hdev;
3677
3678         /* If we're advertising or initiating an LE connection, we can't
3679          * go ahead and change the random address at this time. This is
3680          * because the eventual initiator address used for the
3681          * subsequently created connection will be undefined (some
3682          * controllers use the new address and others the one we had
3683          * when the operation started).
3684          *
3685          * In this kind of scenario skip the update and let the random
3686          * address be updated at the next cycle.
3687          */
3688         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3689             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3690                 BT_DBG("Deferring random address update");
3691                 return;
3692         }
3693
3694         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3695 }
3696
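/* Pick the own address type for an upcoming LE operation. In short:
 * with privacy enabled a resolvable private address (RPA) is used and
 * regenerated when expired; if privacy is merely required (e.g. for
 * active scanning) a fresh non-resolvable private address is used;
 * otherwise the static random or public address is selected.
 */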
3697 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3698                               u8 *own_addr_type)
3699 {
3700         struct hci_dev *hdev = req->hdev;
3701         int err;
3702
3703         /* If privacy is enabled use a resolvable private address. If
3704          * the current RPA has expired or there is something other than
3705          * the current RPA in use, then generate a new one.
3706          */
3707         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3708                 int to;
3709
3710                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3711
3712                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3713                     !bacmp(&hdev->random_addr, &hdev->rpa))
3714                         return 0;
3715
3716                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3717                 if (err < 0) {
3718                         BT_ERR("%s failed to generate new RPA", hdev->name);
3719                         return err;
3720                 }
3721
3722                 set_random_addr(req, &hdev->rpa);
3723
3724                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3725                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3726
3727                 return 0;
3728         }
3729
3730         /* If privacy is required but a resolvable private address is not
3731          * available, use a non-resolvable private address. This is useful
3732          * for active scanning and non-connectable advertising.
3733          */
3734         if (require_privacy) {
3735                 bdaddr_t urpa;
3736
3737                 get_random_bytes(&urpa, 6);
3738                 urpa.b[5] &= 0x3f;      /* Clear the two most significant bits */
3739
3740                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3741                 set_random_addr(req, &urpa);
3742                 return 0;
3743         }
3744
3745         /* If forcing the static address is in use or there is no public
3746          * address, use the static address as the random address (but skip
3747          * the HCI command if the current random address is already the
3748          * static one).
3749          */
3750         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3751             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3752                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3753                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3754                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3755                                     &hdev->static_addr);
3756                 return 0;
3757         }
3758
3759         /* Neither privacy nor static address is being used so use a
3760          * public address.
3761          */
3762         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3763
3764         return 0;
3765 }
3766
3767 /* Copy the Identity Address of the controller.
3768  *
3769  * If the controller has a public BD_ADDR, then by default use that one.
3770  * If this is a LE only controller without a public address, default to
3771  * the static random address.
3772  *
3773  * For debugging purposes it is possible to force controllers with a
3774  * public address to use the static random address instead.
3775  */
3776 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3777                                u8 *bdaddr_type)
3778 {
3779         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3780             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3781                 bacpy(bdaddr, &hdev->static_addr);
3782                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3783         } else {
3784                 bacpy(bdaddr, &hdev->bdaddr);
3785                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3786         }
3787 }
3788
3789 /* Alloc HCI device */
3790 struct hci_dev *hci_alloc_dev(void)
3791 {
3792         struct hci_dev *hdev;
3793
3794         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3795         if (!hdev)
3796                 return NULL;
3797
3798         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3799         hdev->esco_type = (ESCO_HV1);
3800         hdev->link_mode = (HCI_LM_ACCEPT);
3801         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3802         hdev->io_capability = 0x03;     /* No Input No Output */
3803         hdev->manufacturer = 0xffff;    /* Default to internal use */
3804         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3805         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3806
3807         hdev->sniff_max_interval = 800;
3808         hdev->sniff_min_interval = 80;
3809
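        /* Default LE parameters. The channel map 0x07 enables all three
         * advertising channels (37, 38, 39). Scan interval/window are in
         * units of 0.625 ms (0x0060 = 60 ms, 0x0030 = 30 ms), connection
         * intervals in units of 1.25 ms (0x0028 = 50 ms, 0x0038 = 70 ms)
         * and the supervision timeout in units of 10 ms (0x002a = 420 ms).
         */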
3810         hdev->le_adv_channel_map = 0x07;
3811         hdev->le_scan_interval = 0x0060;
3812         hdev->le_scan_window = 0x0030;
3813         hdev->le_conn_min_interval = 0x0028;
3814         hdev->le_conn_max_interval = 0x0038;
3815         hdev->le_conn_latency = 0x0000;
3816         hdev->le_supv_timeout = 0x002a;
3817
3818         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3819         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3820         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3821         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3822
3823         mutex_init(&hdev->lock);
3824         mutex_init(&hdev->req_lock);
3825
3826         INIT_LIST_HEAD(&hdev->mgmt_pending);
3827         INIT_LIST_HEAD(&hdev->blacklist);
3828         INIT_LIST_HEAD(&hdev->uuids);
3829         INIT_LIST_HEAD(&hdev->link_keys);
3830         INIT_LIST_HEAD(&hdev->long_term_keys);
3831         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3832         INIT_LIST_HEAD(&hdev->remote_oob_data);
3833         INIT_LIST_HEAD(&hdev->le_white_list);
3834         INIT_LIST_HEAD(&hdev->le_conn_params);
3835         INIT_LIST_HEAD(&hdev->pend_le_conns);
3836         INIT_LIST_HEAD(&hdev->pend_le_reports);
3837         INIT_LIST_HEAD(&hdev->conn_hash.list);
3838
3839         INIT_WORK(&hdev->rx_work, hci_rx_work);
3840         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3841         INIT_WORK(&hdev->tx_work, hci_tx_work);
3842         INIT_WORK(&hdev->power_on, hci_power_on);
3843
3844         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3845         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3846         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3847
3848         skb_queue_head_init(&hdev->rx_q);
3849         skb_queue_head_init(&hdev->cmd_q);
3850         skb_queue_head_init(&hdev->raw_q);
3851
3852         init_waitqueue_head(&hdev->req_wait_q);
3853
3854         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3855
3856         hci_init_sysfs(hdev);
3857         discovery_init(hdev);
3858
3859         return hdev;
3860 }
3861 EXPORT_SYMBOL(hci_alloc_dev);
3862
3863 /* Free HCI device */
3864 void hci_free_dev(struct hci_dev *hdev)
3865 {
3866         /* will free via device release */
3867         put_device(&hdev->dev);
3868 }
3869 EXPORT_SYMBOL(hci_free_dev);
3870
3871 /* Register HCI device */
3872 int hci_register_dev(struct hci_dev *hdev)
3873 {
3874         int id, error;
3875
3876         if (!hdev->open || !hdev->close || !hdev->send)
3877                 return -EINVAL;
3878
3879         /* Do not allow HCI_AMP devices to register at index 0,
3880          * so the index can be used as the AMP controller ID.
3881          */
3882         switch (hdev->dev_type) {
3883         case HCI_BREDR:
3884                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3885                 break;
3886         case HCI_AMP:
3887                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3888                 break;
3889         default:
3890                 return -EINVAL;
3891         }
3892
3893         if (id < 0)
3894                 return id;
3895
3896         sprintf(hdev->name, "hci%d", id);
3897         hdev->id = id;
3898
3899         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3900
3901         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3902                                           WQ_MEM_RECLAIM, 1, hdev->name);
3903         if (!hdev->workqueue) {
3904                 error = -ENOMEM;
3905                 goto err;
3906         }
3907
3908         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3909                                               WQ_MEM_RECLAIM, 1, hdev->name);
3910         if (!hdev->req_workqueue) {
3911                 destroy_workqueue(hdev->workqueue);
3912                 error = -ENOMEM;
3913                 goto err;
3914         }
3915
3916         if (!IS_ERR_OR_NULL(bt_debugfs))
3917                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3918
3919         dev_set_name(&hdev->dev, "%s", hdev->name);
3920
3921         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3922                                                CRYPTO_ALG_ASYNC);
3923         if (IS_ERR(hdev->tfm_aes)) {
3924                 BT_ERR("Unable to create crypto context");
3925                 error = PTR_ERR(hdev->tfm_aes);
3926                 hdev->tfm_aes = NULL;
3927                 goto err_wqueue;
3928         }
3929
3930         error = device_add(&hdev->dev);
3931         if (error < 0)
3932                 goto err_tfm;
3933
3934         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3935                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3936                                     hdev);
3937         if (hdev->rfkill) {
3938                 if (rfkill_register(hdev->rfkill) < 0) {
3939                         rfkill_destroy(hdev->rfkill);
3940                         hdev->rfkill = NULL;
3941                 }
3942         }
3943
3944         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3945                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3946
3947         set_bit(HCI_SETUP, &hdev->dev_flags);
3948         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3949
3950         if (hdev->dev_type == HCI_BREDR) {
3951                 /* Assume BR/EDR support until proven otherwise (such as
3952                  * through reading supported features during init).
3953                  */
3954                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3955         }
3956
3957         write_lock(&hci_dev_list_lock);
3958         list_add(&hdev->list, &hci_dev_list);
3959         write_unlock(&hci_dev_list_lock);
3960
3961         /* Devices that are marked for raw-only usage are unconfigured
3962          * and should not be included in normal operation.
3963          */
3964         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3965                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3966
3967         hci_notify(hdev, HCI_DEV_REG);
3968         hci_dev_hold(hdev);
3969
3970         queue_work(hdev->req_workqueue, &hdev->power_on);
3971
3972         return id;
3973
3974 err_tfm:
3975         crypto_free_blkcipher(hdev->tfm_aes);
3976 err_wqueue:
3977         destroy_workqueue(hdev->workqueue);
3978         destroy_workqueue(hdev->req_workqueue);
3979 err:
3980         ida_simple_remove(&hci_index_ida, hdev->id);
3981
3982         return error;
3983 }
3984 EXPORT_SYMBOL(hci_register_dev);
3985
3986 /* Unregister HCI device */
3987 void hci_unregister_dev(struct hci_dev *hdev)
3988 {
3989         int i, id;
3990
3991         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3992
3993         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3994
3995         id = hdev->id;
3996
3997         write_lock(&hci_dev_list_lock);
3998         list_del(&hdev->list);
3999         write_unlock(&hci_dev_list_lock);
4000
4001         hci_dev_do_close(hdev);
4002
4003         for (i = 0; i < NUM_REASSEMBLY; i++)
4004                 kfree_skb(hdev->reassembly[i]);
4005
4006         cancel_work_sync(&hdev->power_on);
4007
4008         if (!test_bit(HCI_INIT, &hdev->flags) &&
4009             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4010             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4011                 hci_dev_lock(hdev);
4012                 mgmt_index_removed(hdev);
4013                 hci_dev_unlock(hdev);
4014         }
4015
4016         /* mgmt_index_removed should take care of emptying the
4017          * pending list */
4018         BUG_ON(!list_empty(&hdev->mgmt_pending));
4019
4020         hci_notify(hdev, HCI_DEV_UNREG);
4021
4022         if (hdev->rfkill) {
4023                 rfkill_unregister(hdev->rfkill);
4024                 rfkill_destroy(hdev->rfkill);
4025         }
4026
4027         if (hdev->tfm_aes)
4028                 crypto_free_blkcipher(hdev->tfm_aes);
4029
4030         device_del(&hdev->dev);
4031
4032         debugfs_remove_recursive(hdev->debugfs);
4033
4034         destroy_workqueue(hdev->workqueue);
4035         destroy_workqueue(hdev->req_workqueue);
4036
4037         hci_dev_lock(hdev);
4038         hci_bdaddr_list_clear(&hdev->blacklist);
4039         hci_uuids_clear(hdev);
4040         hci_link_keys_clear(hdev);
4041         hci_smp_ltks_clear(hdev);
4042         hci_smp_irks_clear(hdev);
4043         hci_remote_oob_data_clear(hdev);
4044         hci_bdaddr_list_clear(&hdev->le_white_list);
4045         hci_conn_params_clear_all(hdev);
4046         hci_dev_unlock(hdev);
4047
4048         hci_dev_put(hdev);
4049
4050         ida_simple_remove(&hci_index_ida, id);
4051 }
4052 EXPORT_SYMBOL(hci_unregister_dev);
4053
4054 /* Suspend HCI device */
4055 int hci_suspend_dev(struct hci_dev *hdev)
4056 {
4057         hci_notify(hdev, HCI_DEV_SUSPEND);
4058         return 0;
4059 }
4060 EXPORT_SYMBOL(hci_suspend_dev);
4061
4062 /* Resume HCI device */
4063 int hci_resume_dev(struct hci_dev *hdev)
4064 {
4065         hci_notify(hdev, HCI_DEV_RESUME);
4066         return 0;
4067 }
4068 EXPORT_SYMBOL(hci_resume_dev);
4069
4070 /* Receive frame from HCI drivers */
4071 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4072 {
4073         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4074                       !test_bit(HCI_INIT, &hdev->flags))) {
4075                 kfree_skb(skb);
4076                 return -ENXIO;
4077         }
4078
4079         /* Incoming skb */
4080         bt_cb(skb)->incoming = 1;
4081
4082         /* Time stamp */
4083         __net_timestamp(skb);
4084
4085         skb_queue_tail(&hdev->rx_q, skb);
4086         queue_work(hdev->workqueue, &hdev->rx_work);
4087
4088         return 0;
4089 }
4090 EXPORT_SYMBOL(hci_recv_frame);
4091
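/* Incrementally reassemble one HCI packet from driver-provided bytes.
 * scb->expect tracks how many bytes are still needed: the packet header
 * is collected first, then the payload length is read from that header.
 * A completed frame is passed to hci_recv_frame(); the return value is
 * the number of unconsumed input bytes, or a negative error.
 */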
4092 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4093                           int count, __u8 index)
4094 {
4095         int len = 0;
4096         int hlen = 0;
4097         int remain = count;
4098         struct sk_buff *skb;
4099         struct bt_skb_cb *scb;
4100
4101         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4102             index >= NUM_REASSEMBLY)
4103                 return -EILSEQ;
4104
4105         skb = hdev->reassembly[index];
4106
4107         if (!skb) {
4108                 switch (type) {
4109                 case HCI_ACLDATA_PKT:
4110                         len = HCI_MAX_FRAME_SIZE;
4111                         hlen = HCI_ACL_HDR_SIZE;
4112                         break;
4113                 case HCI_EVENT_PKT:
4114                         len = HCI_MAX_EVENT_SIZE;
4115                         hlen = HCI_EVENT_HDR_SIZE;
4116                         break;
4117                 case HCI_SCODATA_PKT:
4118                         len = HCI_MAX_SCO_SIZE;
4119                         hlen = HCI_SCO_HDR_SIZE;
4120                         break;
4121                 }
4122
4123                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4124                 if (!skb)
4125                         return -ENOMEM;
4126
4127                 scb = (void *) skb->cb;
4128                 scb->expect = hlen;
4129                 scb->pkt_type = type;
4130
4131                 hdev->reassembly[index] = skb;
4132         }
4133
4134         while (count) {
4135                 scb = (void *) skb->cb;
4136                 len = min_t(uint, scb->expect, count);
4137
4138                 memcpy(skb_put(skb, len), data, len);
4139
4140                 count -= len;
4141                 data += len;
4142                 scb->expect -= len;
4143                 remain = count;
4144
4145                 switch (type) {
4146                 case HCI_EVENT_PKT:
4147                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4148                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4149                                 scb->expect = h->plen;
4150
4151                                 if (skb_tailroom(skb) < scb->expect) {
4152                                         kfree_skb(skb);
4153                                         hdev->reassembly[index] = NULL;
4154                                         return -ENOMEM;
4155                                 }
4156                         }
4157                         break;
4158
4159                 case HCI_ACLDATA_PKT:
4160                         if (skb->len == HCI_ACL_HDR_SIZE) {
4161                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4162                                 scb->expect = __le16_to_cpu(h->dlen);
4163
4164                                 if (skb_tailroom(skb) < scb->expect) {
4165                                         kfree_skb(skb);
4166                                         hdev->reassembly[index] = NULL;
4167                                         return -ENOMEM;
4168                                 }
4169                         }
4170                         break;
4171
4172                 case HCI_SCODATA_PKT:
4173                         if (skb->len == HCI_SCO_HDR_SIZE) {
4174                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4175                                 scb->expect = h->dlen;
4176
4177                                 if (skb_tailroom(skb) < scb->expect) {
4178                                         kfree_skb(skb);
4179                                         hdev->reassembly[index] = NULL;
4180                                         return -ENOMEM;
4181                                 }
4182                         }
4183                         break;
4184                 }
4185
4186                 if (scb->expect == 0) {
4187                         /* Complete frame */
4188
4189                         bt_cb(skb)->pkt_type = type;
4190                         hci_recv_frame(hdev, skb);
4191
4192                         hdev->reassembly[index] = NULL;
4193                         return remain;
4194                 }
4195         }
4196
4197         return remain;
4198 }
4199
4200 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4201 {
4202         int rem = 0;
4203
4204         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4205                 return -EILSEQ;
4206
4207         while (count) {
4208                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4209                 if (rem < 0)
4210                         return rem;
4211
4212                 data += (count - rem);
4213                 count = rem;
4214         }
4215
4216         return rem;
4217 }
4218 EXPORT_SYMBOL(hci_recv_fragment);
4219
4220 #define STREAM_REASSEMBLY 0
4221
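/* Stream reassembly for drivers that deliver a raw byte stream (e.g.
 * UART transports), where the packet type is the first inline byte.
 * Slot 0 is free for this purpose because hci_recv_fragment() indexes
 * reassembly buffers by (type - 1), which only uses slots 1 through 3.
 */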
4222 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4223 {
4224         int type;
4225         int rem = 0;
4226
4227         while (count) {
4228                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4229
4230                 if (!skb) {
4231                         struct { char type; } *pkt;
4232
4233                         /* Start of the frame */
4234                         pkt = data;
4235                         type = pkt->type;
4236
4237                         data++;
4238                         count--;
4239                 } else
4240                         type = bt_cb(skb)->pkt_type;
4241
4242                 rem = hci_reassembly(hdev, type, data, count,
4243                                      STREAM_REASSEMBLY);
4244                 if (rem < 0)
4245                         return rem;
4246
4247                 data += (count - rem);
4248                 count = rem;
4249         }
4250
4251         return rem;
4252 }
4253 EXPORT_SYMBOL(hci_recv_stream_fragment);
4254
4255 /* ---- Interface to upper protocols ---- */
4256
4257 int hci_register_cb(struct hci_cb *cb)
4258 {
4259         BT_DBG("%p name %s", cb, cb->name);
4260
4261         write_lock(&hci_cb_list_lock);
4262         list_add(&cb->list, &hci_cb_list);
4263         write_unlock(&hci_cb_list_lock);
4264
4265         return 0;
4266 }
4267 EXPORT_SYMBOL(hci_register_cb);
4268
4269 int hci_unregister_cb(struct hci_cb *cb)
4270 {
4271         BT_DBG("%p name %s", cb, cb->name);
4272
4273         write_lock(&hci_cb_list_lock);
4274         list_del(&cb->list);
4275         write_unlock(&hci_cb_list_lock);
4276
4277         return 0;
4278 }
4279 EXPORT_SYMBOL(hci_unregister_cb);
4280
4281 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4282 {
4283         int err;
4284
4285         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4286
4287         /* Time stamp */
4288         __net_timestamp(skb);
4289
4290         /* Send copy to monitor */
4291         hci_send_to_monitor(hdev, skb);
4292
4293         if (atomic_read(&hdev->promisc)) {
4294                 /* Send copy to the sockets */
4295                 hci_send_to_sock(hdev, skb);
4296         }
4297
4298         /* Get rid of skb owner, prior to sending to the driver. */
4299         skb_orphan(skb);
4300
4301         err = hdev->send(hdev, skb);
4302         if (err < 0) {
4303                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4304                 kfree_skb(skb);
4305         }
4306 }
4307
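/* Asynchronous HCI request helpers. Commands are first queued on a
 * private queue and only spliced onto hdev->cmd_q by hci_req_run(). A
 * typical caller (illustrative sketch, mirroring le_scan_disable_work()
 * above; cp and complete_cb are placeholders):
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *      err = hci_req_run(&req, complete_cb);
 */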
4308 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4309 {
4310         skb_queue_head_init(&req->cmd_q);
4311         req->hdev = hdev;
4312         req->err = 0;
4313 }
4314
4315 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4316 {
4317         struct hci_dev *hdev = req->hdev;
4318         struct sk_buff *skb;
4319         unsigned long flags;
4320
4321         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4322
4323         /* If an error occurred during request building, remove all HCI
4324          * commands queued on the HCI request queue.
4325          */
4326         if (req->err) {
4327                 skb_queue_purge(&req->cmd_q);
4328                 return req->err;
4329         }
4330
4331         /* Do not allow empty requests */
4332         if (skb_queue_empty(&req->cmd_q))
4333                 return -ENODATA;
4334
4335         skb = skb_peek_tail(&req->cmd_q);
4336         bt_cb(skb)->req.complete = complete;
4337
4338         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4339         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4340         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4341
4342         queue_work(hdev->workqueue, &hdev->cmd_work);
4343
4344         return 0;
4345 }
4346
4347 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4348                                        u32 plen, const void *param)
4349 {
4350         int len = HCI_COMMAND_HDR_SIZE + plen;
4351         struct hci_command_hdr *hdr;
4352         struct sk_buff *skb;
4353
4354         skb = bt_skb_alloc(len, GFP_ATOMIC);
4355         if (!skb)
4356                 return NULL;
4357
4358         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4359         hdr->opcode = cpu_to_le16(opcode);
4360         hdr->plen   = plen;
4361
4362         if (plen)
4363                 memcpy(skb_put(skb, plen), param, plen);
4364
4365         BT_DBG("skb len %d", skb->len);
4366
4367         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4368
4369         return skb;
4370 }
4371
4372 /* Send HCI command */
4373 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4374                  const void *param)
4375 {
4376         struct sk_buff *skb;
4377
4378         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4379
4380         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4381         if (!skb) {
4382                 BT_ERR("%s no memory for command", hdev->name);
4383                 return -ENOMEM;
4384         }
4385
4386         /* Stand-alone HCI commands must be flagged as
4387          * single-command requests.
4388          */
4389         bt_cb(skb)->req.start = true;
4390
4391         skb_queue_tail(&hdev->cmd_q, skb);
4392         queue_work(hdev->workqueue, &hdev->cmd_work);
4393
4394         return 0;
4395 }
4396
4397 /* Queue a command to an asynchronous HCI request */
4398 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4399                     const void *param, u8 event)
4400 {
4401         struct hci_dev *hdev = req->hdev;
4402         struct sk_buff *skb;
4403
4404         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4405
4406         /* If an error occurred during request building, there is no point in
4407          * queueing the HCI command. We can simply return.
4408          */
4409         if (req->err)
4410                 return;
4411
4412         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4413         if (!skb) {
4414                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4415                        hdev->name, opcode);
4416                 req->err = -ENOMEM;
4417                 return;
4418         }
4419
4420         if (skb_queue_empty(&req->cmd_q))
4421                 bt_cb(skb)->req.start = true;
4422
4423         bt_cb(skb)->req.event = event;
4424
4425         skb_queue_tail(&req->cmd_q, skb);
4426 }
4427
4428 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4429                  const void *param)
4430 {
4431         hci_req_add_ev(req, opcode, plen, param, 0);
4432 }
4433
4434 /* Get data from the previously sent command */
4435 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4436 {
4437         struct hci_command_hdr *hdr;
4438
4439         if (!hdev->sent_cmd)
4440                 return NULL;
4441
4442         hdr = (void *) hdev->sent_cmd->data;
4443
4444         if (hdr->opcode != cpu_to_le16(opcode))
4445                 return NULL;
4446
4447         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4448
4449         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4450 }
4451
4452 /* Send ACL data */
4453 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4454 {
4455         struct hci_acl_hdr *hdr;
4456         int len = skb->len;
4457
4458         skb_push(skb, HCI_ACL_HDR_SIZE);
4459         skb_reset_transport_header(skb);
4460         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4461         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4462         hdr->dlen   = cpu_to_le16(len);
4463 }
4464
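/* Queue an ACL frame, splitting any frag_list into individually
 * headered fragments: the first fragment keeps ACL_START in its flags,
 * all following fragments are sent with ACL_CONT, and the whole set is
 * queued atomically under the queue lock.
 */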
4465 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4466                           struct sk_buff *skb, __u16 flags)
4467 {
4468         struct hci_conn *conn = chan->conn;
4469         struct hci_dev *hdev = conn->hdev;
4470         struct sk_buff *list;
4471
4472         skb->len = skb_headlen(skb);
4473         skb->data_len = 0;
4474
4475         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4476
4477         switch (hdev->dev_type) {
4478         case HCI_BREDR:
4479                 hci_add_acl_hdr(skb, conn->handle, flags);
4480                 break;
4481         case HCI_AMP:
4482                 hci_add_acl_hdr(skb, chan->handle, flags);
4483                 break;
4484         default:
4485                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4486                 return;
4487         }
4488
4489         list = skb_shinfo(skb)->frag_list;
4490         if (!list) {
4491                 /* Non-fragmented */
4492                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4493
4494                 skb_queue_tail(queue, skb);
4495         } else {
4496                 /* Fragmented */
4497                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4498
4499                 skb_shinfo(skb)->frag_list = NULL;
4500
4501                 /* Queue all fragments atomically */
4502                 spin_lock(&queue->lock);
4503
4504                 __skb_queue_tail(queue, skb);
4505
4506                 flags &= ~ACL_START;
4507                 flags |= ACL_CONT;
4508                 do {
4509                         skb = list; list = list->next;
4510
4511                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4512                         hci_add_acl_hdr(skb, conn->handle, flags);
4513
4514                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4515
4516                         __skb_queue_tail(queue, skb);
4517                 } while (list);
4518
4519                 spin_unlock(&queue->lock);
4520         }
4521 }
4522
4523 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4524 {
4525         struct hci_dev *hdev = chan->conn->hdev;
4526
4527         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4528
4529         hci_queue_acl(chan, &chan->data_q, skb, flags);
4530
4531         queue_work(hdev->workqueue, &hdev->tx_work);
4532 }
4533
4534 /* Send SCO data */
4535 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4536 {
4537         struct hci_dev *hdev = conn->hdev;
4538         struct hci_sco_hdr hdr;
4539
4540         BT_DBG("%s len %d", hdev->name, skb->len);
4541
4542         hdr.handle = cpu_to_le16(conn->handle);
4543         hdr.dlen   = skb->len;
4544
4545         skb_push(skb, HCI_SCO_HDR_SIZE);
4546         skb_reset_transport_header(skb);
4547         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4548
4549         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4550
4551         skb_queue_tail(&conn->data_q, skb);
4552         queue_work(hdev->workqueue, &hdev->tx_work);
4553 }
4554
4555 /* ---- HCI TX task (outgoing data) ---- */
4556
4557 /* HCI Connection scheduler */
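/* Pick the connection of the given type with the least outstanding data
 * (lowest ->sent) among those with queued frames, and compute a fair
 * quote: the available controller buffer credits divided by the number
 * of ready connections (minimum 1).
 */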
4558 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4559                                      int *quote)
4560 {
4561         struct hci_conn_hash *h = &hdev->conn_hash;
4562         struct hci_conn *conn = NULL, *c;
4563         unsigned int num = 0, min = ~0;
4564
4565         /* We don't have to lock the device here. Connections are always
4566          * added and removed with the TX task disabled. */
4567
4568         rcu_read_lock();
4569
4570         list_for_each_entry_rcu(c, &h->list, list) {
4571                 if (c->type != type || skb_queue_empty(&c->data_q))
4572                         continue;
4573
4574                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4575                         continue;
4576
4577                 num++;
4578
4579                 if (c->sent < min) {
4580                         min  = c->sent;
4581                         conn = c;
4582                 }
4583
4584                 if (hci_conn_num(hdev, type) == num)
4585                         break;
4586         }
4587
4588         rcu_read_unlock();
4589
4590         if (conn) {
4591                 int cnt, q;
4592
4593                 switch (conn->type) {
4594                 case ACL_LINK:
4595                         cnt = hdev->acl_cnt;
4596                         break;
4597                 case SCO_LINK:
4598                 case ESCO_LINK:
4599                         cnt = hdev->sco_cnt;
4600                         break;
4601                 case LE_LINK:
4602                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4603                         break;
4604                 default:
4605                         cnt = 0;
4606                         BT_ERR("Unknown link type");
4607                 }
4608
4609                 q = cnt / num;
4610                 *quote = q ? q : 1;
4611         } else
4612                 *quote = 0;
4613
4614         BT_DBG("conn %p quote %d", conn, *quote);
4615         return conn;
4616 }
4617
4618 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4619 {
4620         struct hci_conn_hash *h = &hdev->conn_hash;
4621         struct hci_conn *c;
4622
4623         BT_ERR("%s link tx timeout", hdev->name);
4624
4625         rcu_read_lock();
4626
4627         /* Kill stalled connections */
4628         list_for_each_entry_rcu(c, &h->list, list) {
4629                 if (c->type == type && c->sent) {
4630                         BT_ERR("%s killing stalled connection %pMR",
4631                                hdev->name, &c->dst);
4632                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4633                 }
4634         }
4635
4636         rcu_read_unlock();
4637 }
4638
4639 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4640                                       int *quote)
4641 {
4642         struct hci_conn_hash *h = &hdev->conn_hash;
4643         struct hci_chan *chan = NULL;
4644         unsigned int num = 0, min = ~0, cur_prio = 0;
4645         struct hci_conn *conn;
4646         int cnt, q, conn_num = 0;
4647
4648         BT_DBG("%s", hdev->name);
4649
4650         rcu_read_lock();
4651
4652         list_for_each_entry_rcu(conn, &h->list, list) {
4653                 struct hci_chan *tmp;
4654
4655                 if (conn->type != type)
4656                         continue;
4657
4658                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4659                         continue;
4660
4661                 conn_num++;
4662
4663                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4664                         struct sk_buff *skb;
4665
4666                         if (skb_queue_empty(&tmp->data_q))
4667                                 continue;
4668
4669                         skb = skb_peek(&tmp->data_q);
4670                         if (skb->priority < cur_prio)
4671                                 continue;
4672
4673                         if (skb->priority > cur_prio) {
4674                                 num = 0;
4675                                 min = ~0;
4676                                 cur_prio = skb->priority;
4677                         }
4678
4679                         num++;
4680
4681                         if (conn->sent < min) {
4682                                 min  = conn->sent;
4683                                 chan = tmp;
4684                         }
4685                 }
4686
4687                 if (hci_conn_num(hdev, type) == conn_num)
4688                         break;
4689         }
4690
4691         rcu_read_unlock();
4692
4693         if (!chan)
4694                 return NULL;
4695
4696         switch (chan->conn->type) {
4697         case ACL_LINK:
4698                 cnt = hdev->acl_cnt;
4699                 break;
4700         case AMP_LINK:
4701                 cnt = hdev->block_cnt;
4702                 break;
4703         case SCO_LINK:
4704         case ESCO_LINK:
4705                 cnt = hdev->sco_cnt;
4706                 break;
4707         case LE_LINK:
4708                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4709                 break;
4710         default:
4711                 cnt = 0;
4712                 BT_ERR("Unknown link type");
4713         }
4714
4715         q = cnt / num;
4716         *quote = q ? q : 1;
4717         BT_DBG("chan %p quote %d", chan, *quote);
4718         return chan;
4719 }
4720
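/* Starvation avoidance: channels that transmitted in the last round get
 * their counter reset, while channels still waiting with queued data
 * have the priority of their head frame promoted to HCI_PRIO_MAX - 1 so
 * they eventually win scheduling.
 */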
4721 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4722 {
4723         struct hci_conn_hash *h = &hdev->conn_hash;
4724         struct hci_conn *conn;
4725         int num = 0;
4726
4727         BT_DBG("%s", hdev->name);
4728
4729         rcu_read_lock();
4730
4731         list_for_each_entry_rcu(conn, &h->list, list) {
4732                 struct hci_chan *chan;
4733
4734                 if (conn->type != type)
4735                         continue;
4736
4737                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4738                         continue;
4739
4740                 num++;
4741
4742                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4743                         struct sk_buff *skb;
4744
4745                         if (chan->sent) {
4746                                 chan->sent = 0;
4747                                 continue;
4748                         }
4749
4750                         if (skb_queue_empty(&chan->data_q))
4751                                 continue;
4752
4753                         skb = skb_peek(&chan->data_q);
4754                         if (skb->priority >= HCI_PRIO_MAX - 1)
4755                                 continue;
4756
4757                         skb->priority = HCI_PRIO_MAX - 1;
4758
4759                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4760                                skb->priority);
4761                 }
4762
4763                 if (hci_conn_num(hdev, type) == num)
4764                         break;
4765         }
4766
4767         rcu_read_unlock();
4768
4769 }
4770
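/* Block-based flow control (used by AMP controllers) accounts for
 * controller buffer blocks rather than whole packets.
 */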
4771 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4772 {
4773         /* Calculate count of blocks used by this packet */
4774         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4775 }
4776
4777 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4778 {
4779         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4780                 /* The ACL tx timeout must be longer than the maximum
4781                  * link supervision timeout (40.9 seconds). */
4782                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4783                                        HCI_ACL_TX_TIMEOUT))
4784                         hci_link_tx_to(hdev, ACL_LINK);
4785         }
4786 }
4787
4788 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4789 {
4790         unsigned int cnt = hdev->acl_cnt;
4791         struct hci_chan *chan;
4792         struct sk_buff *skb;
4793         int quote;
4794
4795         __check_timeout(hdev, cnt);
4796
4797         while (hdev->acl_cnt &&
4798                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4799                 u32 priority = (skb_peek(&chan->data_q))->priority;
4800                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4801                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4802                                skb->len, skb->priority);
4803
4804                         /* Stop if priority has changed */
4805                         if (skb->priority < priority)
4806                                 break;
4807
4808                         skb = skb_dequeue(&chan->data_q);
4809
4810                         hci_conn_enter_active_mode(chan->conn,
4811                                                    bt_cb(skb)->force_active);
4812
4813                         hci_send_frame(hdev, skb);
4814                         hdev->acl_last_tx = jiffies;
4815
4816                         hdev->acl_cnt--;
4817                         chan->sent++;
4818                         chan->conn->sent++;
4819                 }
4820         }
4821
4822         if (cnt != hdev->acl_cnt)
4823                 hci_prio_recalculate(hdev, ACL_LINK);
4824 }
4825
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt) {
                                /* Not enough block credits left for this
                                 * frame; requeue it instead of dropping it.
                                 */
                                skb_queue_head(&chan->data_q, skb);
                                return;
                        }

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

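/* Dispatch ACL scheduling according to the flow control mode the
 * controller advertised: per-packet credits for BR/EDR controllers,
 * per-block credits for block-based (AMP) controllers.
 */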
static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL links on a BR/EDR controller: nothing to schedule */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* No AMP links on an AMP controller: nothing to schedule */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}

/* Schedule SCO */
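/* hci_low_sent() picks the synchronous connection with the lowest
 * sent count and derives its quota from hdev->sco_cnt.  The sent
 * counter is wrapped back to zero on overflow, since not every
 * controller reports completed packets for synchronous links.
 */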
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

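/* eSCO links draw from the same credit pool (hdev->sco_cnt) and use
 * the same scheduling scheme as SCO.
 */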
static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

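/* LE scheduling.  Controllers that report no dedicated LE buffers
 * (hdev->le_pkts == 0) share the BR/EDR ACL buffer pool, which is why
 * the credit count below falls back to hdev->acl_cnt.
 */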
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

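/* TX work item: drains every outgoing queue.  When the device is held
 * exclusively through the user channel, scheduling is left to the
 * user channel owner and only the raw queue is flushed.
 */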
static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
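/* The 16-bit handle field packs a 12-bit connection handle together
 * with the 2-bit Packet Boundary and 2-bit Broadcast flags;
 * hci_handle() and hci_flags() split them apart.
 */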
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

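/* Commands queued through the request framework are marked so that
 * the first command of each request carries req.start.  The current
 * request has therefore fully drained once the command queue is
 * empty or its head begins a new request.
 */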
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

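/* Requeue a clone of the last sent command, unless it was HCI_Reset.
 * This recovers from controllers that reset themselves in the middle
 * of a request (see hci_req_cmd_complete() below).
 */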
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent, we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR-based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

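/* RX work item: drains hdev->rx_q.  Every frame is copied to the
 * monitor sockets first, and to raw sockets when promiscuous watchers
 * are present, before being dispatched by packet type.
 */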
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

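/* Command work item: sends the next queued command when a command
 * credit is available.  A clone is kept in hdev->sent_cmd so the
 * completion event can be matched against it, and cmd_timer acts as
 * a watchdog for controllers that never answer.
 */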
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

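/* Build the two-command request for passive background scanning.  The
 * LE scan parameters have to be programmed before scanning is
 * enabled, so both commands are queued on the same request.
 */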
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using an unresolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
        if (status)
                BT_DBG("HCI request failed to update background scanning: "
                       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
        struct hci_request req;
        struct hci_conn *conn;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            test_bit(HCI_SETUP, &hdev->dev_flags) ||
            test_bit(HCI_CONFIG, &hdev->dev_flags) ||
            test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
            test_bit(HCI_UNREGISTER, &hdev->dev_flags))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        hci_req_init(&req, hdev);

        if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
            list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If the controller is not scanning we are done. */
                if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        return;

                hci_req_add_le_scan_disable(&req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If the controller is connecting, we should not start
                 * scanning since some controllers are not able to scan and
                 * connect at the same time.
                 */
                conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (conn)
                        return;

                /* If the controller is currently scanning, we stop it to
                 * ensure we don't miss any advertising (due to duplicates
                 * filter).
                 */
                if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        hci_req_add_le_scan_disable(&req);

                hci_req_add_le_passive_scan(&req);

                BT_DBG("%s starting background scanning", hdev->name);
        }

        err = hci_req_run(&req, update_background_scan_complete);
        if (err)
                BT_ERR("Failed to run HCI request: err %d", err);
}