2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
57 /* ---- HCI notifications ---- */
/* ---- HCI notifications ---- */

/* Forward a device event (register/unregister, up/down) to the HCI
 * socket layer so monitoring sockets see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
64 /* ---- HCI debugfs entries ---- */
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
84 size_t buf_size = min(count, (sizeof(buf)-1));
88 if (!test_bit(HCI_UP, &hdev->flags))
91 if (copy_from_user(buf, user_buf, buf_size))
95 if (strtobool(buf, &enable))
98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
108 hci_req_unlock(hdev);
113 err = -bt_to_errno(skb->data[0]);
119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
124 static const struct file_operations dut_mode_fops = {
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
131 static int features_show(struct seq_file *f, void *ptr)
133 struct hci_dev *hdev = f->private;
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
152 hci_dev_unlock(hdev);
157 static int features_open(struct inode *inode, struct file *file)
159 return single_open(file, features_show, inode->i_private);
162 static const struct file_operations features_fops = {
163 .open = features_open,
166 .release = single_release,
169 static int blacklist_show(struct seq_file *f, void *p)
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
175 list_for_each_entry(b, &hdev->blacklist, list)
176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
177 hci_dev_unlock(hdev);
182 static int blacklist_open(struct inode *inode, struct file *file)
184 return single_open(file, blacklist_show, inode->i_private);
187 static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
191 .release = single_release,
194 static int uuids_show(struct seq_file *f, void *p)
196 struct hci_dev *hdev = f->private;
197 struct bt_uuid *uuid;
200 list_for_each_entry(uuid, &hdev->uuids, list) {
203 /* The Bluetooth UUID values are stored in big endian,
204 * but with reversed byte order. So convert them into
205 * the right order for the %pUb modifier.
207 for (i = 0; i < 16; i++)
208 val[i] = uuid->uuid[15 - i];
210 seq_printf(f, "%pUb\n", val);
212 hci_dev_unlock(hdev);
217 static int uuids_open(struct inode *inode, struct file *file)
219 return single_open(file, uuids_show, inode->i_private);
222 static const struct file_operations uuids_fops = {
226 .release = single_release,
229 static int inquiry_cache_show(struct seq_file *f, void *p)
231 struct hci_dev *hdev = f->private;
232 struct discovery_state *cache = &hdev->discovery;
233 struct inquiry_entry *e;
237 list_for_each_entry(e, &cache->all, all) {
238 struct inquiry_data *data = &e->data;
239 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
241 data->pscan_rep_mode, data->pscan_period_mode,
242 data->pscan_mode, data->dev_class[2],
243 data->dev_class[1], data->dev_class[0],
244 __le16_to_cpu(data->clock_offset),
245 data->rssi, data->ssp_mode, e->timestamp);
248 hci_dev_unlock(hdev);
253 static int inquiry_cache_open(struct inode *inode, struct file *file)
255 return single_open(file, inquiry_cache_show, inode->i_private);
258 static const struct file_operations inquiry_cache_fops = {
259 .open = inquiry_cache_open,
262 .release = single_release,
265 static int link_keys_show(struct seq_file *f, void *ptr)
267 struct hci_dev *hdev = f->private;
268 struct list_head *p, *n;
271 list_for_each_safe(p, n, &hdev->link_keys) {
272 struct link_key *key = list_entry(p, struct link_key, list);
273 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
274 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
276 hci_dev_unlock(hdev);
281 static int link_keys_open(struct inode *inode, struct file *file)
283 return single_open(file, link_keys_show, inode->i_private);
286 static const struct file_operations link_keys_fops = {
287 .open = link_keys_open,
290 .release = single_release,
293 static int dev_class_show(struct seq_file *f, void *ptr)
295 struct hci_dev *hdev = f->private;
298 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
299 hdev->dev_class[1], hdev->dev_class[0]);
300 hci_dev_unlock(hdev);
305 static int dev_class_open(struct inode *inode, struct file *file)
307 return single_open(file, dev_class_show, inode->i_private);
310 static const struct file_operations dev_class_fops = {
311 .open = dev_class_open,
314 .release = single_release,
317 static int voice_setting_get(void *data, u64 *val)
319 struct hci_dev *hdev = data;
322 *val = hdev->voice_setting;
323 hci_dev_unlock(hdev);
328 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
329 NULL, "0x%4.4llx\n");
331 static int auto_accept_delay_set(void *data, u64 val)
333 struct hci_dev *hdev = data;
336 hdev->auto_accept_delay = val;
337 hci_dev_unlock(hdev);
342 static int auto_accept_delay_get(void *data, u64 *val)
344 struct hci_dev *hdev = data;
347 *val = hdev->auto_accept_delay;
348 hci_dev_unlock(hdev);
353 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
354 auto_accept_delay_set, "%llu\n");
356 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
357 size_t count, loff_t *ppos)
359 struct hci_dev *hdev = file->private_data;
362 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
365 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
368 static ssize_t force_sc_support_write(struct file *file,
369 const char __user *user_buf,
370 size_t count, loff_t *ppos)
372 struct hci_dev *hdev = file->private_data;
374 size_t buf_size = min(count, (sizeof(buf)-1));
377 if (test_bit(HCI_UP, &hdev->flags))
380 if (copy_from_user(buf, user_buf, buf_size))
383 buf[buf_size] = '\0';
384 if (strtobool(buf, &enable))
387 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
390 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
395 static const struct file_operations force_sc_support_fops = {
397 .read = force_sc_support_read,
398 .write = force_sc_support_write,
399 .llseek = default_llseek,
402 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
403 size_t count, loff_t *ppos)
405 struct hci_dev *hdev = file->private_data;
408 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
411 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
414 static const struct file_operations sc_only_mode_fops = {
416 .read = sc_only_mode_read,
417 .llseek = default_llseek,
420 static int idle_timeout_set(void *data, u64 val)
422 struct hci_dev *hdev = data;
424 if (val != 0 && (val < 500 || val > 3600000))
428 hdev->idle_timeout = val;
429 hci_dev_unlock(hdev);
434 static int idle_timeout_get(void *data, u64 *val)
436 struct hci_dev *hdev = data;
439 *val = hdev->idle_timeout;
440 hci_dev_unlock(hdev);
445 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
446 idle_timeout_set, "%llu\n");
448 static int rpa_timeout_set(void *data, u64 val)
450 struct hci_dev *hdev = data;
452 /* Require the RPA timeout to be at least 30 seconds and at most
455 if (val < 30 || val > (60 * 60 * 24))
459 hdev->rpa_timeout = val;
460 hci_dev_unlock(hdev);
465 static int rpa_timeout_get(void *data, u64 *val)
467 struct hci_dev *hdev = data;
470 *val = hdev->rpa_timeout;
471 hci_dev_unlock(hdev);
476 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
477 rpa_timeout_set, "%llu\n");
479 static int sniff_min_interval_set(void *data, u64 val)
481 struct hci_dev *hdev = data;
483 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
487 hdev->sniff_min_interval = val;
488 hci_dev_unlock(hdev);
493 static int sniff_min_interval_get(void *data, u64 *val)
495 struct hci_dev *hdev = data;
498 *val = hdev->sniff_min_interval;
499 hci_dev_unlock(hdev);
504 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
505 sniff_min_interval_set, "%llu\n");
507 static int sniff_max_interval_set(void *data, u64 val)
509 struct hci_dev *hdev = data;
511 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
515 hdev->sniff_max_interval = val;
516 hci_dev_unlock(hdev);
521 static int sniff_max_interval_get(void *data, u64 *val)
523 struct hci_dev *hdev = data;
526 *val = hdev->sniff_max_interval;
527 hci_dev_unlock(hdev);
532 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
533 sniff_max_interval_set, "%llu\n");
535 static int conn_info_min_age_set(void *data, u64 val)
537 struct hci_dev *hdev = data;
539 if (val == 0 || val > hdev->conn_info_max_age)
543 hdev->conn_info_min_age = val;
544 hci_dev_unlock(hdev);
549 static int conn_info_min_age_get(void *data, u64 *val)
551 struct hci_dev *hdev = data;
554 *val = hdev->conn_info_min_age;
555 hci_dev_unlock(hdev);
560 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
561 conn_info_min_age_set, "%llu\n");
563 static int conn_info_max_age_set(void *data, u64 val)
565 struct hci_dev *hdev = data;
567 if (val == 0 || val < hdev->conn_info_min_age)
571 hdev->conn_info_max_age = val;
572 hci_dev_unlock(hdev);
577 static int conn_info_max_age_get(void *data, u64 *val)
579 struct hci_dev *hdev = data;
582 *val = hdev->conn_info_max_age;
583 hci_dev_unlock(hdev);
588 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
589 conn_info_max_age_set, "%llu\n");
591 static int identity_show(struct seq_file *f, void *p)
593 struct hci_dev *hdev = f->private;
599 hci_copy_identity_address(hdev, &addr, &addr_type);
601 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
602 16, hdev->irk, &hdev->rpa);
604 hci_dev_unlock(hdev);
609 static int identity_open(struct inode *inode, struct file *file)
611 return single_open(file, identity_show, inode->i_private);
614 static const struct file_operations identity_fops = {
615 .open = identity_open,
618 .release = single_release,
621 static int random_address_show(struct seq_file *f, void *p)
623 struct hci_dev *hdev = f->private;
626 seq_printf(f, "%pMR\n", &hdev->random_addr);
627 hci_dev_unlock(hdev);
632 static int random_address_open(struct inode *inode, struct file *file)
634 return single_open(file, random_address_show, inode->i_private);
637 static const struct file_operations random_address_fops = {
638 .open = random_address_open,
641 .release = single_release,
644 static int static_address_show(struct seq_file *f, void *p)
646 struct hci_dev *hdev = f->private;
649 seq_printf(f, "%pMR\n", &hdev->static_addr);
650 hci_dev_unlock(hdev);
655 static int static_address_open(struct inode *inode, struct file *file)
657 return single_open(file, static_address_show, inode->i_private);
660 static const struct file_operations static_address_fops = {
661 .open = static_address_open,
664 .release = single_release,
667 static ssize_t force_static_address_read(struct file *file,
668 char __user *user_buf,
669 size_t count, loff_t *ppos)
671 struct hci_dev *hdev = file->private_data;
674 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
677 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
680 static ssize_t force_static_address_write(struct file *file,
681 const char __user *user_buf,
682 size_t count, loff_t *ppos)
684 struct hci_dev *hdev = file->private_data;
686 size_t buf_size = min(count, (sizeof(buf)-1));
689 if (test_bit(HCI_UP, &hdev->flags))
692 if (copy_from_user(buf, user_buf, buf_size))
695 buf[buf_size] = '\0';
696 if (strtobool(buf, &enable))
699 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
702 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
707 static const struct file_operations force_static_address_fops = {
709 .read = force_static_address_read,
710 .write = force_static_address_write,
711 .llseek = default_llseek,
714 static int white_list_show(struct seq_file *f, void *ptr)
716 struct hci_dev *hdev = f->private;
717 struct bdaddr_list *b;
720 list_for_each_entry(b, &hdev->le_white_list, list)
721 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
722 hci_dev_unlock(hdev);
727 static int white_list_open(struct inode *inode, struct file *file)
729 return single_open(file, white_list_show, inode->i_private);
732 static const struct file_operations white_list_fops = {
733 .open = white_list_open,
736 .release = single_release,
739 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
741 struct hci_dev *hdev = f->private;
742 struct list_head *p, *n;
745 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
746 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
747 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
748 &irk->bdaddr, irk->addr_type,
749 16, irk->val, &irk->rpa);
751 hci_dev_unlock(hdev);
756 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
758 return single_open(file, identity_resolving_keys_show,
762 static const struct file_operations identity_resolving_keys_fops = {
763 .open = identity_resolving_keys_open,
766 .release = single_release,
769 static int long_term_keys_show(struct seq_file *f, void *ptr)
771 struct hci_dev *hdev = f->private;
772 struct list_head *p, *n;
775 list_for_each_safe(p, n, &hdev->long_term_keys) {
776 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
777 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
778 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
779 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
780 __le64_to_cpu(ltk->rand), 16, ltk->val);
782 hci_dev_unlock(hdev);
787 static int long_term_keys_open(struct inode *inode, struct file *file)
789 return single_open(file, long_term_keys_show, inode->i_private);
792 static const struct file_operations long_term_keys_fops = {
793 .open = long_term_keys_open,
796 .release = single_release,
799 static int conn_min_interval_set(void *data, u64 val)
801 struct hci_dev *hdev = data;
803 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
807 hdev->le_conn_min_interval = val;
808 hci_dev_unlock(hdev);
813 static int conn_min_interval_get(void *data, u64 *val)
815 struct hci_dev *hdev = data;
818 *val = hdev->le_conn_min_interval;
819 hci_dev_unlock(hdev);
824 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
825 conn_min_interval_set, "%llu\n");
827 static int conn_max_interval_set(void *data, u64 val)
829 struct hci_dev *hdev = data;
831 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
835 hdev->le_conn_max_interval = val;
836 hci_dev_unlock(hdev);
841 static int conn_max_interval_get(void *data, u64 *val)
843 struct hci_dev *hdev = data;
846 *val = hdev->le_conn_max_interval;
847 hci_dev_unlock(hdev);
852 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
853 conn_max_interval_set, "%llu\n");
855 static int conn_latency_set(void *data, u64 val)
857 struct hci_dev *hdev = data;
863 hdev->le_conn_latency = val;
864 hci_dev_unlock(hdev);
869 static int conn_latency_get(void *data, u64 *val)
871 struct hci_dev *hdev = data;
874 *val = hdev->le_conn_latency;
875 hci_dev_unlock(hdev);
880 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
881 conn_latency_set, "%llu\n");
883 static int supervision_timeout_set(void *data, u64 val)
885 struct hci_dev *hdev = data;
887 if (val < 0x000a || val > 0x0c80)
891 hdev->le_supv_timeout = val;
892 hci_dev_unlock(hdev);
897 static int supervision_timeout_get(void *data, u64 *val)
899 struct hci_dev *hdev = data;
902 *val = hdev->le_supv_timeout;
903 hci_dev_unlock(hdev);
908 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
909 supervision_timeout_set, "%llu\n");
911 static int adv_channel_map_set(void *data, u64 val)
913 struct hci_dev *hdev = data;
915 if (val < 0x01 || val > 0x07)
919 hdev->le_adv_channel_map = val;
920 hci_dev_unlock(hdev);
925 static int adv_channel_map_get(void *data, u64 *val)
927 struct hci_dev *hdev = data;
930 *val = hdev->le_adv_channel_map;
931 hci_dev_unlock(hdev);
936 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
937 adv_channel_map_set, "%llu\n");
939 static int device_list_show(struct seq_file *f, void *ptr)
941 struct hci_dev *hdev = f->private;
942 struct hci_conn_params *p;
945 list_for_each_entry(p, &hdev->le_conn_params, list) {
946 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
949 hci_dev_unlock(hdev);
954 static int device_list_open(struct inode *inode, struct file *file)
956 return single_open(file, device_list_show, inode->i_private);
959 static const struct file_operations device_list_fops = {
960 .open = device_list_open,
963 .release = single_release,
966 /* ---- HCI requests ---- */
968 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
970 BT_DBG("%s result 0x%2.2x", hdev->name, result);
972 if (hdev->req_status == HCI_REQ_PEND) {
973 hdev->req_result = result;
974 hdev->req_status = HCI_REQ_DONE;
975 wake_up_interruptible(&hdev->req_wait_q);
979 static void hci_req_cancel(struct hci_dev *hdev, int err)
981 BT_DBG("%s err 0x%2.2x", hdev->name, err);
983 if (hdev->req_status == HCI_REQ_PEND) {
984 hdev->req_result = err;
985 hdev->req_status = HCI_REQ_CANCELED;
986 wake_up_interruptible(&hdev->req_wait_q);
990 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
993 struct hci_ev_cmd_complete *ev;
994 struct hci_event_hdr *hdr;
999 skb = hdev->recv_evt;
1000 hdev->recv_evt = NULL;
1002 hci_dev_unlock(hdev);
1005 return ERR_PTR(-ENODATA);
1007 if (skb->len < sizeof(*hdr)) {
1008 BT_ERR("Too short HCI event");
1012 hdr = (void *) skb->data;
1013 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1016 if (hdr->evt != event)
1021 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1022 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1026 if (skb->len < sizeof(*ev)) {
1027 BT_ERR("Too short cmd_complete event");
1031 ev = (void *) skb->data;
1032 skb_pull(skb, sizeof(*ev));
1034 if (opcode == __le16_to_cpu(ev->opcode))
1037 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1038 __le16_to_cpu(ev->opcode));
1042 return ERR_PTR(-ENODATA);
1045 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1046 const void *param, u8 event, u32 timeout)
1048 DECLARE_WAITQUEUE(wait, current);
1049 struct hci_request req;
1052 BT_DBG("%s", hdev->name);
1054 hci_req_init(&req, hdev);
1056 hci_req_add_ev(&req, opcode, plen, param, event);
1058 hdev->req_status = HCI_REQ_PEND;
1060 err = hci_req_run(&req, hci_req_sync_complete);
1062 return ERR_PTR(err);
1064 add_wait_queue(&hdev->req_wait_q, &wait);
1065 set_current_state(TASK_INTERRUPTIBLE);
1067 schedule_timeout(timeout);
1069 remove_wait_queue(&hdev->req_wait_q, &wait);
1071 if (signal_pending(current))
1072 return ERR_PTR(-EINTR);
1074 switch (hdev->req_status) {
1076 err = -bt_to_errno(hdev->req_result);
1079 case HCI_REQ_CANCELED:
1080 err = -hdev->req_result;
1088 hdev->req_status = hdev->req_result = 0;
1090 BT_DBG("%s end: err %d", hdev->name, err);
1093 return ERR_PTR(err);
1095 return hci_get_cmd_complete(hdev, opcode, event);
1097 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1099 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1100 const void *param, u32 timeout)
1102 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1104 EXPORT_SYMBOL(__hci_cmd_sync);
1106 /* Execute request and wait for completion. */
1107 static int __hci_req_sync(struct hci_dev *hdev,
1108 void (*func)(struct hci_request *req,
1110 unsigned long opt, __u32 timeout)
1112 struct hci_request req;
1113 DECLARE_WAITQUEUE(wait, current);
1116 BT_DBG("%s start", hdev->name);
1118 hci_req_init(&req, hdev);
1120 hdev->req_status = HCI_REQ_PEND;
1124 err = hci_req_run(&req, hci_req_sync_complete);
1126 hdev->req_status = 0;
1128 /* ENODATA means the HCI request command queue is empty.
1129 * This can happen when a request with conditionals doesn't
1130 * trigger any commands to be sent. This is normal behavior
1131 * and should not trigger an error return.
1133 if (err == -ENODATA)
1139 add_wait_queue(&hdev->req_wait_q, &wait);
1140 set_current_state(TASK_INTERRUPTIBLE);
1142 schedule_timeout(timeout);
1144 remove_wait_queue(&hdev->req_wait_q, &wait);
1146 if (signal_pending(current))
1149 switch (hdev->req_status) {
1151 err = -bt_to_errno(hdev->req_result);
1154 case HCI_REQ_CANCELED:
1155 err = -hdev->req_result;
1163 hdev->req_status = hdev->req_result = 0;
1165 BT_DBG("%s end: err %d", hdev->name, err);
1170 static int hci_req_sync(struct hci_dev *hdev,
1171 void (*req)(struct hci_request *req,
1173 unsigned long opt, __u32 timeout)
1177 if (!test_bit(HCI_UP, &hdev->flags))
1180 /* Serialize all requests */
1182 ret = __hci_req_sync(hdev, req, opt, timeout);
1183 hci_req_unlock(hdev);
1188 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1190 BT_DBG("%s %ld", req->hdev->name, opt);
1193 set_bit(HCI_RESET, &req->hdev->flags);
1194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1197 static void bredr_init(struct hci_request *req)
1199 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1201 /* Read Local Supported Features */
1202 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1204 /* Read Local Version */
1205 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1207 /* Read BD Address */
1208 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1211 static void amp_init(struct hci_request *req)
1213 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1215 /* Read Local Version */
1216 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1218 /* Read Local Supported Commands */
1219 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1221 /* Read Local Supported Features */
1222 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1224 /* Read Local AMP Info */
1225 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1227 /* Read Data Blk size */
1228 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1230 /* Read Flow Control Mode */
1231 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1233 /* Read Location Data */
1234 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1237 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1239 struct hci_dev *hdev = req->hdev;
1241 BT_DBG("%s %ld", hdev->name, opt);
1244 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1245 hci_reset_req(req, 0);
1247 switch (hdev->dev_type) {
1257 BT_ERR("Unknown device type %d", hdev->dev_type);
1262 static void bredr_setup(struct hci_request *req)
1264 struct hci_dev *hdev = req->hdev;
1269 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1270 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1272 /* Read Class of Device */
1273 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1275 /* Read Local Name */
1276 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1278 /* Read Voice Setting */
1279 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1281 /* Read Number of Supported IAC */
1282 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1284 /* Read Current IAC LAP */
1285 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1287 /* Clear Event Filters */
1288 flt_type = HCI_FLT_CLEAR_ALL;
1289 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1291 /* Connection accept timeout ~20 secs */
1292 param = cpu_to_le16(0x7d00);
1293 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1295 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1296 * but it does not support page scan related HCI commands.
1298 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1299 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1300 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1304 static void le_setup(struct hci_request *req)
1306 struct hci_dev *hdev = req->hdev;
1308 /* Read LE Buffer Size */
1309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1311 /* Read LE Local Supported Features */
1312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1314 /* Read LE Supported States */
1315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1317 /* Read LE Advertising Channel TX Power */
1318 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1320 /* Read LE White List Size */
1321 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1323 /* Clear LE White List */
1324 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1326 /* LE-only controllers have LE implicitly enabled */
1327 if (!lmp_bredr_capable(hdev))
1328 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1331 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1333 if (lmp_ext_inq_capable(hdev))
1336 if (lmp_inq_rssi_capable(hdev))
1339 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1340 hdev->lmp_subver == 0x0757)
1343 if (hdev->manufacturer == 15) {
1344 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1346 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1348 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1352 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1353 hdev->lmp_subver == 0x1805)
1359 static void hci_setup_inquiry_mode(struct hci_request *req)
1363 mode = hci_get_inquiry_mode(req->hdev);
1365 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1368 static void hci_setup_event_mask(struct hci_request *req)
1370 struct hci_dev *hdev = req->hdev;
1372 /* The second byte is 0xff instead of 0x9f (two reserved bits
1373 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1374 * command otherwise.
1376 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1378 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1379 * any event mask for pre 1.2 devices.
1381 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1384 if (lmp_bredr_capable(hdev)) {
1385 events[4] |= 0x01; /* Flow Specification Complete */
1386 events[4] |= 0x02; /* Inquiry Result with RSSI */
1387 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1388 events[5] |= 0x08; /* Synchronous Connection Complete */
1389 events[5] |= 0x10; /* Synchronous Connection Changed */
1391 /* Use a different default for LE-only devices */
1392 memset(events, 0, sizeof(events));
1393 events[0] |= 0x10; /* Disconnection Complete */
1394 events[0] |= 0x80; /* Encryption Change */
1395 events[1] |= 0x08; /* Read Remote Version Information Complete */
1396 events[1] |= 0x20; /* Command Complete */
1397 events[1] |= 0x40; /* Command Status */
1398 events[1] |= 0x80; /* Hardware Error */
1399 events[2] |= 0x04; /* Number of Completed Packets */
1400 events[3] |= 0x02; /* Data Buffer Overflow */
1401 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1404 if (lmp_inq_rssi_capable(hdev))
1405 events[4] |= 0x02; /* Inquiry Result with RSSI */
1407 if (lmp_sniffsubr_capable(hdev))
1408 events[5] |= 0x20; /* Sniff Subrating */
1410 if (lmp_pause_enc_capable(hdev))
1411 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1413 if (lmp_ext_inq_capable(hdev))
1414 events[5] |= 0x40; /* Extended Inquiry Result */
1416 if (lmp_no_flush_capable(hdev))
1417 events[7] |= 0x01; /* Enhanced Flush Complete */
1419 if (lmp_lsto_capable(hdev))
1420 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1422 if (lmp_ssp_capable(hdev)) {
1423 events[6] |= 0x01; /* IO Capability Request */
1424 events[6] |= 0x02; /* IO Capability Response */
1425 events[6] |= 0x04; /* User Confirmation Request */
1426 events[6] |= 0x08; /* User Passkey Request */
1427 events[6] |= 0x10; /* Remote OOB Data Request */
1428 events[6] |= 0x20; /* Simple Pairing Complete */
1429 events[7] |= 0x04; /* User Passkey Notification */
1430 events[7] |= 0x08; /* Keypress Notification */
1431 events[7] |= 0x10; /* Remote Host Supported
1432 * Features Notification
1436 if (lmp_le_capable(hdev))
1437 events[7] |= 0x20; /* LE Meta-Event */
1439 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage-2 controller init: configure the event mask, SSP/EIR state,
 * inquiry mode, extended features and link security for BR/EDR and
 * common transports.
 * NOTE(review): this extract is missing some original lines (braces,
 * else-branches and local declarations such as the SSP mode byte).
 */
1442 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1444 struct hci_dev *hdev = req->hdev;
/* Host-side BR/EDR enable flag tracks the controller capability. */
1446 if (lmp_bredr_capable(hdev))
1449 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1451 if (lmp_le_capable(hdev))
1454 hci_setup_event_mask(req);
1456 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457 * local supported commands HCI command.
1459 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1460 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1462 if (lmp_ssp_capable(hdev)) {
1463 /* When SSP is available, then the host features page
1464 * should also be available as well. However some
1465 * controllers list the max_page as 0 as long as SSP
1466 * has not been enabled. To achieve proper debugging
1467 * output, force the minimum max_page to 1 at least.
1469 hdev->max_page = 0x01;
1471 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1473 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474 sizeof(mode), &mode);
/* SSP not enabled by the host: clear any stale EIR data instead. */
1476 struct hci_cp_write_eir cp;
1478 memset(hdev->eir, 0, sizeof(hdev->eir));
1479 memset(&cp, 0, sizeof(cp));
1481 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1485 if (lmp_inq_rssi_capable(hdev))
1486 hci_setup_inquiry_mode(req);
1488 if (lmp_inq_tx_pwr_capable(hdev))
1489 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1491 if (lmp_ext_feat_capable(hdev)) {
1492 struct hci_cp_read_local_ext_features cp;
1495 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Enable authentication if link-level security was requested. */
1499 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1501 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1506 static void hci_setup_link_policy(struct hci_request *req)
1508 struct hci_dev *hdev = req->hdev;
1509 struct hci_cp_write_def_link_policy cp;
1510 u16 link_policy = 0;
1512 if (lmp_rswitch_capable(hdev))
1513 link_policy |= HCI_LP_RSWITCH;
1514 if (lmp_hold_capable(hdev))
1515 link_policy |= HCI_LP_HOLD;
1516 if (lmp_sniff_capable(hdev))
1517 link_policy |= HCI_LP_SNIFF;
1518 if (lmp_park_capable(hdev))
1519 link_policy |= HCI_LP_PARK;
1521 cp.policy = cpu_to_le16(link_policy);
1522 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Write the LE Host Supported setting when the host-side LE enable
 * state differs from what the controller currently reports.
 * NOTE(review): extract is missing some original lines (e.g. the
 * assignment that enables cp.le inside the HCI_LE_ENABLED branch).
 */
1525 static void hci_set_le_support(struct hci_request *req)
1527 struct hci_dev *hdev = req->hdev;
1528 struct hci_cp_write_le_host_supported cp;
1530 /* LE-only devices do not support explicit enablement */
1531 if (!lmp_bredr_capable(hdev))
1534 memset(&cp, 0, sizeof(cp));
1536 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1538 cp.simul = lmp_le_br_capable(hdev);
/* Only issue the command if the host LE state actually changes. */
1541 if (cp.le != lmp_host_le_capable(hdev))
1542 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and send event mask page 2: CSB master/slave events and the
 * Authenticated Payload Timeout Expired event, gated on capability.
 * NOTE(review): extract is missing a line between 1572 and 1575
 * (presumably the events[] bit set for the ping-capable case).
 */
1546 static void hci_set_event_mask_page_2(struct hci_request *req)
1548 struct hci_dev *hdev = req->hdev;
1549 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1551 /* If Connectionless Slave Broadcast master role is supported
1552 * enable all necessary events for it.
1554 if (lmp_csb_master_capable(hdev)) {
1555 events[1] |= 0x40; /* Triggered Clock Capture */
1556 events[1] |= 0x80; /* Synchronization Train Complete */
1557 events[2] |= 0x10; /* Slave Page Response Timeout */
1558 events[2] |= 0x20; /* CSB Channel Map Change */
1561 /* If Connectionless Slave Broadcast slave role is supported
1562 * enable all necessary events for it.
1564 if (lmp_csb_slave_capable(hdev)) {
1565 events[2] |= 0x01; /* Synchronization Train Received */
1566 events[2] |= 0x02; /* CSB Receive */
1567 events[2] |= 0x04; /* CSB Timeout */
1568 events[2] |= 0x08; /* Truncated Page Complete */
1571 /* Enable Authenticated Payload Timeout Expired event if supported */
1572 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1575 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage-3 controller init: stored-link-key cleanup, default link
 * policy, LE event mask / host support, and reading feature pages
 * beyond page 1.
 * NOTE(review): extract is missing some original lines (braces, the
 * LE events[] declaration, command-parameter continuations).
 */
1578 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1580 struct hci_dev *hdev = req->hdev;
1583 /* Some Broadcom based Bluetooth controllers do not support the
1584 * Delete Stored Link Key command. They are clearly indicating its
1585 * absence in the bit mask of supported commands.
1587 * Check the supported commands and only if the the command is marked
1588 * as supported send it. If not supported assume that the controller
1589 * does not have actual support for stored link keys which makes this
1590 * command redundant anyway.
1592 * Some controllers indicate that they support handling deleting
1593 * stored link keys, but they don't. The quirk lets a driver
1594 * just disable this command.
1596 if (hdev->commands[6] & 0x80 &&
1597 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1598 struct hci_cp_delete_stored_link_key cp;
1600 bacpy(&cp.bdaddr, BDADDR_ANY);
1601 cp.delete_all = 0x01;
1602 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* commands[5] bit 0x10 = Write Default Link Policy supported. */
1606 if (hdev->commands[5] & 0x10)
1607 hci_setup_link_policy(req);
1609 if (lmp_le_capable(hdev)) {
1612 memset(events, 0, sizeof(events));
1615 /* If controller supports the Connection Parameters Request
1616 * Link Layer Procedure, enable the corresponding event.
1618 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1619 events[0] |= 0x20; /* LE Remote Connection
1623 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1626 hci_set_le_support(req);
1629 /* Read features beyond page 1 if available */
1630 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1631 struct hci_cp_read_local_ext_features cp;
1634 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 controller init: event mask page 2, synchronization train
 * parameters and Secure Connections enablement.
 * NOTE(review): extract is missing the local "support" declaration
 * referenced by the final hci_req_add().
 */
1639 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1641 struct hci_dev *hdev = req->hdev;
1643 /* Set event mask page 2 if the HCI command for it is supported */
1644 if (hdev->commands[22] & 0x04)
1645 hci_set_event_mask_page_2(req);
1647 /* Check for Synchronization Train support */
1648 if (lmp_sync_train_capable(hdev))
1649 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1651 /* Enable Secure Connections if supported and configured */
1652 if ((lmp_sc_capable(hdev) ||
1653 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1654 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1656 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657 sizeof(support), &support);
/* Run the full synchronous init sequence (stages 1-4) for a newly
 * opened controller, then create the per-device debugfs entries on
 * first setup only. Returns 0 on success or a negative errno from
 * __hci_req_sync().
 * NOTE(review): extract is missing some original lines (error-return
 * checks after each stage, a few debugfs fops continuations, braces).
 */
1661 static int __hci_init(struct hci_dev *hdev)
1665 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1669 /* The Device Under Test (DUT) mode is special and available for
1670 * all controller types. So just create it early on.
1672 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1677 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1678 * BR/EDR/LE type controllers. AMP controllers only need the
1681 if (hdev->dev_type != HCI_BREDR)
1684 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1688 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1692 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1696 /* Only create debugfs entries during the initial setup
1697 * phase and not every time the controller gets powered on.
1699 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Generic entries available for every controller type. */
1702 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1704 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705 &hdev->manufacturer);
1706 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1710 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1712 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713 &conn_info_min_age_fops);
1714 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715 &conn_info_max_age_fops);
/* BR/EDR-specific entries. */
1717 if (lmp_bredr_capable(hdev)) {
1718 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719 hdev, &inquiry_cache_fops);
1720 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721 hdev, &link_keys_fops);
1722 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723 hdev, &dev_class_fops);
1724 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725 hdev, &voice_setting_fops);
1728 if (lmp_ssp_capable(hdev)) {
1729 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730 hdev, &auto_accept_delay_fops);
1731 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732 hdev, &force_sc_support_fops);
1733 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734 hdev, &sc_only_mode_fops);
1737 if (lmp_sniff_capable(hdev)) {
1738 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739 hdev, &idle_timeout_fops);
1740 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741 hdev, &sniff_min_interval_fops);
1742 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743 hdev, &sniff_max_interval_fops);
/* LE-specific entries. */
1746 if (lmp_le_capable(hdev)) {
1747 debugfs_create_file("identity", 0400, hdev->debugfs,
1748 hdev, &identity_fops);
1749 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750 hdev, &rpa_timeout_fops);
1751 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752 hdev, &random_address_fops);
1753 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754 hdev, &static_address_fops);
1756 /* For controllers with a public address, provide a debug
1757 * option to force the usage of the configured static
1758 * address. By default the public address is used.
1760 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761 debugfs_create_file("force_static_address", 0644,
1762 hdev->debugfs, hdev,
1763 &force_static_address_fops);
1765 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766 &hdev->le_white_list_size);
1767 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1769 debugfs_create_file("identity_resolving_keys", 0400,
1770 hdev->debugfs, hdev,
1771 &identity_resolving_keys_fops);
1772 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773 hdev, &long_term_keys_fops);
1774 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775 hdev, &conn_min_interval_fops);
1776 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777 hdev, &conn_max_interval_fops);
1778 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779 hdev, &conn_latency_fops);
1780 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781 hdev, &supervision_timeout_fops);
1782 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783 hdev, &adv_channel_map_fops);
1784 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1786 debugfs_create_u16("discov_interleaved_timeout", 0644,
1788 &hdev->discov_interleaved_timeout);
/* Minimal init request for unconfigured controllers: optional reset,
 * then read the local version and (if the driver can change the
 * address) the BD address.
 */
1794 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1796 struct hci_dev *hdev = req->hdev;
1798 BT_DBG("%s %ld", hdev->name, opt);
/* Reset first, unless the driver asked to skip reset-on-close. */
1801 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1802 hci_reset_req(req, 0);
1804 /* Read Local Version */
1805 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1807 /* Read BD Address */
1808 if (hdev->set_bdaddr)
1809 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Run the unconfigured-controller init request synchronously; raw
 * devices are skipped entirely.
 * NOTE(review): extract is missing the return statements.
 */
1812 static int __hci_unconf_init(struct hci_dev *hdev)
1816 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1819 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Request helper: write the scan-enable setting passed in @opt.
 * NOTE(review): extract is missing the local "scan" declaration
 * initialized from opt.
 */
1826 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1830 BT_DBG("%s %x", req->hdev->name, scan);
1832 /* Inquiry and Page scans */
1833 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request helper: write the authentication-enable setting from @opt.
 * NOTE(review): extract is missing the local "auth" declaration.
 */
1836 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1840 BT_DBG("%s %x", req->hdev->name, auth);
1842 /* Authentication */
1843 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request helper: write the encryption-mode setting from @opt.
 * NOTE(review): extract is missing the local "encrypt" declaration.
 */
1846 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1850 BT_DBG("%s %x", req->hdev->name, encrypt);
1853 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1856 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1858 __le16 policy = cpu_to_le16(opt);
1860 BT_DBG("%s %x", req->hdev->name, policy);
1862 /* Default link policy */
1863 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1866 /* Get HCI device by index.
1867 * Device is held on return. */
/* Linear search of hci_dev_list under the read lock; takes a
 * reference on the matching device via hci_dev_hold().
 * NOTE(review): extract is missing the loop break and the final
 * return of hdev.
 */
1868 struct hci_dev *hci_dev_get(int index)
1870 struct hci_dev *hdev = NULL, *d;
1872 BT_DBG("%d", index);
1877 read_lock(&hci_dev_list_lock);
1878 list_for_each_entry(d, &hci_dev_list, list) {
1879 if (d->id == index) {
1880 hdev = hci_dev_hold(d);
1884 read_unlock(&hci_dev_list_lock);
1888 /* ---- Inquiry support ---- */
/* Report whether a discovery procedure is in progress: the FINDING
 * and RESOLVING states count as active.
 * NOTE(review): extract is missing the return statements of the
 * switch arms.
 */
1890 bool hci_discovery_active(struct hci_dev *hdev)
1892 struct discovery_state *discov = &hdev->discovery;
1894 switch (discov->state) {
1895 case DISCOVERY_FINDING:
1896 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events; no-op if the state is unchanged.
 * NOTE(review): extract is missing the switch statement's break
 * lines between the case labels.
 */
1904 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1906 int old_state = hdev->discovery.state;
1908 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1910 if (old_state == state)
1913 hdev->discovery.state = state;
1916 case DISCOVERY_STOPPED:
1917 hci_update_background_scan(hdev);
/* Don't report "stopped" if discovery never actually started. */
1919 if (old_state != DISCOVERY_STARTING)
1920 mgmt_discovering(hdev, 0);
1922 case DISCOVERY_STARTING:
1924 case DISCOVERY_FINDING:
1925 mgmt_discovering(hdev, 1);
1927 case DISCOVERY_RESOLVING:
1929 case DISCOVERY_STOPPING:
/* Free every entry in the inquiry cache and reset its lists.
 * NOTE(review): extract is missing the per-entry del/free lines in
 * the loop body and the re-init of cache->all.
 */
1934 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1936 struct discovery_state *cache = &hdev->discovery;
1937 struct inquiry_entry *p, *n;
1939 list_for_each_entry_safe(p, n, &cache->all, all) {
1944 INIT_LIST_HEAD(&cache->unknown);
1945 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by Bluetooth address on the "all" list.
 * NOTE(review): extract is missing the return statements.
 */
1948 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1951 struct discovery_state *cache = &hdev->discovery;
1952 struct inquiry_entry *e;
1954 BT_DBG("cache %p, %pMR", cache, bdaddr);
1956 list_for_each_entry(e, &cache->all, all) {
1957 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry by address on the "unknown name" list only.
 * NOTE(review): extract is missing the return statements.
 */
1964 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1967 struct discovery_state *cache = &hdev->discovery;
1968 struct inquiry_entry *e;
1970 BT_DBG("cache %p, %pMR", cache, bdaddr);
1972 list_for_each_entry(e, &cache->unknown, list) {
1973 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the "resolve" list: BDADDR_ANY matches the first
 * entry in the requested name_state, otherwise match by address.
 * NOTE(review): extract is missing the return statements.
 */
1980 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1984 struct discovery_state *cache = &hdev->discovery;
1985 struct inquiry_entry *e;
1987 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1989 list_for_each_entry(e, &cache->resolve, list) {
1990 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1992 if (!bacmp(&e->data.bdaddr, bdaddr))
1999 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2000 struct inquiry_entry *ie)
/* Re-insert @ie into the resolve list so entries stay ordered by
 * RSSI magnitude (stronger signals resolved first).
 * NOTE(review): extract is missing the loop's break and the "pos"
 * advance inside the loop body.
 */
2002 struct discovery_state *cache = &hdev->discovery;
2003 struct list_head *pos = &cache->resolve;
2004 struct inquiry_entry *p;
2006 list_del(&ie->list);
2008 list_for_each_entry(p, &cache->resolve, list) {
2009 if (p->name_state != NAME_PENDING &&
2010 abs(p->data.rssi) >= abs(ie->data.rssi))
2015 list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the discovery cache and
 * return MGMT_DEV_FOUND_* flags describing how the result should be
 * reported (legacy pairing, confirm-name needed).
 * NOTE(review): extract is missing several original lines (goto
 * labels/jumps, allocation-failure handling, braces, final return).
 */
2018 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2021 struct discovery_state *cache = &hdev->discovery;
2022 struct inquiry_entry *ie;
2025 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2027 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2029 if (!data->ssp_mode)
2030 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2032 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
/* Existing entry: keep its legacy-pairing flag and refresh RSSI. */
2034 if (!ie->data.ssp_mode)
2035 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2037 if (ie->name_state == NAME_NEEDED &&
2038 data->rssi != ie->data.rssi) {
2039 ie->data.rssi = data->rssi;
2040 hci_inquiry_cache_update_resolve(hdev, ie);
2046 /* Entry not in the cache. Add new one. */
2047 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2049 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2053 list_add(&ie->all, &cache->all);
2056 ie->name_state = NAME_KNOWN;
2058 ie->name_state = NAME_NOT_KNOWN;
2059 list_add(&ie->list, &cache->unknown);
/* Name became known: promote the entry off the unknown list. */
2063 if (name_known && ie->name_state != NAME_KNOWN &&
2064 ie->name_state != NAME_PENDING) {
2065 ie->name_state = NAME_KNOWN;
2066 list_del(&ie->list);
2069 memcpy(&ie->data, data, sizeof(*data));
2070 ie->timestamp = jiffies;
2071 cache->timestamp = jiffies;
2073 if (ie->name_state == NAME_NOT_KNOWN)
2074 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info; returns the number of entries copied.
 * NOTE(review): extract is missing the "copied" counter handling,
 * the loop-limit check and the info pointer advance.
 */
2080 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2082 struct discovery_state *cache = &hdev->discovery;
2083 struct inquiry_info *info = (struct inquiry_info *) buf;
2084 struct inquiry_entry *e;
2087 list_for_each_entry(e, &cache->all, all) {
2088 struct inquiry_data *data = &e->data;
2093 bacpy(&info->bdaddr, &data->bdaddr);
2094 info->pscan_rep_mode = data->pscan_rep_mode;
2095 info->pscan_period_mode = data->pscan_period_mode;
2096 info->pscan_mode = data->pscan_mode;
2097 memcpy(info->dev_class, data->dev_class, 3);
2098 info->clock_offset = data->clock_offset;
2104 BT_DBG("cache %p, copied %d", cache, copied);
/* Request helper: start an inquiry with the LAP/length/num_rsp taken
 * from the hci_inquiry_req passed via @opt; bails out if an inquiry
 * is already running.
 * NOTE(review): extract is missing the early return and a memset of
 * cp before the field assignments.
 */
2108 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2110 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2111 struct hci_dev *hdev = req->hdev;
2112 struct hci_cp_inquiry cp;
2114 BT_DBG("%s", hdev->name);
2116 if (test_bit(HCI_INQUIRY, &hdev->flags))
2120 memcpy(&cp.lap, &ir->lap, 3);
2121 cp.length = ir->length;
2122 cp.num_rsp = ir->num_rsp;
2123 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action: report whether a signal interrupted the wait.
 * NOTE(review): extract is missing at least one body line before the
 * return (presumably the scheduling call).
 */
2126 static int wait_inquiry(void *word)
2129 return signal_pending(current);
/* HCIINQUIRY ioctl implementation: validates the device state,
 * optionally flushes a stale cache, runs the inquiry synchronously,
 * waits for completion and copies the cached results to userspace.
 * NOTE(review): extract is missing many original lines (error
 * returns, locking calls, goto cleanup paths, final return).
 */
2132 int hci_inquiry(void __user *arg)
2134 __u8 __user *ptr = arg;
2135 struct hci_inquiry_req ir;
2136 struct hci_dev *hdev;
2137 int err = 0, do_inquiry = 0, max_rsp;
2141 if (copy_from_user(&ir, ptr, sizeof(ir)))
2144 hdev = hci_dev_get(ir.dev_id);
/* Reject devices in states where an ioctl inquiry makes no sense. */
2148 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2153 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2158 if (hdev->dev_type != HCI_BREDR) {
2163 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2169 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2170 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2171 hci_inquiry_cache_flush(hdev);
2174 hci_dev_unlock(hdev);
2176 timeo = ir.length * msecs_to_jiffies(2000);
2179 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2184 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2185 * cleared). If it is interrupted by a signal, return -EINTR.
2187 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2188 TASK_INTERRUPTIBLE))
2192 /* for unlimited number of responses we will use buffer with
2195 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2197 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2198 * copy it to the user space.
2200 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2207 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2208 hci_dev_unlock(hdev);
2210 BT_DBG("num_rsp %d", ir.num_rsp);
2212 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2214 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Power on a controller: validate preconditions (rfkill, address),
 * call the driver open callback, run setup / unconfigured init /
 * full init as appropriate, and either mark the device HCI_UP or
 * tear everything back down on failure.
 * NOTE(review): extract is missing many original lines (locking,
 * early returns, goto done/cleanup labels, driver close call).
 */
2227 static int hci_dev_do_open(struct hci_dev *hdev)
2231 BT_DBG("%s %p", hdev->name, hdev);
2235 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2240 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2241 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2242 /* Check for rfkill but allow the HCI setup stage to
2243 * proceed (which in itself doesn't cause any RF activity).
2245 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2250 /* Check for valid public address or a configured static
2251 * random adddress, but let the HCI setup proceed to
2252 * be able to determine if there is a public address
2255 * In case of user channel usage, it is not important
2256 * if a public address or static random address is
2259 * This check is only valid for BR/EDR controllers
2260 * since AMP controllers do not have an address.
2262 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2263 hdev->dev_type == HCI_BREDR &&
2264 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2265 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2266 ret = -EADDRNOTAVAIL;
2271 if (test_bit(HCI_UP, &hdev->flags)) {
2276 if (hdev->open(hdev)) {
2281 atomic_set(&hdev->cmd_cnt, 1);
2282 set_bit(HCI_INIT, &hdev->flags);
2284 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2286 ret = hdev->setup(hdev);
2288 /* The transport driver can set these quirks before
2289 * creating the HCI device or in its setup callback.
2291 * In case any of them is set, the controller has to
2292 * start up as unconfigured.
2294 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2295 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2296 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2298 /* For an unconfigured controller it is required to
2299 * read at least the version information provided by
2300 * the Read Local Version Information command.
2302 * If the set_bdaddr driver callback is provided, then
2303 * also the original Bluetooth public device address
2304 * will be read using the Read BD Address command.
2306 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2307 ret = __hci_unconf_init(hdev);
2310 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2311 /* If public address change is configured, ensure that
2312 * the address gets programmed. If the driver does not
2313 * support changing the public address, fail the power
2316 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2318 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2320 ret = -EADDRNOTAVAIL;
2324 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2325 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2326 ret = __hci_init(hdev);
2329 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark the device up and notify listeners. */
2333 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2334 set_bit(HCI_UP, &hdev->flags);
2335 hci_notify(hdev, HCI_DEV_UP);
2336 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2337 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2338 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2339 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2340 hdev->dev_type == HCI_BREDR) {
2342 mgmt_powered(hdev, 1);
2343 hci_dev_unlock(hdev);
2346 /* Init failed, cleanup */
2347 flush_work(&hdev->tx_work);
2348 flush_work(&hdev->cmd_work);
2349 flush_work(&hdev->rx_work);
2351 skb_queue_purge(&hdev->cmd_q);
2352 skb_queue_purge(&hdev->rx_q);
2357 if (hdev->sent_cmd) {
2358 kfree_skb(hdev->sent_cmd);
2359 hdev->sent_cmd = NULL;
/* Keep only HCI_RAW across a failed open. */
2363 hdev->flags &= BIT(HCI_RAW);
2367 hci_req_unlock(hdev);
2371 /* ---- HCI ioctl helpers ---- */
/* ioctl-facing power-on: refuses unconfigured devices outside the
 * user channel, cancels pending auto power-off, waits for setup to
 * finish, then calls hci_dev_do_open().
 * NOTE(review): extract is missing early returns, hci_dev_put and
 * the final return of err.
 */
2373 int hci_dev_open(__u16 dev)
2375 struct hci_dev *hdev;
2378 hdev = hci_dev_get(dev);
2382 /* Devices that are marked as unconfigured can only be powered
2383 * up as user channel. Trying to bring them up as normal devices
2384 * will result into a failure. Only user channel operation is
2387 * When this function is called for a user channel, the flag
2388 * HCI_USER_CHANNEL will be set first before attempting to
2391 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2392 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2397 /* We need to ensure that no other power on/off work is pending
2398 * before proceeding to call hci_dev_do_open. This is
2399 * particularly important if the setup procedure has not yet
2402 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2403 cancel_delayed_work(&hdev->power_off);
2405 /* After this call it is guaranteed that the setup procedure
2406 * has finished. This means that error conditions like RFKILL
2407 * or no valid public or static random address apply.
2409 flush_workqueue(hdev->req_workqueue);
2411 err = hci_dev_do_open(hdev);
2418 /* This function requires the caller holds hdev->lock */
2419 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2421 struct hci_conn_params *p;
2423 list_for_each_entry(p, &hdev->le_conn_params, list)
2424 list_del_init(&p->action);
2426 BT_DBG("All LE pending actions cleared");
/* Power a controller down: stop pending work, flush queues and
 * caches, optionally send a final reset, call the driver close
 * callback and clear all non-persistent state.
 * NOTE(review): extract is missing several original lines (locking
 * calls, driver close/flush callbacks, returns, braces).
 */
2429 static int hci_dev_do_close(struct hci_dev *hdev)
2431 BT_DBG("%s %p", hdev->name, hdev);
2433 cancel_delayed_work(&hdev->power_off);
2435 hci_req_cancel(hdev, ENODEV);
/* Already down: nothing more to tear down. */
2438 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2439 cancel_delayed_work_sync(&hdev->cmd_timer);
2440 hci_req_unlock(hdev);
2444 /* Flush RX and TX works */
2445 flush_work(&hdev->tx_work);
2446 flush_work(&hdev->rx_work);
2448 if (hdev->discov_timeout > 0) {
2449 cancel_delayed_work(&hdev->discov_off);
2450 hdev->discov_timeout = 0;
2451 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2452 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2455 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2456 cancel_delayed_work(&hdev->service_cache);
2458 cancel_delayed_work_sync(&hdev->le_scan_disable);
2460 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2461 cancel_delayed_work_sync(&hdev->rpa_expired);
2464 hci_inquiry_cache_flush(hdev);
2465 hci_conn_hash_flush(hdev);
2466 hci_pend_le_actions_clear(hdev);
2467 hci_dev_unlock(hdev);
2469 hci_notify(hdev, HCI_DEV_DOWN);
2475 skb_queue_purge(&hdev->cmd_q);
2476 atomic_set(&hdev->cmd_cnt, 1);
/* Send a final HCI Reset unless auto-off/unconfigured, and only if
 * the driver opted in via the reset-on-close quirk.
 */
2477 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2478 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2479 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2480 set_bit(HCI_INIT, &hdev->flags);
2481 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2482 clear_bit(HCI_INIT, &hdev->flags);
2485 /* flush cmd work */
2486 flush_work(&hdev->cmd_work);
2489 skb_queue_purge(&hdev->rx_q);
2490 skb_queue_purge(&hdev->cmd_q);
2491 skb_queue_purge(&hdev->raw_q);
2493 /* Drop last sent command */
2494 if (hdev->sent_cmd) {
2495 cancel_delayed_work_sync(&hdev->cmd_timer);
2496 kfree_skb(hdev->sent_cmd);
2497 hdev->sent_cmd = NULL;
2500 kfree_skb(hdev->recv_evt);
2501 hdev->recv_evt = NULL;
2503 /* After this point our queues are empty
2504 * and no tasks are scheduled. */
/* Keep only HCI_RAW and the persistent dev_flags across close. */
2508 hdev->flags &= BIT(HCI_RAW);
2509 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2511 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2512 if (hdev->dev_type == HCI_BREDR) {
2514 mgmt_powered(hdev, 0);
2515 hci_dev_unlock(hdev);
2519 /* Controller radio is available but is currently powered down */
2520 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2522 memset(hdev->eir, 0, sizeof(hdev->eir));
2523 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2524 bacpy(&hdev->random_addr, BDADDR_ANY);
2526 hci_req_unlock(hdev);
/* ioctl-facing power-off: rejects user-channel devices, cancels a
 * pending auto power-off, then calls hci_dev_do_close().
 * NOTE(review): extract is missing the error return, hci_dev_put
 * and the final return of err.
 */
2532 int hci_dev_close(__u16 dev)
2534 struct hci_dev *hdev;
2537 hdev = hci_dev_get(dev);
2541 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2546 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2547 cancel_delayed_work(&hdev->power_off);
2549 err = hci_dev_do_close(hdev);
/* ioctl-facing reset: drops queued traffic, flushes the inquiry
 * cache and connection hash, resets flow-control counters and sends
 * HCI Reset synchronously.
 * NOTE(review): extract is missing locking calls, error returns and
 * the final return of ret.
 */
2556 int hci_dev_reset(__u16 dev)
2558 struct hci_dev *hdev;
2561 hdev = hci_dev_get(dev);
2567 if (!test_bit(HCI_UP, &hdev->flags)) {
2572 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2577 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
/* Drop queues */
2583 skb_queue_purge(&hdev->rx_q);
2584 skb_queue_purge(&hdev->cmd_q);
2587 hci_inquiry_cache_flush(hdev);
2588 hci_conn_hash_flush(hdev);
2589 hci_dev_unlock(hdev);
/* Restart command processing and clear per-link-type credit counts. */
2594 atomic_set(&hdev->cmd_cnt, 1);
2595 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2597 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2600 hci_req_unlock(hdev);
/* ioctl-facing statistics reset: zeroes hdev->stat for eligible
 * devices.
 * NOTE(review): extract is missing the error returns, hci_dev_put
 * and the final return.
 */
2605 int hci_dev_reset_stat(__u16 dev)
2607 struct hci_dev *hdev;
2610 hdev = hci_dev_get(dev);
2614 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2619 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2624 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* ioctls: validates device state, then
 * either issues the matching synchronous HCI request or updates the
 * corresponding hdev field directly.
 * NOTE(review): extract is missing the switch/case labels, breaks,
 * error returns and the final return of err.
 */
2631 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2633 struct hci_dev *hdev;
2634 struct hci_dev_req dr;
2637 if (copy_from_user(&dr, arg, sizeof(dr)))
2640 hdev = hci_dev_get(dr.dev_id);
2644 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2649 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2654 if (hdev->dev_type != HCI_BREDR) {
2659 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2666 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2671 if (!lmp_encrypt_capable(hdev)) {
2676 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2677 /* Auth must be enabled first */
2678 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2684 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2689 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2694 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2698 case HCISETLINKMODE:
2699 hdev->link_mode = ((__u16) dr.dev_opt) &
2700 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2704 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high half and packet count in the low. */
2708 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2709 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2713 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2714 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: fill a caller-sized hci_dev_list_req with the
 * id and flags of every registered device and copy it to userspace.
 * NOTE(review): extract is missing some lines (dev_num declaration,
 * allocation-failure check, dr assignment, n increment, kfree).
 */
2727 int hci_get_dev_list(void __user *arg)
2729 struct hci_dev *hdev;
2730 struct hci_dev_list_req *dl;
2731 struct hci_dev_req *dr;
2732 int n = 0, size, err;
2735 if (get_user(dev_num, (__u16 __user *) arg))
/* Bound the request so the kzalloc below stays reasonable. */
2738 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2741 size = sizeof(*dl) + dev_num * sizeof(*dr);
2743 dl = kzalloc(size, GFP_KERNEL);
2749 read_lock(&hci_dev_list_lock);
2750 list_for_each_entry(hdev, &hci_dev_list, list) {
2751 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2752 cancel_delayed_work(&hdev->power_off);
2754 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2755 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2757 (dr + n)->dev_id = hdev->id;
2758 (dr + n)->dev_opt = hdev->flags;
2763 read_unlock(&hci_dev_list_lock);
/* Shrink size to the number of devices actually found. */
2766 size = sizeof(*dl) + n * sizeof(*dr);
2768 err = copy_to_user(arg, dl, size);
2771 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a hci_dev_info snapshot of the device
 * (address, type, flags, MTUs, stats, features) and copy it out.
 * NOTE(review): extract is missing the error returns, hci_dev_put
 * and the final return of err.
 */
2774 int hci_get_dev_info(void __user *arg)
2776 struct hci_dev *hdev;
2777 struct hci_dev_info di;
2780 if (copy_from_user(&di, arg, sizeof(di)))
2783 hdev = hci_dev_get(di.dev_id);
2787 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2788 cancel_delayed_work_sync(&hdev->power_off);
2790 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2791 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2793 strcpy(di.name, hdev->name);
2794 di.bdaddr = hdev->bdaddr;
2795 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2796 di.flags = hdev->flags;
2797 di.pkt_type = hdev->pkt_type;
/* Report LE buffer sizes through the ACL fields for LE-only parts. */
2798 if (lmp_bredr_capable(hdev)) {
2799 di.acl_mtu = hdev->acl_mtu;
2800 di.acl_pkts = hdev->acl_pkts;
2801 di.sco_mtu = hdev->sco_mtu;
2802 di.sco_pkts = hdev->sco_pkts;
2804 di.acl_mtu = hdev->le_mtu;
2805 di.acl_pkts = hdev->le_pkts;
2809 di.link_policy = hdev->link_policy;
2810 di.link_mode = hdev->link_mode;
2812 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2813 memcpy(&di.features, &hdev->features, sizeof(di.features));
2815 if (copy_to_user(arg, &di, sizeof(di)))
2823 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: track block state in dev_flags and close the
 * device when it gets blocked outside the setup/config phases.
 * NOTE(review): extract is missing the early return, the else line
 * and the final return 0.
 */
2825 static int hci_rfkill_set_block(void *data, bool blocked)
2827 struct hci_dev *hdev = data;
2829 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
/* User-channel owners manage RF state themselves. */
2831 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2835 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2836 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2837 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2838 hci_dev_do_close(hdev);
2840 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table registered for each HCI device. */
2846 static const struct rfkill_ops hci_rfkill_ops = {
2847 .set_block = hci_rfkill_set_block,
/* Deferred power-on work item: open the device, re-check conditions
 * ignored during setup (rfkill, missing address), arm the auto
 * power-off timer, and emit the appropriate mgmt index events once
 * setup or configuration completes.
 * NOTE(review): extract is missing some original lines (error
 * return, braces).
 */
2850 static void hci_power_on(struct work_struct *work)
2852 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2855 BT_DBG("%s", hdev->name);
2857 err = hci_dev_do_open(hdev);
2859 mgmt_set_powered_failed(hdev, err);
2863 /* During the HCI setup phase, a few error conditions are
2864 * ignored and they need to be checked now. If they are still
2865 * valid, it is important to turn the device back off.
2867 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2868 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2869 (hdev->dev_type == HCI_BREDR &&
2870 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2871 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2872 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2873 hci_dev_do_close(hdev);
2874 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2875 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2876 HCI_AUTO_OFF_TIMEOUT);
2879 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2880 /* For unconfigured devices, set the HCI_RAW flag
2881 * so that userspace can easily identify them.
2883 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2884 set_bit(HCI_RAW, &hdev->flags);
2886 /* For fully configured devices, this will send
2887 * the Index Added event. For unconfigured devices,
2888 * it will send Unconfigued Index Added event.
2890 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2891 * and no event will be send.
2893 mgmt_index_added(hdev);
2894 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2895 /* When the controller is now configured, then it
2896 * is important to clear the HCI_RAW flag.
2898 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2899 clear_bit(HCI_RAW, &hdev->flags);
2901 /* Powering on the controller with HCI_CONFIG set only
2902 * happens with the transition from unconfigured to
2903 * configured. This will send the Index Added event.
2905 mgmt_index_added(hdev);
/* Deferred power-off work item: simply closes the device. */
2909 static void hci_power_off(struct work_struct *work)
2911 struct hci_dev *hdev = container_of(work, struct hci_dev,
2914 BT_DBG("%s", hdev->name);
2916 hci_dev_do_close(hdev);
/* Delayed work: discoverable timeout expired — let mgmt turn
 * discoverability off.
 */
2919 static void hci_discov_off(struct work_struct *work)
2921 struct hci_dev *hdev;
2923 hdev = container_of(work, struct hci_dev, discov_off.work);
2925 BT_DBG("%s", hdev->name);
2927 mgmt_discoverable_timeout(hdev);
/* Remove and free every UUID registered on the device.
 * NOTE(review): extract is missing the kfree of each entry.
 */
2930 void hci_uuids_clear(struct hci_dev *hdev)
2932 struct bt_uuid *uuid, *tmp;
2934 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2935 list_del(&uuid->list);
2940 void hci_link_keys_clear(struct hci_dev *hdev)
2942 struct list_head *p, *n;
2944 list_for_each_safe(p, n, &hdev->link_keys) {
2945 struct link_key *key;
2947 key = list_entry(p, struct link_key, list);
2954 void hci_smp_ltks_clear(struct hci_dev *hdev)
2956 struct smp_ltk *k, *tmp;
2958 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2964 void hci_smp_irks_clear(struct hci_dev *hdev)
2966 struct smp_irk *k, *tmp;
2968 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2974 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2978 list_for_each_entry(k, &hdev->link_keys, list)
2979 if (bacmp(bdaddr, &k->bdaddr) == 0)
2985 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2986 u8 key_type, u8 old_key_type)
2989 if (key_type < 0x03)
2992 /* Debug keys are insecure so don't store them persistently */
2993 if (key_type == HCI_LK_DEBUG_COMBINATION)
2996 /* Changed combination key and there's no previous one */
2997 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3000 /* Security mode 3 case */
3004 /* Neither local nor remote side had no-bonding as requirement */
3005 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3008 /* Local side had dedicated bonding as requirement */
3009 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3012 /* Remote side had dedicated bonding as requirement */
3013 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3016 /* If none of the above criteria match, then don't store the key
3021 static bool ltk_type_master(u8 type)
3023 return (type == SMP_LTK);
3026 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3031 list_for_each_entry(k, &hdev->long_term_keys, list) {
3032 if (k->ediv != ediv || k->rand != rand)
3035 if (ltk_type_master(k->type) != master)
3044 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3045 u8 addr_type, bool master)
3049 list_for_each_entry(k, &hdev->long_term_keys, list)
3050 if (addr_type == k->bdaddr_type &&
3051 bacmp(bdaddr, &k->bdaddr) == 0 &&
3052 ltk_type_master(k->type) == master)
3058 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3060 struct smp_irk *irk;
3062 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3063 if (!bacmp(&irk->rpa, rpa))
3067 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3068 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3069 bacpy(&irk->rpa, rpa);
3077 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3080 struct smp_irk *irk;
3082 /* Identity Address must be public or static random */
3083 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3086 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3087 if (addr_type == irk->addr_type &&
3088 bacmp(bdaddr, &irk->bdaddr) == 0)
3095 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3096 bdaddr_t *bdaddr, u8 *val, u8 type,
3097 u8 pin_len, bool *persistent)
3099 struct link_key *key, *old_key;
3102 old_key = hci_find_link_key(hdev, bdaddr);
3104 old_key_type = old_key->type;
3107 old_key_type = conn ? conn->key_type : 0xff;
3108 key = kzalloc(sizeof(*key), GFP_KERNEL);
3111 list_add(&key->list, &hdev->link_keys);
3114 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3116 /* Some buggy controller combinations generate a changed
3117 * combination key for legacy pairing even when there's no
3119 if (type == HCI_LK_CHANGED_COMBINATION &&
3120 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3121 type = HCI_LK_COMBINATION;
3123 conn->key_type = type;
3126 bacpy(&key->bdaddr, bdaddr);
3127 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3128 key->pin_len = pin_len;
3130 if (type == HCI_LK_CHANGED_COMBINATION)
3131 key->type = old_key_type;
3136 *persistent = hci_persistent_key(hdev, conn, type,
3142 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3143 u8 addr_type, u8 type, u8 authenticated,
3144 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3146 struct smp_ltk *key, *old_key;
3147 bool master = ltk_type_master(type);
3149 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3153 key = kzalloc(sizeof(*key), GFP_KERNEL);
3156 list_add(&key->list, &hdev->long_term_keys);
3159 bacpy(&key->bdaddr, bdaddr);
3160 key->bdaddr_type = addr_type;
3161 memcpy(key->val, tk, sizeof(key->val));
3162 key->authenticated = authenticated;
3165 key->enc_size = enc_size;
3171 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3172 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3174 struct smp_irk *irk;
3176 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3178 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3182 bacpy(&irk->bdaddr, bdaddr);
3183 irk->addr_type = addr_type;
3185 list_add(&irk->list, &hdev->identity_resolving_keys);
3188 memcpy(irk->val, val, 16);
3189 bacpy(&irk->rpa, rpa);
3194 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3196 struct link_key *key;
3198 key = hci_find_link_key(hdev, bdaddr);
3202 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3204 list_del(&key->list);
3210 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3212 struct smp_ltk *k, *tmp;
3215 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3216 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3219 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3226 return removed ? 0 : -ENOENT;
3229 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3231 struct smp_irk *k, *tmp;
3233 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3234 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3237 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3244 /* HCI command timer function */
3245 static void hci_cmd_timeout(struct work_struct *work)
3247 struct hci_dev *hdev = container_of(work, struct hci_dev,
3250 if (hdev->sent_cmd) {
3251 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3252 u16 opcode = __le16_to_cpu(sent->opcode);
3254 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3256 BT_ERR("%s command tx timeout", hdev->name);
3259 atomic_set(&hdev->cmd_cnt, 1);
3260 queue_work(hdev->workqueue, &hdev->cmd_work);
3263 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3266 struct oob_data *data;
3268 list_for_each_entry(data, &hdev->remote_oob_data, list)
3269 if (bacmp(bdaddr, &data->bdaddr) == 0)
3275 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3277 struct oob_data *data;
3279 data = hci_find_remote_oob_data(hdev, bdaddr);
3283 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3285 list_del(&data->list);
3291 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3293 struct oob_data *data, *n;
3295 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3296 list_del(&data->list);
3301 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3302 u8 *hash, u8 *randomizer)
3304 struct oob_data *data;
3306 data = hci_find_remote_oob_data(hdev, bdaddr);
3308 data = kmalloc(sizeof(*data), GFP_KERNEL);
3312 bacpy(&data->bdaddr, bdaddr);
3313 list_add(&data->list, &hdev->remote_oob_data);
3316 memcpy(data->hash192, hash, sizeof(data->hash192));
3317 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3319 memset(data->hash256, 0, sizeof(data->hash256));
3320 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3322 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3327 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3328 u8 *hash192, u8 *randomizer192,
3329 u8 *hash256, u8 *randomizer256)
3331 struct oob_data *data;
3333 data = hci_find_remote_oob_data(hdev, bdaddr);
3335 data = kmalloc(sizeof(*data), GFP_KERNEL);
3339 bacpy(&data->bdaddr, bdaddr);
3340 list_add(&data->list, &hdev->remote_oob_data);
3343 memcpy(data->hash192, hash192, sizeof(data->hash192));
3344 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3346 memcpy(data->hash256, hash256, sizeof(data->hash256));
3347 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3349 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3354 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3355 bdaddr_t *bdaddr, u8 type)
3357 struct bdaddr_list *b;
3359 list_for_each_entry(b, bdaddr_list, list) {
3360 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3367 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3369 struct list_head *p, *n;
3371 list_for_each_safe(p, n, bdaddr_list) {
3372 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3379 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3381 struct bdaddr_list *entry;
3383 if (!bacmp(bdaddr, BDADDR_ANY))
3386 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3389 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3393 bacpy(&entry->bdaddr, bdaddr);
3394 entry->bdaddr_type = type;
3396 list_add(&entry->list, list);
3401 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3403 struct bdaddr_list *entry;
3405 if (!bacmp(bdaddr, BDADDR_ANY)) {
3406 hci_bdaddr_list_clear(list);
3410 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3414 list_del(&entry->list);
3420 /* This function requires the caller holds hdev->lock */
3421 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3422 bdaddr_t *addr, u8 addr_type)
3424 struct hci_conn_params *params;
3426 /* The conn params list only contains identity addresses */
3427 if (!hci_is_identity_address(addr, addr_type))
3430 list_for_each_entry(params, &hdev->le_conn_params, list) {
3431 if (bacmp(¶ms->addr, addr) == 0 &&
3432 params->addr_type == addr_type) {
3440 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3442 struct hci_conn *conn;
3444 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3448 if (conn->dst_type != type)
3451 if (conn->state != BT_CONNECTED)
3457 /* This function requires the caller holds hdev->lock */
3458 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3459 bdaddr_t *addr, u8 addr_type)
3461 struct hci_conn_params *param;
3463 /* The list only contains identity addresses */
3464 if (!hci_is_identity_address(addr, addr_type))
3467 list_for_each_entry(param, list, action) {
3468 if (bacmp(¶m->addr, addr) == 0 &&
3469 param->addr_type == addr_type)
3476 /* This function requires the caller holds hdev->lock */
3477 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3478 bdaddr_t *addr, u8 addr_type)
3480 struct hci_conn_params *params;
3482 if (!hci_is_identity_address(addr, addr_type))
3485 params = hci_conn_params_lookup(hdev, addr, addr_type);
3489 params = kzalloc(sizeof(*params), GFP_KERNEL);
3491 BT_ERR("Out of memory");
3495 bacpy(¶ms->addr, addr);
3496 params->addr_type = addr_type;
3498 list_add(¶ms->list, &hdev->le_conn_params);
3499 INIT_LIST_HEAD(¶ms->action);
3501 params->conn_min_interval = hdev->le_conn_min_interval;
3502 params->conn_max_interval = hdev->le_conn_max_interval;
3503 params->conn_latency = hdev->le_conn_latency;
3504 params->supervision_timeout = hdev->le_supv_timeout;
3505 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3507 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3512 /* This function requires the caller holds hdev->lock */
3513 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3516 struct hci_conn_params *params;
3518 params = hci_conn_params_add(hdev, addr, addr_type);
3522 if (params->auto_connect == auto_connect)
3525 list_del_init(¶ms->action);
3527 switch (auto_connect) {
3528 case HCI_AUTO_CONN_DISABLED:
3529 case HCI_AUTO_CONN_LINK_LOSS:
3530 hci_update_background_scan(hdev);
3532 case HCI_AUTO_CONN_REPORT:
3533 list_add(¶ms->action, &hdev->pend_le_reports);
3534 hci_update_background_scan(hdev);
3536 case HCI_AUTO_CONN_ALWAYS:
3537 if (!is_connected(hdev, addr, addr_type)) {
3538 list_add(¶ms->action, &hdev->pend_le_conns);
3539 hci_update_background_scan(hdev);
3544 params->auto_connect = auto_connect;
3546 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3552 /* This function requires the caller holds hdev->lock */
3553 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3555 struct hci_conn_params *params;
3557 params = hci_conn_params_lookup(hdev, addr, addr_type);
3561 list_del(¶ms->action);
3562 list_del(¶ms->list);
3565 hci_update_background_scan(hdev);
3567 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3570 /* This function requires the caller holds hdev->lock */
3571 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3573 struct hci_conn_params *params, *tmp;
3575 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3576 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3578 list_del(¶ms->list);
3582 BT_DBG("All LE disabled connection parameters were removed");
3585 /* This function requires the caller holds hdev->lock */
3586 void hci_conn_params_clear_all(struct hci_dev *hdev)
3588 struct hci_conn_params *params, *tmp;
3590 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3591 list_del(¶ms->action);
3592 list_del(¶ms->list);
3596 hci_update_background_scan(hdev);
3598 BT_DBG("All LE connection parameters were removed");
3601 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3604 BT_ERR("Failed to start inquiry: status %d", status);
3607 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3608 hci_dev_unlock(hdev);
3613 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3615 /* General inquiry access code (GIAC) */
3616 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3617 struct hci_request req;
3618 struct hci_cp_inquiry cp;
3622 BT_ERR("Failed to disable LE scanning: status %d", status);
3626 switch (hdev->discovery.type) {
3627 case DISCOV_TYPE_LE:
3629 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3630 hci_dev_unlock(hdev);
3633 case DISCOV_TYPE_INTERLEAVED:
3634 hci_req_init(&req, hdev);
3636 memset(&cp, 0, sizeof(cp));
3637 memcpy(&cp.lap, lap, sizeof(cp.lap));
3638 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3639 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3643 hci_inquiry_cache_flush(hdev);
3645 err = hci_req_run(&req, inquiry_complete);
3647 BT_ERR("Inquiry request failed: err %d", err);
3648 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3651 hci_dev_unlock(hdev);
3656 static void le_scan_disable_work(struct work_struct *work)
3658 struct hci_dev *hdev = container_of(work, struct hci_dev,
3659 le_scan_disable.work);
3660 struct hci_request req;
3663 BT_DBG("%s", hdev->name);
3665 hci_req_init(&req, hdev);
3667 hci_req_add_le_scan_disable(&req);
3669 err = hci_req_run(&req, le_scan_disable_work_complete);
3671 BT_ERR("Disable LE scanning request failed: err %d", err);
3674 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3676 struct hci_dev *hdev = req->hdev;
3678 /* If we're advertising or initiating an LE connection we can't
3679 * go ahead and change the random address at this time. This is
3680 * because the eventual initiator address used for the
3681 * subsequently created connection will be undefined (some
3682 * controllers use the new address and others the one we had
3683 * when the operation started).
3685 * In this kind of scenario skip the update and let the random
3686 * address be updated at the next cycle.
3688 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3689 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3690 BT_DBG("Deferring random address update");
3694 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3697 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3700 struct hci_dev *hdev = req->hdev;
3703 /* If privacy is enabled use a resolvable private address. If
3704 * current RPA has expired or there is something else than
3705 * the current RPA in use, then generate a new one.
3707 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3710 *own_addr_type = ADDR_LE_DEV_RANDOM;
3712 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3713 !bacmp(&hdev->random_addr, &hdev->rpa))
3716 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3718 BT_ERR("%s failed to generate new RPA", hdev->name);
3722 set_random_addr(req, &hdev->rpa);
3724 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3725 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3730 /* In case of required privacy without resolvable private address,
3731 * use an unresolvable private address. This is useful for active
3732 * scanning and non-connectable advertising.
3734 if (require_privacy) {
3737 get_random_bytes(&urpa, 6);
3738 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3740 *own_addr_type = ADDR_LE_DEV_RANDOM;
3741 set_random_addr(req, &urpa);
3745 /* If forcing static address is in use or there is no public
3746 * address use the static address as random address (but skip
3747 * the HCI command if the current random address is already the
3750 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3751 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3752 *own_addr_type = ADDR_LE_DEV_RANDOM;
3753 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3754 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3755 &hdev->static_addr);
3759 /* Neither privacy nor static address is being used so use a
3762 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3767 /* Copy the Identity Address of the controller.
3769 * If the controller has a public BD_ADDR, then by default use that one.
3770 * If this is a LE only controller without a public address, default to
3771 * the static random address.
3773 * For debugging purposes it is possible to force controllers with a
3774 * public address to use the static random address instead.
3776 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3779 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3780 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3781 bacpy(bdaddr, &hdev->static_addr);
3782 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3784 bacpy(bdaddr, &hdev->bdaddr);
3785 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3789 /* Alloc HCI device */
3790 struct hci_dev *hci_alloc_dev(void)
3792 struct hci_dev *hdev;
3794 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3798 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3799 hdev->esco_type = (ESCO_HV1);
3800 hdev->link_mode = (HCI_LM_ACCEPT);
3801 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3802 hdev->io_capability = 0x03; /* No Input No Output */
3803 hdev->manufacturer = 0xffff; /* Default to internal use */
3804 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3805 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3807 hdev->sniff_max_interval = 800;
3808 hdev->sniff_min_interval = 80;
3810 hdev->le_adv_channel_map = 0x07;
3811 hdev->le_scan_interval = 0x0060;
3812 hdev->le_scan_window = 0x0030;
3813 hdev->le_conn_min_interval = 0x0028;
3814 hdev->le_conn_max_interval = 0x0038;
3815 hdev->le_conn_latency = 0x0000;
3816 hdev->le_supv_timeout = 0x002a;
3818 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3819 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3820 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3821 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3823 mutex_init(&hdev->lock);
3824 mutex_init(&hdev->req_lock);
3826 INIT_LIST_HEAD(&hdev->mgmt_pending);
3827 INIT_LIST_HEAD(&hdev->blacklist);
3828 INIT_LIST_HEAD(&hdev->uuids);
3829 INIT_LIST_HEAD(&hdev->link_keys);
3830 INIT_LIST_HEAD(&hdev->long_term_keys);
3831 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3832 INIT_LIST_HEAD(&hdev->remote_oob_data);
3833 INIT_LIST_HEAD(&hdev->le_white_list);
3834 INIT_LIST_HEAD(&hdev->le_conn_params);
3835 INIT_LIST_HEAD(&hdev->pend_le_conns);
3836 INIT_LIST_HEAD(&hdev->pend_le_reports);
3837 INIT_LIST_HEAD(&hdev->conn_hash.list);
3839 INIT_WORK(&hdev->rx_work, hci_rx_work);
3840 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3841 INIT_WORK(&hdev->tx_work, hci_tx_work);
3842 INIT_WORK(&hdev->power_on, hci_power_on);
3844 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3845 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3846 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3848 skb_queue_head_init(&hdev->rx_q);
3849 skb_queue_head_init(&hdev->cmd_q);
3850 skb_queue_head_init(&hdev->raw_q);
3852 init_waitqueue_head(&hdev->req_wait_q);
3854 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3856 hci_init_sysfs(hdev);
3857 discovery_init(hdev);
3861 EXPORT_SYMBOL(hci_alloc_dev);
3863 /* Free HCI device */
3864 void hci_free_dev(struct hci_dev *hdev)
3866 /* will free via device release */
3867 put_device(&hdev->dev);
3869 EXPORT_SYMBOL(hci_free_dev);
3871 /* Register HCI device */
3872 int hci_register_dev(struct hci_dev *hdev)
3876 if (!hdev->open || !hdev->close || !hdev->send)
3879 /* Do not allow HCI_AMP devices to register at index 0,
3880 * so the index can be used as the AMP controller ID.
3882 switch (hdev->dev_type) {
3884 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3887 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3896 sprintf(hdev->name, "hci%d", id);
3899 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3901 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3902 WQ_MEM_RECLAIM, 1, hdev->name);
3903 if (!hdev->workqueue) {
3908 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3909 WQ_MEM_RECLAIM, 1, hdev->name);
3910 if (!hdev->req_workqueue) {
3911 destroy_workqueue(hdev->workqueue);
3916 if (!IS_ERR_OR_NULL(bt_debugfs))
3917 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3919 dev_set_name(&hdev->dev, "%s", hdev->name);
3921 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3923 if (IS_ERR(hdev->tfm_aes)) {
3924 BT_ERR("Unable to create crypto context");
3925 error = PTR_ERR(hdev->tfm_aes);
3926 hdev->tfm_aes = NULL;
3930 error = device_add(&hdev->dev);
3934 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3935 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3938 if (rfkill_register(hdev->rfkill) < 0) {
3939 rfkill_destroy(hdev->rfkill);
3940 hdev->rfkill = NULL;
3944 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3945 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3947 set_bit(HCI_SETUP, &hdev->dev_flags);
3948 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3950 if (hdev->dev_type == HCI_BREDR) {
3951 /* Assume BR/EDR support until proven otherwise (such as
3952 * through reading supported features during init.
3954 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3957 write_lock(&hci_dev_list_lock);
3958 list_add(&hdev->list, &hci_dev_list);
3959 write_unlock(&hci_dev_list_lock);
3961 /* Devices that are marked for raw-only usage are unconfigured
3962 * and should not be included in normal operation.
3964 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3965 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3967 hci_notify(hdev, HCI_DEV_REG);
3970 queue_work(hdev->req_workqueue, &hdev->power_on);
3975 crypto_free_blkcipher(hdev->tfm_aes);
3977 destroy_workqueue(hdev->workqueue);
3978 destroy_workqueue(hdev->req_workqueue);
3980 ida_simple_remove(&hci_index_ida, hdev->id);
3984 EXPORT_SYMBOL(hci_register_dev);
3986 /* Unregister HCI device */
3987 void hci_unregister_dev(struct hci_dev *hdev)
3991 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3993 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3997 write_lock(&hci_dev_list_lock);
3998 list_del(&hdev->list);
3999 write_unlock(&hci_dev_list_lock);
4001 hci_dev_do_close(hdev);
4003 for (i = 0; i < NUM_REASSEMBLY; i++)
4004 kfree_skb(hdev->reassembly[i]);
4006 cancel_work_sync(&hdev->power_on);
4008 if (!test_bit(HCI_INIT, &hdev->flags) &&
4009 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4010 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4012 mgmt_index_removed(hdev);
4013 hci_dev_unlock(hdev);
4016 /* mgmt_index_removed should take care of emptying the
4018 BUG_ON(!list_empty(&hdev->mgmt_pending));
4020 hci_notify(hdev, HCI_DEV_UNREG);
4023 rfkill_unregister(hdev->rfkill);
4024 rfkill_destroy(hdev->rfkill);
4028 crypto_free_blkcipher(hdev->tfm_aes);
4030 device_del(&hdev->dev);
4032 debugfs_remove_recursive(hdev->debugfs);
4034 destroy_workqueue(hdev->workqueue);
4035 destroy_workqueue(hdev->req_workqueue);
4038 hci_bdaddr_list_clear(&hdev->blacklist);
4039 hci_uuids_clear(hdev);
4040 hci_link_keys_clear(hdev);
4041 hci_smp_ltks_clear(hdev);
4042 hci_smp_irks_clear(hdev);
4043 hci_remote_oob_data_clear(hdev);
4044 hci_bdaddr_list_clear(&hdev->le_white_list);
4045 hci_conn_params_clear_all(hdev);
4046 hci_dev_unlock(hdev);
4050 ida_simple_remove(&hci_index_ida, id);
4052 EXPORT_SYMBOL(hci_unregister_dev);
4054 /* Suspend HCI device */
4055 int hci_suspend_dev(struct hci_dev *hdev)
4057 hci_notify(hdev, HCI_DEV_SUSPEND);
4060 EXPORT_SYMBOL(hci_suspend_dev);
4062 /* Resume HCI device */
4063 int hci_resume_dev(struct hci_dev *hdev)
4065 hci_notify(hdev, HCI_DEV_RESUME);
4068 EXPORT_SYMBOL(hci_resume_dev);
4070 /* Receive frame from HCI drivers */
4071 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4073 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4074 && !test_bit(HCI_INIT, &hdev->flags))) {
4080 bt_cb(skb)->incoming = 1;
4083 __net_timestamp(skb);
4085 skb_queue_tail(&hdev->rx_q, skb);
4086 queue_work(hdev->workqueue, &hdev->rx_work);
4090 EXPORT_SYMBOL(hci_recv_frame);
4092 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4093 int count, __u8 index)
4098 struct sk_buff *skb;
4099 struct bt_skb_cb *scb;
4101 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4102 index >= NUM_REASSEMBLY)
4105 skb = hdev->reassembly[index];
4109 case HCI_ACLDATA_PKT:
4110 len = HCI_MAX_FRAME_SIZE;
4111 hlen = HCI_ACL_HDR_SIZE;
4114 len = HCI_MAX_EVENT_SIZE;
4115 hlen = HCI_EVENT_HDR_SIZE;
4117 case HCI_SCODATA_PKT:
4118 len = HCI_MAX_SCO_SIZE;
4119 hlen = HCI_SCO_HDR_SIZE;
4123 skb = bt_skb_alloc(len, GFP_ATOMIC);
4127 scb = (void *) skb->cb;
4129 scb->pkt_type = type;
4131 hdev->reassembly[index] = skb;
4135 scb = (void *) skb->cb;
4136 len = min_t(uint, scb->expect, count);
4138 memcpy(skb_put(skb, len), data, len);
4147 if (skb->len == HCI_EVENT_HDR_SIZE) {
4148 struct hci_event_hdr *h = hci_event_hdr(skb);
4149 scb->expect = h->plen;
4151 if (skb_tailroom(skb) < scb->expect) {
4153 hdev->reassembly[index] = NULL;
4159 case HCI_ACLDATA_PKT:
4160 if (skb->len == HCI_ACL_HDR_SIZE) {
4161 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4162 scb->expect = __le16_to_cpu(h->dlen);
4164 if (skb_tailroom(skb) < scb->expect) {
4166 hdev->reassembly[index] = NULL;
4172 case HCI_SCODATA_PKT:
4173 if (skb->len == HCI_SCO_HDR_SIZE) {
4174 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4175 scb->expect = h->dlen;
4177 if (skb_tailroom(skb) < scb->expect) {
4179 hdev->reassembly[index] = NULL;
4186 if (scb->expect == 0) {
4187 /* Complete frame */
4189 bt_cb(skb)->pkt_type = type;
4190 hci_recv_frame(hdev, skb);
4192 hdev->reassembly[index] = NULL;
4200 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4204 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4208 rem = hci_reassembly(hdev, type, data, count, type - 1);
4212 data += (count - rem);
4218 EXPORT_SYMBOL(hci_recv_fragment);
4220 #define STREAM_REASSEMBLY 0
4222 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4228 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4231 struct { char type; } *pkt;
4233 /* Start of the frame */
4240 type = bt_cb(skb)->pkt_type;
4242 rem = hci_reassembly(hdev, type, data, count,
4247 data += (count - rem);
4253 EXPORT_SYMBOL(hci_recv_stream_fragment);
4255 /* ---- Interface to upper protocols ---- */
4257 int hci_register_cb(struct hci_cb *cb)
4259 BT_DBG("%p name %s", cb, cb->name);
4261 write_lock(&hci_cb_list_lock);
4262 list_add(&cb->list, &hci_cb_list);
4263 write_unlock(&hci_cb_list_lock);
4267 EXPORT_SYMBOL(hci_register_cb);
4269 int hci_unregister_cb(struct hci_cb *cb)
4271 BT_DBG("%p name %s", cb, cb->name);
4273 write_lock(&hci_cb_list_lock);
4274 list_del(&cb->list);
4275 write_unlock(&hci_cb_list_lock);
4279 EXPORT_SYMBOL(hci_unregister_cb);
4281 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4285 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4288 __net_timestamp(skb);
4290 /* Send copy to monitor */
4291 hci_send_to_monitor(hdev, skb);
4293 if (atomic_read(&hdev->promisc)) {
4294 /* Send copy to the sockets */
4295 hci_send_to_sock(hdev, skb);
4298 /* Get rid of skb owner, prior to sending to the driver. */
4301 err = hdev->send(hdev, skb);
4303 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4308 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4310 skb_queue_head_init(&req->cmd_q);
4315 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4317 struct hci_dev *hdev = req->hdev;
4318 struct sk_buff *skb;
4319 unsigned long flags;
4321 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4323 /* If an error occured during request building, remove all HCI
4324 * commands queued on the HCI request queue.
4327 skb_queue_purge(&req->cmd_q);
4331 /* Do not allow empty requests */
4332 if (skb_queue_empty(&req->cmd_q))
4335 skb = skb_peek_tail(&req->cmd_q);
4336 bt_cb(skb)->req.complete = complete;
4338 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4339 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4340 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4342 queue_work(hdev->workqueue, &hdev->cmd_work);
4347 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4348 u32 plen, const void *param)
4350 int len = HCI_COMMAND_HDR_SIZE + plen;
4351 struct hci_command_hdr *hdr;
4352 struct sk_buff *skb;
4354 skb = bt_skb_alloc(len, GFP_ATOMIC);
4358 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4359 hdr->opcode = cpu_to_le16(opcode);
4363 memcpy(skb_put(skb, plen), param, plen);
4365 BT_DBG("skb len %d", skb->len);
4367 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4372 /* Send HCI command */
4373 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4376 struct sk_buff *skb;
4378 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4380 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4382 BT_ERR("%s no memory for command", hdev->name);
4386 /* Stand-alone HCI commands must be flaged as
4387 * single-command requests.
4389 bt_cb(skb)->req.start = true;
4391 skb_queue_tail(&hdev->cmd_q, skb);
4392 queue_work(hdev->workqueue, &hdev->cmd_work);
4397 /* Queue a command to an asynchronous HCI request */
4398 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4399 const void *param, u8 event)
4401 struct hci_dev *hdev = req->hdev;
4402 struct sk_buff *skb;
4404 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4406 /* If an error occured during request building, there is no point in
4407 * queueing the HCI command. We can simply return.
4412 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4414 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4415 hdev->name, opcode);
4420 if (skb_queue_empty(&req->cmd_q))
4421 bt_cb(skb)->req.start = true;
4423 bt_cb(skb)->req.event = event;
4425 skb_queue_tail(&req->cmd_q, skb);
4428 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4431 hci_req_add_ev(req, opcode, plen, param, 0);
4434 /* Get data from the previously sent command */
4435 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4437 struct hci_command_hdr *hdr;
4439 if (!hdev->sent_cmd)
4442 hdr = (void *) hdev->sent_cmd->data;
4444 if (hdr->opcode != cpu_to_le16(opcode))
4447 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4449 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4453 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4455 struct hci_acl_hdr *hdr;
4458 skb_push(skb, HCI_ACL_HDR_SIZE);
4459 skb_reset_transport_header(skb);
4460 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4461 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4462 hdr->dlen = cpu_to_le16(len);
4465 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4466 struct sk_buff *skb, __u16 flags)
4468 struct hci_conn *conn = chan->conn;
4469 struct hci_dev *hdev = conn->hdev;
4470 struct sk_buff *list;
4472 skb->len = skb_headlen(skb);
4475 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4477 switch (hdev->dev_type) {
4479 hci_add_acl_hdr(skb, conn->handle, flags);
4482 hci_add_acl_hdr(skb, chan->handle, flags);
4485 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4489 list = skb_shinfo(skb)->frag_list;
4491 /* Non fragmented */
4492 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4494 skb_queue_tail(queue, skb);
4497 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4499 skb_shinfo(skb)->frag_list = NULL;
4501 /* Queue all fragments atomically */
4502 spin_lock(&queue->lock);
4504 __skb_queue_tail(queue, skb);
4506 flags &= ~ACL_START;
4509 skb = list; list = list->next;
4511 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4512 hci_add_acl_hdr(skb, conn->handle, flags);
4514 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4516 __skb_queue_tail(queue, skb);
4519 spin_unlock(&queue->lock);
4523 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4525 struct hci_dev *hdev = chan->conn->hdev;
4527 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4529 hci_queue_acl(chan, &chan->data_q, skb, flags);
4531 queue_work(hdev->workqueue, &hdev->tx_work);
4535 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4537 struct hci_dev *hdev = conn->hdev;
4538 struct hci_sco_hdr hdr;
4540 BT_DBG("%s len %d", hdev->name, skb->len);
4542 hdr.handle = cpu_to_le16(conn->handle);
4543 hdr.dlen = skb->len;
4545 skb_push(skb, HCI_SCO_HDR_SIZE);
4546 skb_reset_transport_header(skb);
4547 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4549 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4551 skb_queue_tail(&conn->data_q, skb);
4552 queue_work(hdev->workqueue, &hdev->tx_work);
4555 /* ---- HCI TX task (outgoing data) ---- */
4557 /* HCI Connection scheduler */
4558 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4561 struct hci_conn_hash *h = &hdev->conn_hash;
4562 struct hci_conn *conn = NULL, *c;
4563 unsigned int num = 0, min = ~0;
4565 /* We don't have to lock device here. Connections are always
4566 * added and removed with TX task disabled. */
4570 list_for_each_entry_rcu(c, &h->list, list) {
4571 if (c->type != type || skb_queue_empty(&c->data_q))
4574 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4579 if (c->sent < min) {
4584 if (hci_conn_num(hdev, type) == num)
4593 switch (conn->type) {
4595 cnt = hdev->acl_cnt;
4599 cnt = hdev->sco_cnt;
4602 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4606 BT_ERR("Unknown link type");
4614 BT_DBG("conn %p quote %d", conn, *quote);
4618 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4620 struct hci_conn_hash *h = &hdev->conn_hash;
4623 BT_ERR("%s link tx timeout", hdev->name);
4627 /* Kill stalled connections */
4628 list_for_each_entry_rcu(c, &h->list, list) {
4629 if (c->type == type && c->sent) {
4630 BT_ERR("%s killing stalled connection %pMR",
4631 hdev->name, &c->dst);
4632 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4639 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4642 struct hci_conn_hash *h = &hdev->conn_hash;
4643 struct hci_chan *chan = NULL;
4644 unsigned int num = 0, min = ~0, cur_prio = 0;
4645 struct hci_conn *conn;
4646 int cnt, q, conn_num = 0;
4648 BT_DBG("%s", hdev->name);
4652 list_for_each_entry_rcu(conn, &h->list, list) {
4653 struct hci_chan *tmp;
4655 if (conn->type != type)
4658 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4663 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4664 struct sk_buff *skb;
4666 if (skb_queue_empty(&tmp->data_q))
4669 skb = skb_peek(&tmp->data_q);
4670 if (skb->priority < cur_prio)
4673 if (skb->priority > cur_prio) {
4676 cur_prio = skb->priority;
4681 if (conn->sent < min) {
4687 if (hci_conn_num(hdev, type) == conn_num)
4696 switch (chan->conn->type) {
4698 cnt = hdev->acl_cnt;
4701 cnt = hdev->block_cnt;
4705 cnt = hdev->sco_cnt;
4708 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4712 BT_ERR("Unknown link type");
4717 BT_DBG("chan %p quote %d", chan, *quote);
4721 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4723 struct hci_conn_hash *h = &hdev->conn_hash;
4724 struct hci_conn *conn;
4727 BT_DBG("%s", hdev->name);
4731 list_for_each_entry_rcu(conn, &h->list, list) {
4732 struct hci_chan *chan;
4734 if (conn->type != type)
4737 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4742 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4743 struct sk_buff *skb;
4750 if (skb_queue_empty(&chan->data_q))
4753 skb = skb_peek(&chan->data_q);
4754 if (skb->priority >= HCI_PRIO_MAX - 1)
4757 skb->priority = HCI_PRIO_MAX - 1;
4759 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4763 if (hci_conn_num(hdev, type) == num)
4771 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4773 /* Calculate count of blocks used by this packet */
4774 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4777 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4779 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4780 /* ACL tx timeout must be longer than maximum
4781 * link supervision timeout (40.9 seconds) */
4782 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4783 HCI_ACL_TX_TIMEOUT))
4784 hci_link_tx_to(hdev, ACL_LINK);
4788 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4790 unsigned int cnt = hdev->acl_cnt;
4791 struct hci_chan *chan;
4792 struct sk_buff *skb;
4795 __check_timeout(hdev, cnt);
4797 while (hdev->acl_cnt &&
4798 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4799 u32 priority = (skb_peek(&chan->data_q))->priority;
4800 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4801 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4802 skb->len, skb->priority);
4804 /* Stop if priority has changed */
4805 if (skb->priority < priority)
4808 skb = skb_dequeue(&chan->data_q);
4810 hci_conn_enter_active_mode(chan->conn,
4811 bt_cb(skb)->force_active);
4813 hci_send_frame(hdev, skb);
4814 hdev->acl_last_tx = jiffies;
4822 if (cnt != hdev->acl_cnt)
4823 hci_prio_recalculate(hdev, ACL_LINK);
4826 static void hci_sched_acl_blk(struct hci_dev *hdev)
4828 unsigned int cnt = hdev->block_cnt;
4829 struct hci_chan *chan;
4830 struct sk_buff *skb;
4834 __check_timeout(hdev, cnt);
4836 BT_DBG("%s", hdev->name);
4838 if (hdev->dev_type == HCI_AMP)
4843 while (hdev->block_cnt > 0 &&
4844 (chan = hci_chan_sent(hdev, type, "e))) {
4845 u32 priority = (skb_peek(&chan->data_q))->priority;
4846 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4849 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4850 skb->len, skb->priority);
4852 /* Stop if priority has changed */
4853 if (skb->priority < priority)
4856 skb = skb_dequeue(&chan->data_q);
4858 blocks = __get_blocks(hdev, skb);
4859 if (blocks > hdev->block_cnt)
4862 hci_conn_enter_active_mode(chan->conn,
4863 bt_cb(skb)->force_active);
4865 hci_send_frame(hdev, skb);
4866 hdev->acl_last_tx = jiffies;
4868 hdev->block_cnt -= blocks;
4871 chan->sent += blocks;
4872 chan->conn->sent += blocks;
4876 if (cnt != hdev->block_cnt)
4877 hci_prio_recalculate(hdev, type);
4880 static void hci_sched_acl(struct hci_dev *hdev)
4882 BT_DBG("%s", hdev->name);
4884 /* No ACL link over BR/EDR controller */
4885 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4888 /* No AMP link over AMP controller */
4889 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4892 switch (hdev->flow_ctl_mode) {
4893 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4894 hci_sched_acl_pkt(hdev);
4897 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4898 hci_sched_acl_blk(hdev);
4904 static void hci_sched_sco(struct hci_dev *hdev)
4906 struct hci_conn *conn;
4907 struct sk_buff *skb;
4910 BT_DBG("%s", hdev->name);
4912 if (!hci_conn_num(hdev, SCO_LINK))
4915 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4916 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4917 BT_DBG("skb %p len %d", skb, skb->len);
4918 hci_send_frame(hdev, skb);
4921 if (conn->sent == ~0)
4927 static void hci_sched_esco(struct hci_dev *hdev)
4929 struct hci_conn *conn;
4930 struct sk_buff *skb;
4933 BT_DBG("%s", hdev->name);
4935 if (!hci_conn_num(hdev, ESCO_LINK))
4938 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4940 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4941 BT_DBG("skb %p len %d", skb, skb->len);
4942 hci_send_frame(hdev, skb);
4945 if (conn->sent == ~0)
4951 static void hci_sched_le(struct hci_dev *hdev)
4953 struct hci_chan *chan;
4954 struct sk_buff *skb;
4955 int quote, cnt, tmp;
4957 BT_DBG("%s", hdev->name);
4959 if (!hci_conn_num(hdev, LE_LINK))
4962 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4963 /* LE tx timeout must be longer than maximum
4964 * link supervision timeout (40.9 seconds) */
4965 if (!hdev->le_cnt && hdev->le_pkts &&
4966 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4967 hci_link_tx_to(hdev, LE_LINK);
4970 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4972 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4973 u32 priority = (skb_peek(&chan->data_q))->priority;
4974 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4975 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4976 skb->len, skb->priority);
4978 /* Stop if priority has changed */
4979 if (skb->priority < priority)
4982 skb = skb_dequeue(&chan->data_q);
4984 hci_send_frame(hdev, skb);
4985 hdev->le_last_tx = jiffies;
4996 hdev->acl_cnt = cnt;
4999 hci_prio_recalculate(hdev, LE_LINK);
5002 static void hci_tx_work(struct work_struct *work)
5004 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5005 struct sk_buff *skb;
5007 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5008 hdev->sco_cnt, hdev->le_cnt);
5010 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5011 /* Schedule queues and send stuff to HCI driver */
5012 hci_sched_acl(hdev);
5013 hci_sched_sco(hdev);
5014 hci_sched_esco(hdev);
5018 /* Send next queued raw (unknown type) packet */
5019 while ((skb = skb_dequeue(&hdev->raw_q)))
5020 hci_send_frame(hdev, skb);
5023 /* ----- HCI RX task (incoming data processing) ----- */
5025 /* ACL data packet */
5026 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5028 struct hci_acl_hdr *hdr = (void *) skb->data;
5029 struct hci_conn *conn;
5030 __u16 handle, flags;
5032 skb_pull(skb, HCI_ACL_HDR_SIZE);
5034 handle = __le16_to_cpu(hdr->handle);
5035 flags = hci_flags(handle);
5036 handle = hci_handle(handle);
5038 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5041 hdev->stat.acl_rx++;
5044 conn = hci_conn_hash_lookup_handle(hdev, handle);
5045 hci_dev_unlock(hdev);
5048 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5050 /* Send to upper protocol */
5051 l2cap_recv_acldata(conn, skb, flags);
5054 BT_ERR("%s ACL packet for unknown connection handle %d",
5055 hdev->name, handle);
5061 /* SCO data packet */
5062 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5064 struct hci_sco_hdr *hdr = (void *) skb->data;
5065 struct hci_conn *conn;
5068 skb_pull(skb, HCI_SCO_HDR_SIZE);
5070 handle = __le16_to_cpu(hdr->handle);
5072 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5074 hdev->stat.sco_rx++;
5077 conn = hci_conn_hash_lookup_handle(hdev, handle);
5078 hci_dev_unlock(hdev);
5081 /* Send to upper protocol */
5082 sco_recv_scodata(conn, skb);
5085 BT_ERR("%s SCO packet for unknown connection handle %d",
5086 hdev->name, handle);
5092 static bool hci_req_is_complete(struct hci_dev *hdev)
5094 struct sk_buff *skb;
5096 skb = skb_peek(&hdev->cmd_q);
5100 return bt_cb(skb)->req.start;
5103 static void hci_resend_last(struct hci_dev *hdev)
5105 struct hci_command_hdr *sent;
5106 struct sk_buff *skb;
5109 if (!hdev->sent_cmd)
5112 sent = (void *) hdev->sent_cmd->data;
5113 opcode = __le16_to_cpu(sent->opcode);
5114 if (opcode == HCI_OP_RESET)
5117 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5121 skb_queue_head(&hdev->cmd_q, skb);
5122 queue_work(hdev->workqueue, &hdev->cmd_work);
5125 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5127 hci_req_complete_t req_complete = NULL;
5128 struct sk_buff *skb;
5129 unsigned long flags;
5131 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5133 /* If the completed command doesn't match the last one that was
5134 * sent we need to do special handling of it.
5136 if (!hci_sent_cmd_data(hdev, opcode)) {
5137 /* Some CSR based controllers generate a spontaneous
5138 * reset complete event during init and any pending
5139 * command will never be completed. In such a case we
5140 * need to resend whatever was the last sent
5143 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5144 hci_resend_last(hdev);
5149 /* If the command succeeded and there's still more commands in
5150 * this request the request is not yet complete.
5152 if (!status && !hci_req_is_complete(hdev))
5155 /* If this was the last command in a request the complete
5156 * callback would be found in hdev->sent_cmd instead of the
5157 * command queue (hdev->cmd_q).
5159 if (hdev->sent_cmd) {
5160 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5163 /* We must set the complete callback to NULL to
5164 * avoid calling the callback more than once if
5165 * this function gets called again.
5167 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5173 /* Remove all pending commands belonging to this request */
5174 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5175 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5176 if (bt_cb(skb)->req.start) {
5177 __skb_queue_head(&hdev->cmd_q, skb);
5181 req_complete = bt_cb(skb)->req.complete;
5184 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5188 req_complete(hdev, status);
5191 static void hci_rx_work(struct work_struct *work)
5193 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5194 struct sk_buff *skb;
5196 BT_DBG("%s", hdev->name);
5198 while ((skb = skb_dequeue(&hdev->rx_q))) {
5199 /* Send copy to monitor */
5200 hci_send_to_monitor(hdev, skb);
5202 if (atomic_read(&hdev->promisc)) {
5203 /* Send copy to the sockets */
5204 hci_send_to_sock(hdev, skb);
5207 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5212 if (test_bit(HCI_INIT, &hdev->flags)) {
5213 /* Don't process data packets in this states. */
5214 switch (bt_cb(skb)->pkt_type) {
5215 case HCI_ACLDATA_PKT:
5216 case HCI_SCODATA_PKT:
5223 switch (bt_cb(skb)->pkt_type) {
5225 BT_DBG("%s Event packet", hdev->name);
5226 hci_event_packet(hdev, skb);
5229 case HCI_ACLDATA_PKT:
5230 BT_DBG("%s ACL data packet", hdev->name);
5231 hci_acldata_packet(hdev, skb);
5234 case HCI_SCODATA_PKT:
5235 BT_DBG("%s SCO data packet", hdev->name);
5236 hci_scodata_packet(hdev, skb);
5246 static void hci_cmd_work(struct work_struct *work)
5248 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5249 struct sk_buff *skb;
5251 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5252 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5254 /* Send queued commands */
5255 if (atomic_read(&hdev->cmd_cnt)) {
5256 skb = skb_dequeue(&hdev->cmd_q);
5260 kfree_skb(hdev->sent_cmd);
5262 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5263 if (hdev->sent_cmd) {
5264 atomic_dec(&hdev->cmd_cnt);
5265 hci_send_frame(hdev, skb);
5266 if (test_bit(HCI_RESET, &hdev->flags))
5267 cancel_delayed_work(&hdev->cmd_timer);
5269 schedule_delayed_work(&hdev->cmd_timer,
5272 skb_queue_head(&hdev->cmd_q, skb);
5273 queue_work(hdev->workqueue, &hdev->cmd_work);
5278 void hci_req_add_le_scan_disable(struct hci_request *req)
5280 struct hci_cp_le_set_scan_enable cp;
5282 memset(&cp, 0, sizeof(cp));
5283 cp.enable = LE_SCAN_DISABLE;
5284 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5287 void hci_req_add_le_passive_scan(struct hci_request *req)
5289 struct hci_cp_le_set_scan_param param_cp;
5290 struct hci_cp_le_set_scan_enable enable_cp;
5291 struct hci_dev *hdev = req->hdev;
5294 /* Set require_privacy to false since no SCAN_REQ are send
5295 * during passive scanning. Not using an unresolvable address
5296 * here is important so that peer devices using direct
5297 * advertising with our address will be correctly reported
5298 * by the controller.
5300 if (hci_update_random_address(req, false, &own_addr_type))
5303 memset(¶m_cp, 0, sizeof(param_cp));
5304 param_cp.type = LE_SCAN_PASSIVE;
5305 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5306 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5307 param_cp.own_address_type = own_addr_type;
5308 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5311 memset(&enable_cp, 0, sizeof(enable_cp));
5312 enable_cp.enable = LE_SCAN_ENABLE;
5313 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5314 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5318 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5321 BT_DBG("HCI request failed to update background scanning: "
5322 "status 0x%2.2x", status);
5325 /* This function controls the background scanning based on hdev->pend_le_conns
5326 * list. If there are pending LE connection we start the background scanning,
5327 * otherwise we stop it.
5329 * This function requires the caller holds hdev->lock.
5331 void hci_update_background_scan(struct hci_dev *hdev)
5333 struct hci_request req;
5334 struct hci_conn *conn;
5337 if (!test_bit(HCI_UP, &hdev->flags) ||
5338 test_bit(HCI_INIT, &hdev->flags) ||
5339 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5340 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5341 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5342 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5345 /* No point in doing scanning if LE support hasn't been enabled */
5346 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5349 /* If discovery is active don't interfere with it */
5350 if (hdev->discovery.state != DISCOVERY_STOPPED)
5353 hci_req_init(&req, hdev);
5355 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5356 list_empty(&hdev->pend_le_conns) &&
5357 list_empty(&hdev->pend_le_reports)) {
5358 /* If there is no pending LE connections or devices
5359 * to be scanned for, we should stop the background
5363 /* If controller is not scanning we are done. */
5364 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5367 hci_req_add_le_scan_disable(&req);
5369 BT_DBG("%s stopping background scanning", hdev->name);
5371 /* If there is at least one pending LE connection, we should
5372 * keep the background scan running.
5375 /* If controller is connecting, we should not start scanning
5376 * since some controllers are not able to scan and connect at
5379 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5383 /* If controller is currently scanning, we stop it to ensure we
5384 * don't miss any advertising (due to duplicates filter).
5386 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5387 hci_req_add_le_scan_disable(&req);
5389 hci_req_add_le_passive_scan(&req);
5391 BT_DBG("%s starting background scanning", hdev->name);
5394 err = hci_req_run(&req, update_background_scan_complete);
5396 BT_ERR("Failed to run HCI request: err %d", err);